< prev index next >

src/hotspot/share/opto/compile.cpp

Print this page

  34 #include "compiler/disassembler.hpp"
  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/barrierSet.hpp"
  37 #include "gc/shared/c2/barrierSetC2.hpp"
  38 #include "jfr/jfrEvents.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "opto/addnode.hpp"
  41 #include "opto/block.hpp"
  42 #include "opto/c2compiler.hpp"
  43 #include "opto/callGenerator.hpp"
  44 #include "opto/callnode.hpp"
  45 #include "opto/castnode.hpp"
  46 #include "opto/cfgnode.hpp"
  47 #include "opto/chaitin.hpp"
  48 #include "opto/compile.hpp"
  49 #include "opto/connode.hpp"
  50 #include "opto/convertnode.hpp"
  51 #include "opto/divnode.hpp"
  52 #include "opto/escape.hpp"
  53 #include "opto/idealGraphPrinter.hpp"

  54 #include "opto/loopnode.hpp"
  55 #include "opto/machnode.hpp"
  56 #include "opto/macro.hpp"
  57 #include "opto/matcher.hpp"
  58 #include "opto/mathexactnode.hpp"
  59 #include "opto/memnode.hpp"
  60 #include "opto/mulnode.hpp"
  61 #include "opto/narrowptrnode.hpp"
  62 #include "opto/node.hpp"
  63 #include "opto/opcodes.hpp"
  64 #include "opto/output.hpp"
  65 #include "opto/parse.hpp"
  66 #include "opto/phaseX.hpp"
  67 #include "opto/rootnode.hpp"
  68 #include "opto/runtime.hpp"
  69 #include "opto/stringopts.hpp"
  70 #include "opto/type.hpp"
  71 #include "opto/vectornode.hpp"
  72 #include "runtime/arguments.hpp"
  73 #include "runtime/sharedRuntime.hpp"

 388   for (int i = range_check_cast_count() - 1; i >= 0; i--) {
 389     Node* cast = range_check_cast_node(i);
 390     if (!useful.member(cast)) {
 391       remove_range_check_cast(cast);
 392     }
 393   }
 394   // Remove useless expensive nodes
 395   for (int i = C->expensive_count()-1; i >= 0; i--) {
 396     Node* n = C->expensive_node(i);
 397     if (!useful.member(n)) {
 398       remove_expensive_node(n);
 399     }
 400   }
 401   // Remove useless Opaque4 nodes
 402   for (int i = opaque4_count() - 1; i >= 0; i--) {
 403     Node* opaq = opaque4_node(i);
 404     if (!useful.member(opaq)) {
 405       remove_opaque4_node(opaq);
 406     }
 407   }







 408   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 409   bs->eliminate_useless_gc_barriers(useful, this);
 410   // clean up the late inline lists
 411   remove_useless_late_inlines(&_string_late_inlines, useful);
 412   remove_useless_late_inlines(&_boxing_late_inlines, useful);
 413   remove_useless_late_inlines(&_late_inlines, useful);
 414   debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
 415 }
 416 
 417 // ============================================================================
 418 //------------------------------CompileWrapper---------------------------------
// Stack-allocated RAII guard bracketing a single compilation: the
// constructor/destructor pair (defined elsewhere in this file — the
// bodies are not visible here) presumably installs and then restores
// per-compilation state around the lifetime of '_compile'.
// NOTE(review): confirm exactly what state the ctor/dtor manage.
class CompileWrapper : public StackObj {
  Compile *const _compile;  // the compilation this wrapper guards
 public:
  CompileWrapper(Compile* compile);

  ~CompileWrapper();
};
 426 
 427 CompileWrapper::CompileWrapper(Compile* compile) : _compile(compile) {

 614   // Node list that Iterative GVN will start with
 615   Unique_Node_List for_igvn(comp_arena());
 616   set_for_igvn(&for_igvn);
 617 
 618   // GVN that will be run immediately on new nodes
 619   uint estimated_size = method()->code_size()*4+64;
 620   estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
 621   PhaseGVN gvn(node_arena(), estimated_size);
 622   set_initial_gvn(&gvn);
 623 
 624   print_inlining_init();
 625   { // Scope for timing the parser
 626     TracePhase tp("parse", &timers[_t_parser]);
 627 
 628     // Put top into the hash table ASAP.
 629     initial_gvn()->transform_no_reclaim(top());
 630 
 631     // Set up tf(), start(), and find a CallGenerator.
 632     CallGenerator* cg = NULL;
 633     if (is_osr_compilation()) {
 634       const TypeTuple *domain = StartOSRNode::osr_domain();
 635       const TypeTuple *range = TypeTuple::make_range(method()->signature());
 636       init_tf(TypeFunc::make(domain, range));
 637       StartNode* s = new StartOSRNode(root(), domain);
 638       initial_gvn()->set_type_bottom(s);
 639       init_start(s);
 640       cg = CallGenerator::for_osr(method(), entry_bci());
 641     } else {
 642       // Normal case.
 643       init_tf(TypeFunc::make(method()));
 644       StartNode* s = new StartNode(root(), tf()->domain());
 645       initial_gvn()->set_type_bottom(s);
 646       init_start(s);
 647       if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
 648         // With java.lang.ref.reference.get() we must go through the
 649         // intrinsic - even when get() is the root
 650         // method of the compile - so that, if necessary, the value in
 651         // the referent field of the reference object gets recorded by
 652         // the pre-barrier code.
 653         cg = find_intrinsic(method(), false);
 654       }
 655       if (cg == NULL) {
 656         float past_uses = method()->interpreter_invocation_count();
 657         float expected_uses = past_uses;
 658         cg = CallGenerator::for_inline(method(), expected_uses);
 659       }
 660     }
 661     if (failing())  return;
 662     if (cg == NULL) {
 663       record_method_not_compilable("cannot parse method");
 664       return;

 749     }
 750   }
 751 #endif
 752 
 753 #ifdef ASSERT
 754   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 755   bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
 756 #endif
 757 
 758   // Dump compilation data to replay it.
 759   if (directive->DumpReplayOption) {
 760     env()->dump_replay_data(_compile_id);
 761   }
 762   if (directive->DumpInlineOption && (ilt() != NULL)) {
 763     env()->dump_inline_data(_compile_id);
 764   }
 765 
 766   // Now that we know the size of all the monitors we can add a fixed slot
 767   // for the original deopt pc.
 768   int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);




 769   set_fixed_slots(next_slot);
 770 
 771   // Compute when to use implicit null checks. Used by matching trap based
 772   // nodes and NullCheck optimization.
 773   set_allowed_deopt_reasons();
 774 
 775   // Now generate code
 776   Code_Gen();
 777 }
 778 
 779 //------------------------------Compile----------------------------------------
 780 // Compile a runtime stub
 781 Compile::Compile( ciEnv* ci_env,
 782                   TypeFunc_generator generator,
 783                   address stub_function,
 784                   const char *stub_name,
 785                   int is_fancy_jump,
 786                   bool pass_tls,
 787                   bool save_arg_registers,
 788                   bool return_pc,

 903   // Create Debug Information Recorder to record scopes, oopmaps, etc.
 904   env()->set_oop_recorder(new OopRecorder(env()->arena()));
 905   env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
 906   env()->set_dependencies(new Dependencies(env()));
 907 
 908   _fixed_slots = 0;
 909   set_has_split_ifs(false);
 910   set_has_loops(has_method() && method()->has_loops()); // first approximation
 911   set_has_stringbuilder(false);
 912   set_has_boxed_value(false);
 913   _trap_can_recompile = false;  // no traps emitted yet
 914   _major_progress = true; // start out assuming good things will happen
 915   set_has_unsafe_access(false);
 916   set_max_vector_size(0);
 917   set_clear_upper_avx(false);  //false as default for clear upper bits of ymm registers
 918   Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
 919   set_decompile_count(0);
 920 
 921   set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
 922   _loop_opts_cnt = LoopOptsCount;



 923   set_do_inlining(Inline);
 924   set_max_inline_size(MaxInlineSize);
 925   set_freq_inline_size(FreqInlineSize);
 926   set_do_scheduling(OptoScheduling);
 927   set_do_count_invocations(false);
 928   set_do_method_data_update(false);
 929 
 930   set_do_vector_loop(false);
 931 
 932   if (AllowVectorizeOnDemand) {
 933     if (has_method() && (_directive->VectorizeOption || _directive->VectorizeDebugOption)) {
 934       set_do_vector_loop(true);
 935       NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n",  method()->name()->as_quoted_ascii());})
 936     } else if (has_method() && method()->name() != 0 &&
 937                method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
 938       set_do_vector_loop(true);
 939     }
 940   }
 941   set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider do_vector_loop() mandate use_cmove unconditionally
 942   NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n",  method()->name()->as_quoted_ascii());})

 986   Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
 987   {
 988     for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
 989   }
 990   // Initialize the first few types.
 991   _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
 992   _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
 993   _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
 994   _num_alias_types = AliasIdxRaw+1;
 995   // Zero out the alias type cache.
 996   Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
 997   // A NULL adr_type hits in the cache right away.  Preload the right answer.
 998   probe_alias_cache(NULL)->_index = AliasIdxTop;
 999 
1000   _intrinsics = NULL;
1001   _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1002   _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1003   _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1004   _range_check_casts = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1005   _opaque4_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);

1006   register_library_intrinsics();
1007 #ifdef ASSERT
1008   _type_verify_symmetry = true;
1009   _phase_optimize_finished = false;
1010 #endif
1011 }
1012 
1013 //---------------------------init_start----------------------------------------
1014 // Install the StartNode on this compile object.
1015 void Compile::init_start(StartNode* s) {
1016   if (failing())
1017     return; // already failing
1018   assert(s == start(), "");
1019 }
1020 
1021 /**
1022  * Return the 'StartNode'. We must not have a pending failure, since the ideal graph
1023  * can be in an inconsistent state, i.e., we can get segmentation faults when traversing
1024  * the ideal graph.
1025  */

1214 bool Compile::allow_range_check_smearing() const {
1215   // If this method has already thrown a range-check,
1216   // assume it was because we already tried range smearing
1217   // and it failed.
1218   uint already_trapped = trap_count(Deoptimization::Reason_range_check);
1219   return !already_trapped;
1220 }
1221 
1222 
1223 //------------------------------flatten_alias_type-----------------------------
1224 const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
1225   int offset = tj->offset();
1226   TypePtr::PTR ptr = tj->ptr();
1227 
1228   // Known instance (scalarizable allocation) alias only with itself.
1229   bool is_known_inst = tj->isa_oopptr() != NULL &&
1230                        tj->is_oopptr()->is_known_instance();
1231 
1232   // Process weird unsafe references.
1233   if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
1234     assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops");

1235     assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
1236     tj = TypeOopPtr::BOTTOM;
1237     ptr = tj->ptr();
1238     offset = tj->offset();
1239   }
1240 
1241   // Array pointers need some flattening
1242   const TypeAryPtr *ta = tj->isa_aryptr();
1243   if (ta && ta->is_stable()) {
1244     // Erase stability property for alias analysis.
1245     tj = ta = ta->cast_to_stable(false);
1246   }









1247   if( ta && is_known_inst ) {
1248     if ( offset != Type::OffsetBot &&
1249          offset > arrayOopDesc::length_offset_in_bytes() ) {
1250       offset = Type::OffsetBot; // Flatten constant access into array body only
1251       tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id());
1252     }
1253   } else if( ta && _AliasLevel >= 2 ) {
1254     // For arrays indexed by constant indices, we flatten the alias
1255     // space to include all of the array body.  Only the header, klass
1256     // and array length can be accessed un-aliased.


1257     if( offset != Type::OffsetBot ) {
1258       if( ta->const_oop() ) { // MethodData* or Method*
1259         offset = Type::OffsetBot;   // Flatten constant access into array body
1260         tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
1261       } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1262         // range is OK as-is.
1263         tj = ta = TypeAryPtr::RANGE;
1264       } else if( offset == oopDesc::klass_offset_in_bytes() ) {
1265         tj = TypeInstPtr::KLASS; // all klass loads look alike
1266         ta = TypeAryPtr::RANGE; // generic ignored junk
1267         ptr = TypePtr::BotPTR;
1268       } else if( offset == oopDesc::mark_offset_in_bytes() ) {
1269         tj = TypeInstPtr::MARK;
1270         ta = TypeAryPtr::RANGE; // generic ignored junk
1271         ptr = TypePtr::BotPTR;
1272       } else {                  // Random constant offset into array body
1273         offset = Type::OffsetBot;   // Flatten constant access into array body
1274         tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
1275       }
1276     }
1277     // Arrays of fixed size alias with arrays of unknown size.
1278     if (ta->size() != TypeInt::POS) {
1279       const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1280       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset);
1281     }
1282     // Arrays of known objects become arrays of unknown objects.
1283     if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1284       const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1285       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
1286     }
1287     if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1288       const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1289       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);





1290     }
1291     // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
1292     // cannot be distinguished by bytecode alone.
1293     if (ta->elem() == TypeInt::BOOL) {
1294       const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
1295       ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
1296       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
1297     }
1298     // During the 2nd round of IterGVN, NotNull castings are removed.
1299     // Make sure the Bottom and NotNull variants alias the same.
1300     // Also, make sure exact and non-exact variants alias the same.
1301     if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
1302       tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
1303     }
1304   }
1305 
1306   // Oop pointers need some flattening
1307   const TypeInstPtr *to = tj->isa_instptr();
1308   if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) {
1309     ciInstanceKlass *k = to->klass()->as_instance_klass();
1310     if( ptr == TypePtr::Constant ) {
1311       if (to->klass() != ciEnv::current()->Class_klass() ||
1312           offset < k->size_helper() * wordSize) {
1313         // No constant oop pointers (such as Strings); they alias with
1314         // unknown strings.
1315         assert(!is_known_inst, "not scalarizable allocation");
1316         tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
1317       }
1318     } else if( is_known_inst ) {
1319       tj = to; // Keep NotNull and klass_is_exact for instance type
1320     } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1321       // During the 2nd round of IterGVN, NotNull castings are removed.
1322       // Make sure the Bottom and NotNull variants alias the same.
1323       // Also, make sure exact and non-exact variants alias the same.
1324       tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
1325     }
1326     if (to->speculative() != NULL) {
1327       tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
1328     }
1329     // Canonicalize the holder of this field
1330     if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1331       // First handle header references such as a LoadKlassNode, even if the
1332       // object's klass is unloaded at compile time (4965979).
1333       if (!is_known_inst) { // Do it only for non-instance types
1334         tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
1335       }
1336     } else if (offset < 0 || offset >= k->size_helper() * wordSize) {
1337       // Static fields are in the space above the normal instance
1338       // fields in the java.lang.Class instance.
1339       if (to->klass() != ciEnv::current()->Class_klass()) {
1340         to = NULL;
1341         tj = TypeOopPtr::BOTTOM;
1342         offset = tj->offset();
1343       }
1344     } else {
1345       ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
1346       if (!k->equals(canonical_holder) || tj->offset() != offset) {
1347         if( is_known_inst ) {
1348           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
1349         } else {
1350           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
1351         }
1352       }
1353     }
1354   }
1355 
1356   // Klass pointers to object array klasses need some flattening
1357   const TypeKlassPtr *tk = tj->isa_klassptr();
1358   if( tk ) {
1359     // If we are referencing a field within a Klass, we need
1360     // to assume the worst case of an Object.  Both exact and
1361     // inexact types must flatten to the same alias class so
1362     // use NotNull as the PTR.
1363     if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1364 
1365       tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
1366                                    TypeKlassPtr::OBJECT->klass(),
1367                                    offset);
1368     }
1369 
1370     ciKlass* klass = tk->klass();
1371     if( klass->is_obj_array_klass() ) {
1372       ciKlass* k = TypeAryPtr::OOPS->klass();
1373       if( !k || !k->is_loaded() )                  // Only fails for some -Xcomp runs
1374         k = TypeInstPtr::BOTTOM->klass();
1375       tj = tk = TypeKlassPtr::make( TypePtr::NotNull, k, offset );
1376     }
1377 
1378     // Check for precise loads from the primary supertype array and force them
1379     // to the supertype cache alias index.  Check for generic array loads from
1380     // the primary supertype array and also force them to the supertype cache
1381     // alias index.  Since the same load can reach both, we need to merge
1382     // these 2 disparate memories into the same alias class.  Since the
1383     // primary supertype array is read-only, there's no chance of confusion
1384     // where we bypass an array load and an array store.
1385     int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1386     if (offset == Type::OffsetBot ||
1387         (offset >= primary_supers_offset &&
1388          offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1389         offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1390       offset = in_bytes(Klass::secondary_super_cache_offset());
1391       tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset );
1392     }
1393   }
1394 
1395   // Flatten all Raw pointers together.
1396   if (tj->base() == Type::RawPtr)
1397     tj = TypeRawPtr::BOTTOM;
1398 
1399   if (tj->base() == Type::AnyPtr)
1400     tj = TypePtr::BOTTOM;      // An error, which the caller must check for.
1401 
1402   // Flatten all to bottom for now
1403   switch( _AliasLevel ) {
1404   case 0:
1405     tj = TypePtr::BOTTOM;
1406     break;
1407   case 1:                       // Flatten to: oop, static, field or array
1408     switch (tj->base()) {
1409     //case Type::AryPtr: tj = TypeAryPtr::RANGE;    break;
1410     case Type::RawPtr:   tj = TypeRawPtr::BOTTOM;   break;
1411     case Type::AryPtr:   // do not distinguish arrays at all

1510   intptr_t key = (intptr_t) adr_type;
1511   key ^= key >> logAliasCacheSize;
1512   return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1513 }
1514 
1515 
1516 //-----------------------------grow_alias_types--------------------------------
1517 void Compile::grow_alias_types() {
1518   const int old_ats  = _max_alias_types; // how many before?
1519   const int new_ats  = old_ats;          // how many more?
1520   const int grow_ats = old_ats+new_ats;  // how many now?
1521   _max_alias_types = grow_ats;
1522   _alias_types =  REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
1523   AliasType* ats =    NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1524   Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1525   for (int i = 0; i < new_ats; i++)  _alias_types[old_ats+i] = &ats[i];
1526 }
1527 
1528 
1529 //--------------------------------find_alias_type------------------------------
1530 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
1531   if (_AliasLevel == 0)
1532     return alias_type(AliasIdxBot);
1533 
1534   AliasCacheEntry* ace = probe_alias_cache(adr_type);
1535   if (ace->_adr_type == adr_type) {
1536     return alias_type(ace->_index);



1537   }
1538 
1539   // Handle special cases.
1540   if (adr_type == NULL)             return alias_type(AliasIdxTop);
1541   if (adr_type == TypePtr::BOTTOM)  return alias_type(AliasIdxBot);
1542 
1543   // Do it the slow way.
1544   const TypePtr* flat = flatten_alias_type(adr_type);
1545 
1546 #ifdef ASSERT
1547   {
1548     ResourceMark rm;
1549     assert(flat == flatten_alias_type(flat), "not idempotent: adr_type = %s; flat = %s => %s",
1550            Type::str(adr_type), Type::str(flat), Type::str(flatten_alias_type(flat)));
1551     assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr: adr_type = %s",
1552            Type::str(adr_type));
1553     if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1554       const TypeOopPtr* foop = flat->is_oopptr();
1555       // Scalarizable allocations have exact klass always.
1556       bool exact = !foop->klass_is_exact() || foop->is_known_instance();

1566     if (alias_type(i)->adr_type() == flat) {
1567       idx = i;
1568       break;
1569     }
1570   }
1571 
1572   if (idx == AliasIdxTop) {
1573     if (no_create)  return NULL;
1574     // Grow the array if necessary.
1575     if (_num_alias_types == _max_alias_types)  grow_alias_types();
1576     // Add a new alias type.
1577     idx = _num_alias_types++;
1578     _alias_types[idx]->Init(idx, flat);
1579     if (flat == TypeInstPtr::KLASS)  alias_type(idx)->set_rewritable(false);
1580     if (flat == TypeAryPtr::RANGE)   alias_type(idx)->set_rewritable(false);
1581     if (flat->isa_instptr()) {
1582       if (flat->offset() == java_lang_Class::klass_offset()
1583           && flat->is_instptr()->klass() == env()->Class_klass())
1584         alias_type(idx)->set_rewritable(false);
1585     }

1586     if (flat->isa_aryptr()) {
1587 #ifdef ASSERT
1588       const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1589       // (T_BYTE has the weakest alignment and size restrictions...)
1590       assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
1591 #endif

1592       if (flat->offset() == TypePtr::OffsetBot) {
1593         alias_type(idx)->set_element(flat->is_aryptr()->elem());








1594       }
1595     }
1596     if (flat->isa_klassptr()) {
1597       if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1598         alias_type(idx)->set_rewritable(false);
1599       if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
1600         alias_type(idx)->set_rewritable(false);
1601       if (flat->offset() == in_bytes(Klass::access_flags_offset()))
1602         alias_type(idx)->set_rewritable(false);
1603       if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1604         alias_type(idx)->set_rewritable(false);


1605       if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
1606         alias_type(idx)->set_rewritable(false);
1607     }
1608     // %%% (We would like to finalize JavaThread::threadObj_offset(),
1609     // but the base pointer type is not distinctive enough to identify
1610     // references into JavaThread.)
1611 
1612     // Check for final fields.
1613     const TypeInstPtr* tinst = flat->isa_instptr();
1614     if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
1615       ciField* field;
1616       if (tinst->const_oop() != NULL &&
1617           tinst->klass() == ciEnv::current()->Class_klass() &&
1618           tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) {
1619         // static field
1620         ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1621         field = k->get_field_by_offset(tinst->offset(), true);




1622       } else {
1623         ciInstanceKlass *k = tinst->klass()->as_instance_klass();
1624         field = k->get_field_by_offset(tinst->offset(), false);
1625       }
1626       assert(field == NULL ||
1627              original_field == NULL ||
1628              (field->holder() == original_field->holder() &&
1629               field->offset() == original_field->offset() &&
1630               field->is_static() == original_field->is_static()), "wrong field?");
1631       // Set field() and is_rewritable() attributes.
1632       if (field != NULL)  alias_type(idx)->set_field(field);







1633     }
1634   }
1635 
1636   // Fill the cache for next time.
1637   ace->_adr_type = adr_type;
1638   ace->_index    = idx;
1639   assert(alias_type(adr_type) == alias_type(idx),  "type must be installed");

1640 
1641   // Might as well try to fill the cache for the flattened version, too.
1642   AliasCacheEntry* face = probe_alias_cache(flat);
1643   if (face->_adr_type == NULL) {
1644     face->_adr_type = flat;
1645     face->_index    = idx;
1646     assert(alias_type(flat) == alias_type(idx), "flat type must work too");

1647   }
1648 
1649   return alias_type(idx);
1650 }
1651 
1652 
1653 Compile::AliasType* Compile::alias_type(ciField* field) {
1654   const TypeOopPtr* t;
1655   if (field->is_static())
1656     t = TypeInstPtr::make(field->holder()->java_mirror());
1657   else
1658     t = TypeOopPtr::make_from_klass_raw(field->holder());
1659   AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1660   assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
1661   return atp;
1662 }
1663 
1664 
1665 //------------------------------have_alias_type--------------------------------
1666 bool Compile::have_alias_type(const TypePtr* adr_type) {

1788   }
1789   assert(range_check_cast_count() == 0, "should be empty");
1790 }
1791 
// Register an Opaque4 node so it can be located and eliminated later
// (see remove_opaque4_nodes).  Only genuine Opaque4 nodes may be added,
// and each node may appear in the list at most once.
void Compile::add_opaque4_node(Node* n) {
  assert(n->Opcode() == Op_Opaque4, "Opaque4 only");
  assert(!_opaque4_nodes->contains(n), "duplicate entry in Opaque4 list");
  _opaque4_nodes->append(n);
}
1797 
1798 // Remove all Opaque4 nodes.
1799 void Compile::remove_opaque4_nodes(PhaseIterGVN &igvn) {
1800   for (int i = opaque4_count(); i > 0; i--) {
1801     Node* opaq = opaque4_node(i-1);
1802     assert(opaq->Opcode() == Op_Opaque4, "Opaque4 only");
1803     igvn.replace_node(opaq, opaq->in(2));
1804   }
1805   assert(opaque4_count() == 0, "should be empty");
1806 }
1807 




























































































































































































































































































































































1808 // StringOpts and late inlining of string methods
1809 void Compile::inline_string_calls(bool parse_time) {
1810   {
1811     // remove useless nodes to make the usage analysis simpler
1812     ResourceMark rm;
1813     PhaseRemoveUseless pru(initial_gvn(), for_igvn());
1814   }
1815 
1816   {
1817     ResourceMark rm;
1818     print_method(PHASE_BEFORE_STRINGOPTS, 3);
1819     PhaseStringOpts pso(initial_gvn(), for_igvn());
1820     print_method(PHASE_AFTER_STRINGOPTS, 3);
1821   }
1822 
1823   // now inline anything that we skipped the first time around
1824   if (!parse_time) {
1825     _late_inlines_pos = _late_inlines.length();
1826   }
1827 

2067   remove_speculative_types(igvn);
2068 
2069   // No more new expensive nodes will be added to the list from here
2070   // so keep only the actual candidates for optimizations.
2071   cleanup_expensive_nodes(igvn);
2072 
2073   if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2074     Compile::TracePhase tp("", &timers[_t_renumberLive]);
2075     initial_gvn()->replace_with(&igvn);
2076     for_igvn()->clear();
2077     Unique_Node_List new_worklist(C->comp_arena());
2078     {
2079       ResourceMark rm;
2080       PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
2081     }
2082     set_for_igvn(&new_worklist);
2083     igvn = PhaseIterGVN(initial_gvn());
2084     igvn.optimize();
2085   }
2086 







2087   // Perform escape analysis
2088   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
2089     if (has_loops()) {
2090       // Cleanup graph (remove dead nodes).
2091       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2092       PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll);
2093       if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2094       if (failing())  return;
2095     }
2096     ConnectionGraph::do_analysis(this, &igvn);
2097 
2098     if (failing())  return;
2099 
2100     // Optimize out fields loads from scalar replaceable allocations.
2101     igvn.optimize();
2102     print_method(PHASE_ITER_GVN_AFTER_EA, 2);
2103 
2104     if (failing())  return;
2105 
2106     if (congraph() != NULL && macro_count() > 0) {
2107       TracePhase tp("macroEliminate", &timers[_t_macroEliminate]);
2108       PhaseMacroExpand mexp(igvn);
2109       mexp.eliminate_macro_nodes();
2110       igvn.set_delay_transform(false);
2111 
2112       igvn.optimize();
2113       print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
2114 
2115       if (failing())  return;
2116     }
2117   }
2118 





2119   // Loop transforms on the ideal graph.  Range Check Elimination,
2120   // peeling, unrolling, etc.
2121 
2122   // Set loop opts counter
2123   if((_loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
2124     {
2125       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2126       PhaseIdealLoop::optimize(igvn, LoopOptsDefault);
2127       _loop_opts_cnt--;
2128       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
2129       if (failing())  return;
2130     }
2131     // Loop opts pass if partial peeling occurred in previous pass
2132     if(PartialPeelLoop && major_progress() && (_loop_opts_cnt > 0)) {
2133       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2134       PhaseIdealLoop::optimize(igvn, LoopOptsSkipSplitIf);
2135       _loop_opts_cnt--;
2136       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
2137       if (failing())  return;
2138     }

2741             // Accumulate any precedence edges
2742             if (mem->in(i) != NULL) {
2743               n->add_prec(mem->in(i));
2744             }
2745           }
2746           // Everything above this point has been processed.
2747           done = true;
2748         }
2749         // Eliminate the previous StoreCM
2750         prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
2751         assert(mem->outcnt() == 0, "should be dead");
2752         mem->disconnect_inputs(NULL, this);
2753       } else {
2754         prev = mem;
2755       }
2756       mem = prev->in(MemNode::Memory);
2757     }
2758   }
2759 }
2760 

2761 //------------------------------final_graph_reshaping_impl----------------------
2762 // Implement items 1-5 from final_graph_reshaping below.
2763 void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
2764 
2765   if ( n->outcnt() == 0 ) return; // dead node
2766   uint nop = n->Opcode();
2767 
2768   // Check for 2-input instruction with "last use" on right input.
2769   // Swap to left input.  Implements item (2).
2770   if( n->req() == 3 &&          // two-input instruction
2771       n->in(1)->outcnt() > 1 && // left use is NOT a last use
2772       (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
2773       n->in(2)->outcnt() == 1 &&// right use IS a last use
2774       !n->in(2)->is_Con() ) {   // right use is not a constant
2775     // Check for commutative opcode
2776     switch( nop ) {
2777     case Op_AddI:  case Op_AddF:  case Op_AddD:  case Op_AddL:
2778     case Op_MaxI:  case Op_MinI:
2779     case Op_MulI:  case Op_MulF:  case Op_MulD:  case Op_MulL:
2780     case Op_AndL:  case Op_XorL:  case Op_OrL:

3479           // Replace all nodes with identical edges as m with m
3480           k->subsume_by(m, this);
3481         }
3482       }
3483     }
3484     break;
3485   }
3486   case Op_CmpUL: {
3487     if (!Matcher::has_match_rule(Op_CmpUL)) {
3488       // No support for unsigned long comparisons
3489       ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
3490       Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
3491       Node* orl = new OrLNode(n->in(1), sign_bit_mask);
3492       ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
3493       Node* andl = new AndLNode(orl, remove_sign_mask);
3494       Node* cmp = new CmpLNode(andl, n->in(2));
3495       n->subsume_by(cmp, this);
3496     }
3497     break;
3498   }








3499   default:
3500     assert(!n->is_Call(), "");
3501     assert(!n->is_Mem(), "");
3502     assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
3503     break;
3504   }
3505 }
3506 
3507 //------------------------------final_graph_reshaping_walk---------------------
3508 // Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
3509 // requires that the walk visits a node's inputs before visiting the node.
3510 void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
3511   Unique_Node_List sfpt;
3512 
3513   frc._visited.set(root->_idx); // first, mark node as visited
3514   uint cnt = root->req();
3515   Node *n = root;
3516   uint  i = 0;
3517   while (true) {
3518     if (i < cnt) {

3826   }
3827 }
3828 
3829 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
3830   return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
3831 }
3832 
3833 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
3834   return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
3835 }
3836 
3837 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
3838   if (holder->is_initialized()) {
3839     return false;
3840   }
3841   if (holder->is_being_initialized()) {
3842     if (accessing_method->holder() == holder) {
3843       // Access inside a class. The barrier can be elided when access happens in <clinit>,
3844       // <init>, or a static method. In all those cases, there was an initialization
3845       // barrier on the holder klass passed.
3846       if (accessing_method->is_static_initializer() ||
3847           accessing_method->is_object_initializer() ||
3848           accessing_method->is_static()) {
3849         return false;
3850       }
3851     } else if (accessing_method->holder()->is_subclass_of(holder)) {
3852       // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
3853       // In case of <init> or a static method, the barrier is on the subclass is not enough:
3854       // child class can become fully initialized while its parent class is still being initialized.
3855       if (accessing_method->is_static_initializer()) {
3856         return false;
3857       }
3858     }
3859     ciMethod* root = method(); // the root method of compilation
3860     if (root != accessing_method) {
3861       return needs_clinit_barrier(holder, root); // check access in the context of compilation root
3862     }
3863   }
3864   return true;
3865 }
3866 
3867 #ifndef PRODUCT
3868 //------------------------------verify_graph_edges---------------------------
3869 // Walk the Graph and verify that there is a one-to-one correspondence
3870 // between Use-Def edges and Def-Use edges in the graph.
3871 void Compile::verify_graph_edges(bool no_dead_code) {
3872   if (VerifyGraphEdges) {
3873     Unique_Node_List visited;
3874     // Call recursive graph walk to check edges
3875     _root->verify_edges(visited);

3956                   _phase_name, C->unique(), C->live_nodes(), C->count_live_nodes_by_graph_walk());
3957   }
3958 
3959   if (VerifyIdealNodeCount) {
3960     Compile::current()->print_missing_nodes();
3961   }
3962 #endif
3963 
3964   if (_log != NULL) {
3965     _log->done("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
3966   }
3967 }
3968 
3969 //----------------------------static_subtype_check-----------------------------
3970 // Shortcut important common cases when superklass is exact:
3971 // (0) superklass is java.lang.Object (can occur in reflective code)
3972 // (1) subklass is already limited to a subtype of superklass => always ok
3973 // (2) subklass does not overlap with superklass => always fail
3974 // (3) superklass has NO subtypes and we can check with a simple compare.
3975 int Compile::static_subtype_check(ciKlass* superk, ciKlass* subk) {
3976   if (StressReflectiveCode) {
3977     return SSC_full_test;       // Let caller generate the general case.
3978   }
3979 
3980   if (superk == env()->Object_klass()) {
3981     return SSC_always_true;     // (0) this test cannot fail
3982   }
3983 
3984   ciType* superelem = superk;
3985   if (superelem->is_array_klass())

3986     superelem = superelem->as_array_klass()->base_element_type();

3987 
3988   if (!subk->is_interface()) {  // cannot trust static interface types yet
3989     if (subk->is_subtype_of(superk)) {
3990       return SSC_always_true;   // (1) false path dead; no dynamic test needed
3991     }
3992     if (!(superelem->is_klass() && superelem->as_klass()->is_interface()) &&
3993         !superk->is_subtype_of(subk)) {
3994       return SSC_always_false;
3995     }
3996   }
3997 
3998   // If casting to an instance klass, it must have no subtypes
3999   if (superk->is_interface()) {
4000     // Cannot trust interfaces yet.
4001     // %%% S.B. superk->nof_implementors() == 1
4002   } else if (superelem->is_instance_klass()) {
4003     ciInstanceKlass* ik = superelem->as_instance_klass();
4004     if (!ik->has_subklass() && !ik->is_interface()) {
4005       if (!ik->is_final()) {
4006         // Add a dependency if there is a chance of a later subclass.

4427     for (uint next = 0; next < worklist.size(); ++next) {
4428       Node *n  = worklist.at(next);
4429       const Type* t = igvn.type_or_null(n);
4430       assert((t == NULL) || (t == t->remove_speculative()), "no more speculative types");
4431       if (n->is_Type()) {
4432         t = n->as_Type()->type();
4433         assert(t == t->remove_speculative(), "no more speculative types");
4434       }
4435       uint max = n->len();
4436       for( uint i = 0; i < max; ++i ) {
4437         Node *m = n->in(i);
4438         if (not_a_node(m))  continue;
4439         worklist.push(m);
4440       }
4441     }
4442     igvn.check_no_speculative_types();
4443 #endif
4444   }
4445 }
4446 





















4447 // Auxiliary method to support randomized stressing/fuzzing.
4448 //
4449 // This method can be called an arbitrary number of times, with the current count
4450 // as the argument. The logic allows selecting a single candidate from the
4451 // running list of candidates as follows:
4452 //    int count = 0;
4453 //    Cand* selected = null;
4454 //    while(cand = cand->next()) {
4455 //      if (randomized_select(++count)) {
4456 //        selected = cand;
4457 //      }
4458 //    }
4459 //
4460 // Including count equalizes the chances any candidate is "selected".
4461 // This is useful when we don't have the complete list of candidates to choose
4462 // from uniformly. In this case, we need to adjust the randomness of the
4463 // selection, or else we will end up biasing the selection towards the latter
4464 // candidates.
4465 //
4466 // A quick back-of-the-envelope calculation shows that for the list of n candidates

  34 #include "compiler/disassembler.hpp"
  35 #include "compiler/oopMap.hpp"
  36 #include "gc/shared/barrierSet.hpp"
  37 #include "gc/shared/c2/barrierSetC2.hpp"
  38 #include "jfr/jfrEvents.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "opto/addnode.hpp"
  41 #include "opto/block.hpp"
  42 #include "opto/c2compiler.hpp"
  43 #include "opto/callGenerator.hpp"
  44 #include "opto/callnode.hpp"
  45 #include "opto/castnode.hpp"
  46 #include "opto/cfgnode.hpp"
  47 #include "opto/chaitin.hpp"
  48 #include "opto/compile.hpp"
  49 #include "opto/connode.hpp"
  50 #include "opto/convertnode.hpp"
  51 #include "opto/divnode.hpp"
  52 #include "opto/escape.hpp"
  53 #include "opto/idealGraphPrinter.hpp"
  54 #include "opto/inlinetypenode.hpp"
  55 #include "opto/loopnode.hpp"
  56 #include "opto/machnode.hpp"
  57 #include "opto/macro.hpp"
  58 #include "opto/matcher.hpp"
  59 #include "opto/mathexactnode.hpp"
  60 #include "opto/memnode.hpp"
  61 #include "opto/mulnode.hpp"
  62 #include "opto/narrowptrnode.hpp"
  63 #include "opto/node.hpp"
  64 #include "opto/opcodes.hpp"
  65 #include "opto/output.hpp"
  66 #include "opto/parse.hpp"
  67 #include "opto/phaseX.hpp"
  68 #include "opto/rootnode.hpp"
  69 #include "opto/runtime.hpp"
  70 #include "opto/stringopts.hpp"
  71 #include "opto/type.hpp"
  72 #include "opto/vectornode.hpp"
  73 #include "runtime/arguments.hpp"
  74 #include "runtime/sharedRuntime.hpp"

 389   for (int i = range_check_cast_count() - 1; i >= 0; i--) {
 390     Node* cast = range_check_cast_node(i);
 391     if (!useful.member(cast)) {
 392       remove_range_check_cast(cast);
 393     }
 394   }
 395   // Remove useless expensive nodes
 396   for (int i = C->expensive_count()-1; i >= 0; i--) {
 397     Node* n = C->expensive_node(i);
 398     if (!useful.member(n)) {
 399       remove_expensive_node(n);
 400     }
 401   }
 402   // Remove useless Opaque4 nodes
 403   for (int i = opaque4_count() - 1; i >= 0; i--) {
 404     Node* opaq = opaque4_node(i);
 405     if (!useful.member(opaq)) {
 406       remove_opaque4_node(opaq);
 407     }
 408   }
 409   // Remove useless inline type nodes
 410   for (int i = _inline_type_nodes->length() - 1; i >= 0; i--) {
 411     Node* vt = _inline_type_nodes->at(i);
 412     if (!useful.member(vt)) {
 413       _inline_type_nodes->remove(vt);
 414     }
 415   }
 416   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 417   bs->eliminate_useless_gc_barriers(useful, this);
 418   // clean up the late inline lists
 419   remove_useless_late_inlines(&_string_late_inlines, useful);
 420   remove_useless_late_inlines(&_boxing_late_inlines, useful);
 421   remove_useless_late_inlines(&_late_inlines, useful);
 422   debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
 423 }
 424 
 425 // ============================================================================
 426 //------------------------------CompileWrapper---------------------------------
 427 class CompileWrapper : public StackObj {
      // The compilation this wrapper brackets. StackObj base means instances
      // live on the stack; the ctor/dtor pair (defined below) presumably
      // registers/unregisters per-compilation state -- confirm in definitions.
 428   Compile *const _compile;
 429  public:
       // Constructor/destructor bracket the lifetime of one compilation.
 430   CompileWrapper(Compile* compile);
 431 
 432   ~CompileWrapper();
 433 };
 434 
 435 CompileWrapper::CompileWrapper(Compile* compile) : _compile(compile) {

 622   // Node list that Iterative GVN will start with
 623   Unique_Node_List for_igvn(comp_arena());
 624   set_for_igvn(&for_igvn);
 625 
 626   // GVN that will be run immediately on new nodes
 627   uint estimated_size = method()->code_size()*4+64;
 628   estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
 629   PhaseGVN gvn(node_arena(), estimated_size);
 630   set_initial_gvn(&gvn);
 631 
 632   print_inlining_init();
 633   { // Scope for timing the parser
 634     TracePhase tp("parse", &timers[_t_parser]);
 635 
 636     // Put top into the hash table ASAP.
 637     initial_gvn()->transform_no_reclaim(top());
 638 
 639     // Set up tf(), start(), and find a CallGenerator.
 640     CallGenerator* cg = NULL;
 641     if (is_osr_compilation()) {
 642       init_tf(TypeFunc::make(method(), /* is_osr_compilation = */ true));
 643       StartNode* s = new StartOSRNode(root(), tf()->domain_sig());


 644       initial_gvn()->set_type_bottom(s);
 645       init_start(s);
 646       cg = CallGenerator::for_osr(method(), entry_bci());
 647     } else {
 648       // Normal case.
 649       init_tf(TypeFunc::make(method()));
 650       StartNode* s = new StartNode(root(), tf()->domain_cc());
 651       initial_gvn()->set_type_bottom(s);
 652       init_start(s);
 653       if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
 654         // With java.lang.ref.reference.get() we must go through the
 655         // intrinsic - even when get() is the root
 656         // method of the compile - so that, if necessary, the value in
 657         // the referent field of the reference object gets recorded by
 658         // the pre-barrier code.
 659         cg = find_intrinsic(method(), false);
 660       }
 661       if (cg == NULL) {
 662         float past_uses = method()->interpreter_invocation_count();
 663         float expected_uses = past_uses;
 664         cg = CallGenerator::for_inline(method(), expected_uses);
 665       }
 666     }
 667     if (failing())  return;
 668     if (cg == NULL) {
 669       record_method_not_compilable("cannot parse method");
 670       return;

 755     }
 756   }
 757 #endif
 758 
 759 #ifdef ASSERT
 760   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 761   bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
 762 #endif
 763 
 764   // Dump compilation data to replay it.
 765   if (directive->DumpReplayOption) {
 766     env()->dump_replay_data(_compile_id);
 767   }
 768   if (directive->DumpInlineOption && (ilt() != NULL)) {
 769     env()->dump_inline_data(_compile_id);
 770   }
 771 
 772   // Now that we know the size of all the monitors we can add a fixed slot
 773   // for the original deopt pc.
 774   int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
 775   if (needs_stack_repair()) {
 776     // One extra slot for the special stack increment value
 777     next_slot += 2;
 778   }
 779   set_fixed_slots(next_slot);
 780 
 781   // Compute when to use implicit null checks. Used by matching trap based
 782   // nodes and NullCheck optimization.
 783   set_allowed_deopt_reasons();
 784 
 785   // Now generate code
 786   Code_Gen();
 787 }
 788 
 789 //------------------------------Compile----------------------------------------
 790 // Compile a runtime stub
 791 Compile::Compile( ciEnv* ci_env,
 792                   TypeFunc_generator generator,
 793                   address stub_function,
 794                   const char *stub_name,
 795                   int is_fancy_jump,
 796                   bool pass_tls,
 797                   bool save_arg_registers,
 798                   bool return_pc,

 913   // Create Debug Information Recorder to record scopes, oopmaps, etc.
 914   env()->set_oop_recorder(new OopRecorder(env()->arena()));
 915   env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
 916   env()->set_dependencies(new Dependencies(env()));
 917 
 918   _fixed_slots = 0;
 919   set_has_split_ifs(false);
 920   set_has_loops(has_method() && method()->has_loops()); // first approximation
 921   set_has_stringbuilder(false);
 922   set_has_boxed_value(false);
 923   _trap_can_recompile = false;  // no traps emitted yet
 924   _major_progress = true; // start out assuming good things will happen
 925   set_has_unsafe_access(false);
 926   set_max_vector_size(0);
 927   set_clear_upper_avx(false);  //false as default for clear upper bits of ymm registers
 928   Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
 929   set_decompile_count(0);
 930 
 931   set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
 932   _loop_opts_cnt = LoopOptsCount;
 933   _has_flattened_accesses = false;
 934   _flattened_accesses_share_alias = true;
 935 
 936   set_do_inlining(Inline);
 937   set_max_inline_size(MaxInlineSize);
 938   set_freq_inline_size(FreqInlineSize);
 939   set_do_scheduling(OptoScheduling);
 940   set_do_count_invocations(false);
 941   set_do_method_data_update(false);
 942 
 943   set_do_vector_loop(false);
 944 
 945   if (AllowVectorizeOnDemand) {
 946     if (has_method() && (_directive->VectorizeOption || _directive->VectorizeDebugOption)) {
 947       set_do_vector_loop(true);
 948       NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n",  method()->name()->as_quoted_ascii());})
 949     } else if (has_method() && method()->name() != 0 &&
 950                method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
 951       set_do_vector_loop(true);
 952     }
 953   }
 954   set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider do_vector_loop() mandate use_cmove unconditionally
 955   NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n",  method()->name()->as_quoted_ascii());})

 999   Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
1000   {
1001     for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
1002   }
1003   // Initialize the first few types.
1004   _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
1005   _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
1006   _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
1007   _num_alias_types = AliasIdxRaw+1;
1008   // Zero out the alias type cache.
1009   Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
1010   // A NULL adr_type hits in the cache right away.  Preload the right answer.
1011   probe_alias_cache(NULL)->_index = AliasIdxTop;
1012 
1013   _intrinsics = NULL;
1014   _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1015   _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1016   _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1017   _range_check_casts = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1018   _opaque4_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1019   _inline_type_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
1020   register_library_intrinsics();
1021 #ifdef ASSERT
1022   _type_verify_symmetry = true;
1023   _phase_optimize_finished = false;
1024 #endif
1025 }
1026 
1027 //---------------------------init_start----------------------------------------
1028 // Install the StartNode on this compile object.
1029 void Compile::init_start(StartNode* s) {
1030   if (failing())
1031     return; // already failing
1032   assert(s == start(), "");
1033 }
1034 
1035 /**
1036  * Return the 'StartNode'. We must not have a pending failure, since the ideal graph
1037  * can be in an inconsistent state, i.e., we can get segmentation faults when traversing
1038  * the ideal graph.
1039  */

1228 bool Compile::allow_range_check_smearing() const {
1229   // If this method has already thrown a range-check,
1230   // assume it was because we already tried range smearing
1231   // and it failed.
1232   uint already_trapped = trap_count(Deoptimization::Reason_range_check);
1233   return !already_trapped;
1234 }
1235 
1236 
1237 //------------------------------flatten_alias_type-----------------------------
1238 const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
1239   int offset = tj->offset();
1240   TypePtr::PTR ptr = tj->ptr();
1241 
1242   // Known instance (scalarizable allocation) alias only with itself.
1243   bool is_known_inst = tj->isa_oopptr() != NULL &&
1244                        tj->is_oopptr()->is_known_instance();
1245 
1246   // Process weird unsafe references.
1247   if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
1248     bool default_value_load = EnableValhalla && tj->is_instptr()->klass() == ciEnv::current()->Class_klass();
1249     assert(InlineUnsafeOps || default_value_load, "indeterminate pointers come only from unsafe ops");
1250     assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
1251     tj = TypeOopPtr::BOTTOM;
1252     ptr = tj->ptr();
1253     offset = tj->offset();
1254   }
1255 
1256   // Array pointers need some flattening
1257   const TypeAryPtr *ta = tj->isa_aryptr();
1258   if (ta && ta->is_stable()) {
1259     // Erase stability property for alias analysis.
1260     tj = ta = ta->cast_to_stable(false);
1261   }
1262   if (ta && ta->is_not_flat()) {
1263     // Erase not flat property for alias analysis.
1264     tj = ta = ta->cast_to_not_flat(false);
1265   }
1266   if (ta && ta->is_not_null_free()) {
1267     // Erase not null free property for alias analysis.
1268     tj = ta = ta->cast_to_not_null_free(false);
1269   }
1270 
1271   if( ta && is_known_inst ) {
1272     if ( offset != Type::OffsetBot &&
1273          offset > arrayOopDesc::length_offset_in_bytes() ) {
1274       offset = Type::OffsetBot; // Flatten constant access into array body only
1275       tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, Type::Offset(offset), ta->field_offset(), ta->instance_id());
1276     }
1277   } else if( ta && _AliasLevel >= 2 ) {
1278     // For arrays indexed by constant indices, we flatten the alias
1279     // space to include all of the array body.  Only the header, klass
1280     // and array length can be accessed un-aliased.
1281     // For flattened inline type array, each field has its own slice so
1282     // we must include the field offset.
1283     if( offset != Type::OffsetBot ) {
1284       if( ta->const_oop() ) { // MethodData* or Method*
1285         offset = Type::OffsetBot;   // Flatten constant access into array body
1286         tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
1287       } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1288         // range is OK as-is.
1289         tj = ta = TypeAryPtr::RANGE;
1290       } else if( offset == oopDesc::klass_offset_in_bytes() ) {
1291         tj = TypeInstPtr::KLASS; // all klass loads look alike
1292         ta = TypeAryPtr::RANGE; // generic ignored junk
1293         ptr = TypePtr::BotPTR;
1294       } else if( offset == oopDesc::mark_offset_in_bytes() ) {
1295         tj = TypeInstPtr::MARK;
1296         ta = TypeAryPtr::RANGE; // generic ignored junk
1297         ptr = TypePtr::BotPTR;
1298       } else {                  // Random constant offset into array body
1299         offset = Type::OffsetBot;   // Flatten constant access into array body
1300         tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
1301       }
1302     }
1303     // Arrays of fixed size alias with arrays of unknown size.
1304     if (ta->size() != TypeInt::POS) {
1305       const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1306       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,Type::Offset(offset), ta->field_offset());
1307     }
1308     // Arrays of known objects become arrays of unknown objects.
1309     if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1310       const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1311       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset());
1312     }
1313     if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1314       const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1315       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset());
1316     }
1317     // Initially all flattened array accesses share a single slice
1318     if (ta->is_flat() && ta->elem() != TypeInlineType::BOTTOM && _flattened_accesses_share_alias) {
1319       const TypeAry *tary = TypeAry::make(TypeInlineType::BOTTOM, ta->size());
1320       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), Type::Offset(Type::OffsetBot));
1321     }
1322     // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
1323     // cannot be distinguished by bytecode alone.
1324     if (ta->elem() == TypeInt::BOOL) {
1325       const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
1326       ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
1327       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,Type::Offset(offset), ta->field_offset());
1328     }
1329     // During the 2nd round of IterGVN, NotNull castings are removed.
1330     // Make sure the Bottom and NotNull variants alias the same.
1331     // Also, make sure exact and non-exact variants alias the same.
1332     if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
1333       tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
1334     }
1335   }
1336 
1337   // Oop pointers need some flattening
1338   const TypeInstPtr *to = tj->isa_instptr();
1339   if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) {
1340     ciInstanceKlass *k = to->klass()->as_instance_klass();
1341     if( ptr == TypePtr::Constant ) {
1342       if (to->klass() != ciEnv::current()->Class_klass() ||
1343           offset < k->size_helper() * wordSize) {
1344         // No constant oop pointers (such as Strings); they alias with
1345         // unknown strings.
1346         assert(!is_known_inst, "not scalarizable allocation");
1347         tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,Type::Offset(offset));
1348       }
1349     } else if( is_known_inst ) {
1350       tj = to; // Keep NotNull and klass_is_exact for instance type
1351     } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1352       // During the 2nd round of IterGVN, NotNull castings are removed.
1353       // Make sure the Bottom and NotNull variants alias the same.
1354       // Also, make sure exact and non-exact variants alias the same.
1355       tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,Type::Offset(offset));
1356     }
1357     if (to->speculative() != NULL) {
1358       tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),Type::Offset(to->offset()), to->klass()->flatten_array(), to->instance_id());
1359     }
1360     // Canonicalize the holder of this field
1361     if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1362       // First handle header references such as a LoadKlassNode, even if the
1363       // object's klass is unloaded at compile time (4965979).
1364       if (!is_known_inst) { // Do it only for non-instance types
1365         tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, Type::Offset(offset));
1366       }
1367     } else if (offset < 0 || offset >= k->size_helper() * wordSize) {
1368       // Static fields are in the space above the normal instance
1369       // fields in the java.lang.Class instance.
1370       if (to->klass() != ciEnv::current()->Class_klass()) {
1371         to = NULL;
1372         tj = TypeOopPtr::BOTTOM;
1373         offset = tj->offset();
1374       }
1375     } else {
1376       ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
1377       if (!k->equals(canonical_holder) || tj->offset() != offset) {
1378         if( is_known_inst ) {
1379           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, Type::Offset(offset), canonical_holder->flatten_array(), to->instance_id());
1380         } else {
1381           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, Type::Offset(offset));
1382         }
1383       }
1384     }
1385   }
1386 
1387   // Klass pointers to object array klasses need some flattening
1388   const TypeKlassPtr *tk = tj->isa_klassptr();
1389   if( tk ) {
1390     // If we are referencing a field within a Klass, we need
1391     // to assume the worst case of an Object.  Both exact and
1392     // inexact types must flatten to the same alias class so
1393     // use NotNull as the PTR.
1394     if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1395 
1396       tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
1397                                    TypeKlassPtr::OBJECT->klass(),
1398                                    Type::Offset(offset));
1399     }
1400 
1401     ciKlass* klass = tk->klass();
1402     if (klass != NULL && klass->is_obj_array_klass()) {
1403       ciKlass* k = TypeAryPtr::OOPS->klass();
1404       if( !k || !k->is_loaded() )                  // Only fails for some -Xcomp runs
1405         k = TypeInstPtr::BOTTOM->klass();
1406       tj = tk = TypeKlassPtr::make(TypePtr::NotNull, k, Type::Offset(offset));
1407     }
1408 
1409     // Check for precise loads from the primary supertype array and force them
1410     // to the supertype cache alias index.  Check for generic array loads from
1411     // the primary supertype array and also force them to the supertype cache
1412     // alias index.  Since the same load can reach both, we need to merge
1413     // these 2 disparate memories into the same alias class.  Since the
1414     // primary supertype array is read-only, there's no chance of confusion
1415     // where we bypass an array load and an array store.
1416     int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1417     if (offset == Type::OffsetBot ||
1418         (offset >= primary_supers_offset &&
1419          offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1420         offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1421       offset = in_bytes(Klass::secondary_super_cache_offset());
1422       tj = tk = TypeKlassPtr::make(TypePtr::NotNull, tk->klass(), Type::Offset(offset));
1423     }
1424   }
1425 
1426   // Flatten all Raw pointers together.
1427   if (tj->base() == Type::RawPtr)
1428     tj = TypeRawPtr::BOTTOM;
1429 
1430   if (tj->base() == Type::AnyPtr)
1431     tj = TypePtr::BOTTOM;      // An error, which the caller must check for.
1432 
1433   // Flatten all to bottom for now
1434   switch( _AliasLevel ) {
1435   case 0:
1436     tj = TypePtr::BOTTOM;
1437     break;
1438   case 1:                       // Flatten to: oop, static, field or array
1439     switch (tj->base()) {
1440     //case Type::AryPtr: tj = TypeAryPtr::RANGE;    break;
1441     case Type::RawPtr:   tj = TypeRawPtr::BOTTOM;   break;
1442     case Type::AryPtr:   // do not distinguish arrays at all

1541   intptr_t key = (intptr_t) adr_type;
1542   key ^= key >> logAliasCacheSize;
1543   return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1544 }
1545 
1546 
1547 //-----------------------------grow_alias_types--------------------------------
1548 void Compile::grow_alias_types() {
1549   const int old_ats  = _max_alias_types; // how many before?
1550   const int new_ats  = old_ats;          // how many more?
1551   const int grow_ats = old_ats+new_ats;  // how many now?
1552   _max_alias_types = grow_ats;
1553   _alias_types =  REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
1554   AliasType* ats =    NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1555   Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1556   for (int i = 0; i < new_ats; i++)  _alias_types[old_ats+i] = &ats[i];
1557 }
1558 
1559 
1560 //--------------------------------find_alias_type------------------------------
1561 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field, bool uncached) {
1562   if (_AliasLevel == 0)
1563     return alias_type(AliasIdxBot);
1564 
1565   AliasCacheEntry* ace = NULL;
1566   if (!uncached) {
1567     ace = probe_alias_cache(adr_type);
1568     if (ace->_adr_type == adr_type) {
1569       return alias_type(ace->_index);
1570     }
1571   }
1572 
1573   // Handle special cases.
1574   if (adr_type == NULL)             return alias_type(AliasIdxTop);
1575   if (adr_type == TypePtr::BOTTOM)  return alias_type(AliasIdxBot);
1576 
1577   // Do it the slow way.
1578   const TypePtr* flat = flatten_alias_type(adr_type);
1579 
1580 #ifdef ASSERT
1581   {
1582     ResourceMark rm;
1583     assert(flat == flatten_alias_type(flat), "not idempotent: adr_type = %s; flat = %s => %s",
1584            Type::str(adr_type), Type::str(flat), Type::str(flatten_alias_type(flat)));
1585     assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr: adr_type = %s",
1586            Type::str(adr_type));
1587     if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1588       const TypeOopPtr* foop = flat->is_oopptr();
1589       // Scalarizable allocations have exact klass always.
1590       bool exact = !foop->klass_is_exact() || foop->is_known_instance();

1600     if (alias_type(i)->adr_type() == flat) {
1601       idx = i;
1602       break;
1603     }
1604   }
1605 
1606   if (idx == AliasIdxTop) {
1607     if (no_create)  return NULL;
1608     // Grow the array if necessary.
1609     if (_num_alias_types == _max_alias_types)  grow_alias_types();
1610     // Add a new alias type.
1611     idx = _num_alias_types++;
1612     _alias_types[idx]->Init(idx, flat);
1613     if (flat == TypeInstPtr::KLASS)  alias_type(idx)->set_rewritable(false);
1614     if (flat == TypeAryPtr::RANGE)   alias_type(idx)->set_rewritable(false);
1615     if (flat->isa_instptr()) {
1616       if (flat->offset() == java_lang_Class::klass_offset()
1617           && flat->is_instptr()->klass() == env()->Class_klass())
1618         alias_type(idx)->set_rewritable(false);
1619     }
1620     ciField* field = NULL;
1621     if (flat->isa_aryptr()) {
1622 #ifdef ASSERT
1623       const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1624       // (T_BYTE has the weakest alignment and size restrictions...)
1625       assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
1626 #endif
1627       const Type* elemtype = flat->is_aryptr()->elem();
1628       if (flat->offset() == TypePtr::OffsetBot) {
1629         alias_type(idx)->set_element(elemtype);
1630       }
1631       int field_offset = flat->is_aryptr()->field_offset().get();
1632       if (elemtype->isa_inlinetype() &&
1633           elemtype->inline_klass() != NULL &&
1634           field_offset != Type::OffsetBot) {
1635         ciInlineKlass* vk = elemtype->inline_klass();
1636         field_offset += vk->first_field_offset();
1637         field = vk->get_field_by_offset(field_offset, false);
1638       }
1639     }
1640     if (flat->isa_klassptr()) {
1641       if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1642         alias_type(idx)->set_rewritable(false);
1643       if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
1644         alias_type(idx)->set_rewritable(false);
1645       if (flat->offset() == in_bytes(Klass::access_flags_offset()))
1646         alias_type(idx)->set_rewritable(false);
1647       if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1648         alias_type(idx)->set_rewritable(false);
1649       if (flat->offset() == in_bytes(Klass::layout_helper_offset()))
1650         alias_type(idx)->set_rewritable(false);
1651       if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
1652         alias_type(idx)->set_rewritable(false);
1653     }
1654     // %%% (We would like to finalize JavaThread::threadObj_offset(),
1655     // but the base pointer type is not distinctive enough to identify
1656     // references into JavaThread.)
1657 
1658     // Check for final fields.
1659     const TypeInstPtr* tinst = flat->isa_instptr();
1660     if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {

1661       if (tinst->const_oop() != NULL &&
1662           tinst->klass() == ciEnv::current()->Class_klass() &&
1663           tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) {
1664         // static field
1665         ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1666         field = k->get_field_by_offset(tinst->offset(), true);
1667       } else if (tinst->klass()->is_inlinetype()) {
1668         // Inline type field
1669         ciInlineKlass* vk = tinst->inline_klass();
1670         field = vk->get_field_by_offset(tinst->offset(), false);
1671       } else {
1672         ciInstanceKlass* k = tinst->klass()->as_instance_klass();
1673         field = k->get_field_by_offset(tinst->offset(), false);
1674       }
1675     }
1676     assert(field == NULL ||
1677            original_field == NULL ||
1678            (field->holder() == original_field->holder() &&
1679             field->offset() == original_field->offset() &&
1680             field->is_static() == original_field->is_static()), "wrong field?");
1681     // Set field() and is_rewritable() attributes.
1682     if (field != NULL) {
1683       alias_type(idx)->set_field(field);
1684       if (flat->isa_aryptr()) {
1685         // Fields of flat arrays are rewritable although they are declared final
1686         assert(flat->is_aryptr()->is_flat(), "must be a flat array");
1687         alias_type(idx)->set_rewritable(true);
1688       }
1689     }
1690   }
1691 
1692   // Fill the cache for next time.
1693   if (!uncached) {
1694     ace->_adr_type = adr_type;
1695     ace->_index    = idx;
1696     assert(alias_type(adr_type) == alias_type(idx),  "type must be installed");
1697 
1698     // Might as well try to fill the cache for the flattened version, too.
1699     AliasCacheEntry* face = probe_alias_cache(flat);
1700     if (face->_adr_type == NULL) {
1701       face->_adr_type = flat;
1702       face->_index    = idx;
1703       assert(alias_type(flat) == alias_type(idx), "flat type must work too");
1704     }
1705   }
1706 
1707   return alias_type(idx);
1708 }
1709 
1710 
1711 Compile::AliasType* Compile::alias_type(ciField* field) {
1712   const TypeOopPtr* t;
1713   if (field->is_static())
1714     t = TypeInstPtr::make(field->holder()->java_mirror());
1715   else
1716     t = TypeOopPtr::make_from_klass_raw(field->holder());
1717   AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1718   assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
1719   return atp;
1720 }
1721 
1722 
1723 //------------------------------have_alias_type--------------------------------
1724 bool Compile::have_alias_type(const TypePtr* adr_type) {

1846   }
1847   assert(range_check_cast_count() == 0, "should be empty");
1848 }
1849 
1850 void Compile::add_opaque4_node(Node* n) {
1851   assert(n->Opcode() == Op_Opaque4, "Opaque4 only");
1852   assert(!_opaque4_nodes->contains(n), "duplicate entry in Opaque4 list");
1853   _opaque4_nodes->append(n);
1854 }
1855 
1856 // Remove all Opaque4 nodes.
1857 void Compile::remove_opaque4_nodes(PhaseIterGVN &igvn) {
1858   for (int i = opaque4_count(); i > 0; i--) {
1859     Node* opaq = opaque4_node(i-1);
1860     assert(opaq->Opcode() == Op_Opaque4, "Opaque4 only");
1861     igvn.replace_node(opaq, opaq->in(2));
1862   }
1863   assert(opaque4_count() == 0, "should be empty");
1864 }
1865 
1866 void Compile::add_inline_type(Node* n) {
1867   assert(n->is_InlineTypeBase(), "unexpected node");
1868   if (_inline_type_nodes != NULL) {
1869     _inline_type_nodes->push(n);
1870   }
1871 }
1872 
1873 void Compile::remove_inline_type(Node* n) {
1874   assert(n->is_InlineTypeBase(), "unexpected node");
1875   if (_inline_type_nodes != NULL && _inline_type_nodes->contains(n)) {
1876     _inline_type_nodes->remove(n);
1877   }
1878 }
1879 
1880 // Does the return value keep otherwise useless inline type allocations alive?
1881 static bool return_val_keeps_allocations_alive(Node* ret_val) {
1882   ResourceMark rm;
1883   Unique_Node_List wq;
1884   wq.push(ret_val);
1885   bool some_allocations = false;
1886   for (uint i = 0; i < wq.size(); i++) {
1887     Node* n = wq.at(i);
1888     assert(!n->is_InlineType(), "chain of inline type nodes");
1889     if (n->outcnt() > 1) {
1890       // Some other use for the allocation
1891       return false;
1892     } else if (n->is_InlineTypePtr()) {
1893       wq.push(n->in(1));
1894     } else if (n->is_Phi()) {
1895       for (uint j = 1; j < n->req(); j++) {
1896         wq.push(n->in(j));
1897       }
1898     } else if (n->is_CheckCastPP() &&
1899                n->in(1)->is_Proj() &&
1900                n->in(1)->in(0)->is_Allocate()) {
1901       some_allocations = true;
1902     }
1903   }
1904   return some_allocations;
1905 }
1906 
// Scalarize all tracked inline type nodes in safepoint debug info. When
// post_ea is true (after escape analysis), InlineTypePtr nodes are also
// replaced by their oop input. Finally, if the method returns an inline
// type as fields and the oop return value only keeps an otherwise unused
// buffer allocation alive, the return value is replaced by a tagged klass
// pointer so the allocation can die. Runs igvn.optimize() at the end to
// clean up.
void Compile::process_inline_types(PhaseIterGVN &igvn, bool post_ea) {
  // Make inline types scalar in safepoints
  for (int i = _inline_type_nodes->length()-1; i >= 0; i--) {
    InlineTypeBaseNode* vt = _inline_type_nodes->at(i)->as_InlineTypeBase();
    vt->make_scalar_in_safepoints(&igvn);
  }
  // Remove InlineTypePtr nodes only after EA to give scalar replacement a chance
  // to remove buffer allocations. InlineType nodes are kept until loop opts and
  // removed via InlineTypeNode::remove_redundant_allocations.
  if (post_ea) {
    // Drain the tracking list; only InlineTypePtr nodes are replaced here.
    while (_inline_type_nodes->length() > 0) {
      InlineTypeBaseNode* vt = _inline_type_nodes->pop()->as_InlineTypeBase();
      if (vt->is_InlineTypePtr()) {
        igvn.replace_node(vt, vt->get_oop());
      }
    }
  }
  // Make sure that the return value does not keep an unused allocation alive
  if (tf()->returns_inline_type_as_fields()) {
    // Find the single Return node among root's inputs (asserted unique).
    Node* ret = NULL;
    for (uint i = 1; i < root()->req(); i++){
      Node* in = root()->in(i);
      if (in->Opcode() == Op_Return) {
        assert(ret == NULL, "only one return");
        ret = in;
      }
    }
    if (ret != NULL) {
      Node* ret_val = ret->in(TypeFunc::Parms);
      if (igvn.type(ret_val)->isa_oopptr() &&
          return_val_keeps_allocations_alive(ret_val)) {
        // Replace the oop return value with the tagged klass pointer of its
        // inline klass; the old value is expected to become dead immediately.
        igvn.replace_input_of(ret, TypeFunc::Parms, InlineTypeNode::tagged_klass(igvn.type(ret_val)->inline_klass(), igvn));
        assert(ret_val->outcnt() == 0, "should be dead now");
        igvn.remove_dead_node(ret_val);
      }
    }
  }
  igvn.optimize();
}
1946 
// Re-slice flattened (inline type) array accesses. During parsing, every
// flattened array access shares the single TypeAryPtr::INLINES slice; this
// pass moves each access to a per-field slice and rewires the memory graph
// (Phis, MergeMems, membars) to match. No-op unless the compilation unit
// actually performed flattened accesses.
void Compile::adjust_flattened_array_access_aliases(PhaseIterGVN& igvn) {
  if (!_has_flattened_accesses) {
    return;
  }
  // Initially, all flattened array accesses share the same slice to
  // keep dependencies with Object[] array accesses (that could be
  // to a flattened array) correct. We're done with parsing so we
  // now know all flattened array accesses in this compile
  // unit. Let's move flattened array accesses to their own slice,
  // one per element field. This should help memory access
  // optimizations.
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(root());

  Node_List mergememnodes;
  Node_List memnodes;

  // Alias index currently shared by all flattened memory accesses
  int index = get_alias_index(TypeAryPtr::INLINES);

  // Find MergeMem nodes and flattened array accesses
  for (uint i = 0; i < wq.size(); i++) {
    Node* n = wq.at(i);
    if (n->is_Mem()) {
      const TypePtr* adr_type = NULL;
      if (n->Opcode() == Op_StoreCM) {
        // A card mark's slice is determined by the oop store it covers
        adr_type = get_adr_type(get_alias_index(n->in(MemNode::OopStore)->adr_type()));
      } else {
        adr_type = get_adr_type(get_alias_index(n->adr_type()));
      }
      if (adr_type == TypeAryPtr::INLINES) {
        memnodes.push(n);
      }
    } else if (n->is_MergeMem()) {
      MergeMemNode* mm = n->as_MergeMem();
      // Only MergeMems with a non-trivial entry on the shared flattened
      // slice need fixing
      if (mm->memory_at(index) != mm->base_memory()) {
        mergememnodes.push(n);
      }
    }
    for (uint j = 0; j < n->req(); j++) {
      Node* m = n->in(j);
      if (m != NULL) {
        wq.push(m);
      }
    }
  }

  if (memnodes.size() > 0) {
    _flattened_accesses_share_alias = false;

    // We are going to change the slice for the flattened array
    // accesses so we need to clear the cache entries that refer to
    // them.
    for (uint i = 0; i < AliasCacheSize; i++) {
      AliasCacheEntry* ace = &_alias_cache[i];
      if (ace->_adr_type != NULL &&
          ace->_adr_type->isa_aryptr() &&
          ace->_adr_type->is_aryptr()->is_flat()) {
        ace->_adr_type = NULL;
        ace->_index = (i != 0) ? 0 : AliasIdxTop; // Make sure the NULL adr_type resolves to AliasIdxTop
      }
    }

    // Find what aliases we are going to add
    int start_alias = num_alias_types()-1;
    int stop_alias = 0;

    for (uint i = 0; i < memnodes.size(); i++) {
      Node* m = memnodes.at(i);
      const TypePtr* adr_type = NULL;
      if (m->Opcode() == Op_StoreCM) {
        adr_type = m->in(MemNode::OopStore)->adr_type();
        // Replace the StoreCM with a clone carrying the new alias index of
        // the covered oop store
        Node* clone = new StoreCMNode(m->in(MemNode::Control), m->in(MemNode::Memory), m->in(MemNode::Address),
                                      m->adr_type(), m->in(MemNode::ValueIn), m->in(MemNode::OopStore),
                                      get_alias_index(adr_type));
        igvn.register_new_node_with_optimizer(clone);
        igvn.replace_node(m, clone);
      } else {
        adr_type = m->adr_type();
#ifdef ASSERT
        m->as_Mem()->set_adr_type(adr_type);
#endif
      }
      // Track the range of alias indexes the re-sliced accesses now use
      int idx = get_alias_index(adr_type);
      start_alias = MIN2(start_alias, idx);
      stop_alias = MAX2(stop_alias, idx);
    }

    assert(stop_alias >= start_alias, "should have expanded aliases");

    Node_Stack stack(0);
#ifdef ASSERT
    VectorSet seen(Thread::current()->resource_area());
#endif
    // Now let's fix the memory graph so each flattened array access
    // is moved to the right slice. Start from the MergeMem nodes.
    uint last = unique();
    for (uint i = 0; i < mergememnodes.size(); i++) {
      MergeMemNode* current = mergememnodes.at(i)->as_MergeMem();
      Node* n = current->memory_at(index);
      MergeMemNode* mm = NULL;
      do {
        // Follow memory edges through memory accesses, phis and
        // narrow membars and push nodes on the stack. Once we hit
        // bottom memory, we pop element off the stack one at a
        // time, in reverse order, and move them to the right slice
        // by changing their memory edges.
        if ((n->is_Phi() && n->adr_type() != TypePtr::BOTTOM) || n->is_Mem() || n->adr_type() == TypeAryPtr::INLINES) {
          assert(!seen.test_set(n->_idx), "");
          // Uses (a load for instance) will need to be moved to the
          // right slice as well and will get a new memory state
          // that we don't know yet. The use could also be the
          // backedge of a loop. We put a place holder node between
          // the memory node and its uses. We replace that place
          // holder with the correct memory state once we know it,
          // i.e. when nodes are popped off the stack. Using the
          // place holder make the logic work in the presence of
          // loops.
          if (n->outcnt() > 1) {
            Node* place_holder = NULL;
            assert(!n->has_out_with(Op_Node), "");
            for (DUIterator k = n->outs(); n->has_out(k); k++) {
              Node* u = n->out(k);
              // Only rewrite pre-existing uses; skip the MergeMem we started
              // from and the edge currently on top of the traversal stack
              if (u != current && u->_idx < last) {
                bool success = false;
                for (uint l = 0; l < u->req(); l++) {
                  if (!stack.is_empty() && u == stack.node() && l == stack.index()) {
                    continue;
                  }
                  Node* in = u->in(l);
                  if (in == n) {
                    if (place_holder == NULL) {
                      place_holder = new Node(1);
                      place_holder->init_req(0, n);
                    }
                    igvn.replace_input_of(u, l, place_holder);
                    success = true;
                  }
                }
                if (success) {
                  --k;
                }
              }
            }
          }
          if (n->is_Phi()) {
            stack.push(n, 1);
            n = n->in(1);
          } else if (n->is_Mem()) {
            stack.push(n, n->req());
            n = n->in(MemNode::Memory);
          } else {
            assert(n->is_Proj() && n->in(0)->Opcode() == Op_MemBarCPUOrder, "");
            stack.push(n, n->req());
            n = n->in(0)->in(TypeFunc::Memory);
          }
        } else {
          assert(n->adr_type() == TypePtr::BOTTOM || (n->Opcode() == Op_Node && n->_idx >= last) || (n->is_Proj() && n->in(0)->is_Initialize()), "");
          // Build a new MergeMem node to carry the new memory state
          // as we build it. IGVN should fold extraneous MergeMem
          // nodes.
          mm = MergeMemNode::make(n);
          igvn.register_new_node_with_optimizer(mm);
          while (stack.size() > 0) {
            Node* m = stack.node();
            uint idx = stack.index();
            if (m->is_Mem()) {
              // Move memory node to its new slice
              const TypePtr* adr_type = m->adr_type();
              int alias = get_alias_index(adr_type);
              Node* prev = mm->memory_at(alias);
              igvn.replace_input_of(m, MemNode::Memory, prev);
              mm->set_memory_at(alias, m);
            } else if (m->is_Phi()) {
              // We need as many new phis as there are new aliases
              igvn.replace_input_of(m, idx, mm);
              if (idx == m->req()-1) {
                // All inputs of the phi were processed: create one new phi
                // per flat slice in [start_alias, stop_alias], plus a phi
                // for base memory
                Node* r = m->in(0);
                for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
                  const Type* adr_type = get_adr_type(j);
                  if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->is_flat()) {
                    continue;
                  }
                  Node* phi = new PhiNode(r, Type::MEMORY, get_adr_type(j));
                  igvn.register_new_node_with_optimizer(phi);
                  for (uint k = 1; k < m->req(); k++) {
                    phi->init_req(k, m->in(k)->as_MergeMem()->memory_at(j));
                  }
                  mm->set_memory_at(j, phi);
                }
                Node* base_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
                igvn.register_new_node_with_optimizer(base_phi);
                for (uint k = 1; k < m->req(); k++) {
                  base_phi->init_req(k, m->in(k)->as_MergeMem()->base_memory());
                }
                mm->set_base_memory(base_phi);
              }
            } else {
              // This is a MemBarCPUOrder node from
              // Parse::array_load()/Parse::array_store(), in the
              // branch that handles flattened arrays hidden under
              // an Object[] array. We also need one new membar per
              // new alias to keep the unknown access that the
              // membars protect properly ordered with accesses to
              // known flattened array.
              assert(m->is_Proj(), "projection expected");
              Node* ctrl = m->in(0)->in(TypeFunc::Control);
              igvn.replace_input_of(m->in(0), TypeFunc::Control, top());
              for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
                const Type* adr_type = get_adr_type(j);
                if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->is_flat()) {
                  continue;
                }
                // Chain a narrow membar for this slice after the previous one
                MemBarNode* mb = new MemBarCPUOrderNode(this, j, NULL);
                igvn.register_new_node_with_optimizer(mb);
                Node* mem = mm->memory_at(j);
                mb->init_req(TypeFunc::Control, ctrl);
                mb->init_req(TypeFunc::Memory, mem);
                ctrl = new ProjNode(mb, TypeFunc::Control);
                igvn.register_new_node_with_optimizer(ctrl);
                mem = new ProjNode(mb, TypeFunc::Memory);
                igvn.register_new_node_with_optimizer(mem);
                mm->set_memory_at(j, mem);
              }
              igvn.replace_node(m->in(0)->as_Multi()->proj_out(TypeFunc::Control), ctrl);
            }
            if (idx < m->req()-1) {
              // More inputs of m to process: descend into the next one
              idx += 1;
              stack.set_index(idx);
              n = m->in(idx);
              break;
            }
            // Take care of place holder nodes
            if (m->has_out_with(Op_Node)) {
              Node* place_holder = m->find_out_with(Op_Node);
              if (place_holder != NULL) {
                Node* mm_clone = mm->clone();
                igvn.register_new_node_with_optimizer(mm_clone);
                // NOTE(review): the hook appears to keep mm alive while the
                // place holder is replaced by the clone — confirm
                Node* hook = new Node(1);
                hook->init_req(0, mm);
                igvn.replace_node(place_holder, mm_clone);
                hook->destruct();
              }
              assert(!m->has_out_with(Op_Node), "place holder should be gone now");
            }
            stack.pop();
          }
        }
      } while(stack.size() > 0);
      // Fix the memory state at the MergeMem we started from
      igvn.rehash_node_delayed(current);
      for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
        const Type* adr_type = get_adr_type(j);
        if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->is_flat()) {
          continue;
        }
        current->set_memory_at(j, mm);
      }
      // The shared flattened slice reverts to plain base memory
      current->set_memory_at(index, current->base_memory());
    }
    igvn.optimize();
  }
  print_method(PHASE_SPLIT_INLINES_ARRAY, 2);
}
2212 
2213 
2214 // StringOpts and late inlining of string methods
2215 void Compile::inline_string_calls(bool parse_time) {
2216   {
2217     // remove useless nodes to make the usage analysis simpler
2218     ResourceMark rm;
2219     PhaseRemoveUseless pru(initial_gvn(), for_igvn());
2220   }
2221 
2222   {
2223     ResourceMark rm;
2224     print_method(PHASE_BEFORE_STRINGOPTS, 3);
2225     PhaseStringOpts pso(initial_gvn(), for_igvn());
2226     print_method(PHASE_AFTER_STRINGOPTS, 3);
2227   }
2228 
2229   // now inline anything that we skipped the first time around
2230   if (!parse_time) {
2231     _late_inlines_pos = _late_inlines.length();
2232   }
2233 

2473   remove_speculative_types(igvn);
2474 
2475   // No more new expensive nodes will be added to the list from here
2476   // so keep only the actual candidates for optimizations.
2477   cleanup_expensive_nodes(igvn);
2478 
2479   if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2480     Compile::TracePhase tp("", &timers[_t_renumberLive]);
2481     initial_gvn()->replace_with(&igvn);
2482     for_igvn()->clear();
2483     Unique_Node_List new_worklist(C->comp_arena());
2484     {
2485       ResourceMark rm;
2486       PhaseRenumberLive prl = PhaseRenumberLive(initial_gvn(), for_igvn(), &new_worklist);
2487     }
2488     set_for_igvn(&new_worklist);
2489     igvn = PhaseIterGVN(initial_gvn());
2490     igvn.optimize();
2491   }
2492 
2493   if (_inline_type_nodes->length() > 0) {
2494     // Do this once all inlining is over to avoid getting inconsistent debug info
2495     process_inline_types(igvn);
2496   }
2497 
2498   adjust_flattened_array_access_aliases(igvn);
2499 
2500   // Perform escape analysis
2501   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
2502     if (has_loops()) {
2503       // Cleanup graph (remove dead nodes).
2504       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2505       PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll);
2506       if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2507       if (failing())  return;
2508     }
2509     ConnectionGraph::do_analysis(this, &igvn);
2510 
2511     if (failing())  return;
2512 
2513     // Optimize out fields loads from scalar replaceable allocations.
2514     igvn.optimize();
2515     print_method(PHASE_ITER_GVN_AFTER_EA, 2);
2516 
2517     if (failing())  return;
2518 
2519     if (congraph() != NULL && macro_count() > 0) {
2520       TracePhase tp("macroEliminate", &timers[_t_macroEliminate]);
2521       PhaseMacroExpand mexp(igvn);
2522       mexp.eliminate_macro_nodes();
2523       igvn.set_delay_transform(false);
2524 
2525       igvn.optimize();
2526       print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
2527 
2528       if (failing())  return;
2529     }
2530   }
2531 
2532   if (_inline_type_nodes->length() > 0) {
2533     // Process inline types again now that EA might have simplified the graph
2534     process_inline_types(igvn, /* post_ea= */ true);
2535   }
2536 
2537   // Loop transforms on the ideal graph.  Range Check Elimination,
2538   // peeling, unrolling, etc.
2539 
2540   // Set loop opts counter
2541   if((_loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
2542     {
2543       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2544       PhaseIdealLoop::optimize(igvn, LoopOptsDefault);
2545       _loop_opts_cnt--;
2546       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
2547       if (failing())  return;
2548     }
2549     // Loop opts pass if partial peeling occurred in previous pass
2550     if(PartialPeelLoop && major_progress() && (_loop_opts_cnt > 0)) {
2551       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2552       PhaseIdealLoop::optimize(igvn, LoopOptsSkipSplitIf);
2553       _loop_opts_cnt--;
2554       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
2555       if (failing())  return;
2556     }

3159             // Accumulate any precedence edges
3160             if (mem->in(i) != NULL) {
3161               n->add_prec(mem->in(i));
3162             }
3163           }
3164           // Everything above this point has been processed.
3165           done = true;
3166         }
3167         // Eliminate the previous StoreCM
3168         prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
3169         assert(mem->outcnt() == 0, "should be dead");
3170         mem->disconnect_inputs(NULL, this);
3171       } else {
3172         prev = mem;
3173       }
3174       mem = prev->in(MemNode::Memory);
3175     }
3176   }
3177 }
3178 
3179 
3180 //------------------------------final_graph_reshaping_impl----------------------
3181 // Implement items 1-5 from final_graph_reshaping below.
3182 void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
3183 
3184   if ( n->outcnt() == 0 ) return; // dead node
3185   uint nop = n->Opcode();
3186 
3187   // Check for 2-input instruction with "last use" on right input.
3188   // Swap to left input.  Implements item (2).
3189   if( n->req() == 3 &&          // two-input instruction
3190       n->in(1)->outcnt() > 1 && // left use is NOT a last use
3191       (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
3192       n->in(2)->outcnt() == 1 &&// right use IS a last use
3193       !n->in(2)->is_Con() ) {   // right use is not a constant
3194     // Check for commutative opcode
3195     switch( nop ) {
3196     case Op_AddI:  case Op_AddF:  case Op_AddD:  case Op_AddL:
3197     case Op_MaxI:  case Op_MinI:
3198     case Op_MulI:  case Op_MulF:  case Op_MulD:  case Op_MulL:
3199     case Op_AndL:  case Op_XorL:  case Op_OrL:

3898           // Replace all nodes with identical edges as m with m
3899           k->subsume_by(m, this);
3900         }
3901       }
3902     }
3903     break;
3904   }
3905   case Op_CmpUL: {
3906     if (!Matcher::has_match_rule(Op_CmpUL)) {
3907       // No support for unsigned long comparisons
3908       ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
3909       Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
3910       Node* orl = new OrLNode(n->in(1), sign_bit_mask);
3911       ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
3912       Node* andl = new AndLNode(orl, remove_sign_mask);
3913       Node* cmp = new CmpLNode(andl, n->in(2));
3914       n->subsume_by(cmp, this);
3915     }
3916     break;
3917   }
3918 #ifdef ASSERT
3919   case Op_InlineTypePtr:
3920   case Op_InlineType: {
3921     n->dump(-1);
3922     assert(false, "inline type node was not removed");
3923     break;
3924   }
3925 #endif
3926   default:
3927     assert(!n->is_Call(), "");
3928     assert(!n->is_Mem(), "");
3929     assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
3930     break;
3931   }
3932 }
3933 
3934 //------------------------------final_graph_reshaping_walk---------------------
3935 // Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
3936 // requires that the walk visits a node's inputs before visiting the node.
3937 void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
3938   Unique_Node_List sfpt;
3939 
3940   frc._visited.set(root->_idx); // first, mark node as visited
3941   uint cnt = root->req();
3942   Node *n = root;
3943   uint  i = 0;
3944   while (true) {
3945     if (i < cnt) {

4253   }
4254 }
4255 
4256 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4257   return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4258 }
4259 
4260 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4261   return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4262 }
4263 
4264 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4265   if (holder->is_initialized()) {
4266     return false;
4267   }
4268   if (holder->is_being_initialized()) {
4269     if (accessing_method->holder() == holder) {
4270       // Access inside a class. The barrier can be elided when access happens in <clinit>,
4271       // <init>, or a static method. In all those cases, there was an initialization
4272       // barrier on the holder klass passed.
4273       if (accessing_method->is_class_initializer() ||
4274           accessing_method->is_object_constructor() ||
4275           accessing_method->is_static()) {
4276         return false;
4277       }
4278     } else if (accessing_method->holder()->is_subclass_of(holder)) {
4279       // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4280       // In case of <init> or a static method, the barrier is on the subclass is not enough:
4281       // child class can become fully initialized while its parent class is still being initialized.
4282       if (accessing_method->is_class_initializer()) {
4283         return false;
4284       }
4285     }
4286     ciMethod* root = method(); // the root method of compilation
4287     if (root != accessing_method) {
4288       return needs_clinit_barrier(holder, root); // check access in the context of compilation root
4289     }
4290   }
4291   return true;
4292 }
4293 
4294 #ifndef PRODUCT
4295 //------------------------------verify_graph_edges---------------------------
4296 // Walk the Graph and verify that there is a one-to-one correspondence
4297 // between Use-Def edges and Def-Use edges in the graph.
4298 void Compile::verify_graph_edges(bool no_dead_code) {
4299   if (VerifyGraphEdges) {
4300     Unique_Node_List visited;
4301     // Call recursive graph walk to check edges
4302     _root->verify_edges(visited);

4383                   _phase_name, C->unique(), C->live_nodes(), C->count_live_nodes_by_graph_walk());
4384   }
4385 
4386   if (VerifyIdealNodeCount) {
4387     Compile::current()->print_missing_nodes();
4388   }
4389 #endif
4390 
4391   if (_log != NULL) {
4392     _log->done("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
4393   }
4394 }
4395 
4396 //----------------------------static_subtype_check-----------------------------
4397 // Shortcut important common cases when superklass is exact:
4398 // (0) superklass is java.lang.Object (can occur in reflective code)
4399 // (1) subklass is already limited to a subtype of superklass => always ok
4400 // (2) subklass does not overlap with superklass => always fail
4401 // (3) superklass has NO subtypes and we can check with a simple compare.
4402 int Compile::static_subtype_check(ciKlass* superk, ciKlass* subk) {
4403   if (StressReflectiveCode || superk == NULL || subk == NULL) {
4404     return SSC_full_test;       // Let caller generate the general case.
4405   }
4406 
4407   if (superk == env()->Object_klass()) {
4408     return SSC_always_true;     // (0) this test cannot fail
4409   }
4410 
4411   ciType* superelem = superk;
4412   if (superelem->is_array_klass()) {
4413     ciArrayKlass* ak = superelem->as_array_klass();
4414     superelem = superelem->as_array_klass()->base_element_type();
4415   }
4416 
4417   if (!subk->is_interface()) {  // cannot trust static interface types yet
4418     if (subk->is_subtype_of(superk)) {
4419       return SSC_always_true;   // (1) false path dead; no dynamic test needed
4420     }
4421     if (!(superelem->is_klass() && superelem->as_klass()->is_interface()) &&
4422         !superk->is_subtype_of(subk)) {
4423       return SSC_always_false;
4424     }
4425   }
4426 
4427   // If casting to an instance klass, it must have no subtypes
4428   if (superk->is_interface()) {
4429     // Cannot trust interfaces yet.
4430     // %%% S.B. superk->nof_implementors() == 1
4431   } else if (superelem->is_instance_klass()) {
4432     ciInstanceKlass* ik = superelem->as_instance_klass();
4433     if (!ik->has_subklass() && !ik->is_interface()) {
4434       if (!ik->is_final()) {
4435         // Add a dependency if there is a chance of a later subclass.

4856     for (uint next = 0; next < worklist.size(); ++next) {
4857       Node *n  = worklist.at(next);
4858       const Type* t = igvn.type_or_null(n);
4859       assert((t == NULL) || (t == t->remove_speculative()), "no more speculative types");
4860       if (n->is_Type()) {
4861         t = n->as_Type()->type();
4862         assert(t == t->remove_speculative(), "no more speculative types");
4863       }
4864       uint max = n->len();
4865       for( uint i = 0; i < max; ++i ) {
4866         Node *m = n->in(i);
4867         if (not_a_node(m))  continue;
4868         worklist.push(m);
4869       }
4870     }
4871     igvn.check_no_speculative_types();
4872 #endif
4873   }
4874 }
4875 
4876 Node* Compile::optimize_acmp(PhaseGVN* phase, Node* a, Node* b) {
4877   const TypeInstPtr* ta = phase->type(a)->isa_instptr();
4878   const TypeInstPtr* tb = phase->type(b)->isa_instptr();
4879   if (!EnableValhalla || ta == NULL || tb == NULL ||
4880       ta->is_zero_type() || tb->is_zero_type() ||
4881       !ta->can_be_inline_type() || !tb->can_be_inline_type()) {
4882     // Use old acmp if one operand is null or not an inline type
4883     return new CmpPNode(a, b);
4884   } else if (ta->is_inlinetypeptr() || tb->is_inlinetypeptr()) {
4885     // We know that one operand is an inline type. Therefore,
4886     // new acmp will only return true if both operands are NULL.
4887     // Check if both operands are null by or'ing the oops.
4888     a = phase->transform(new CastP2XNode(NULL, a));
4889     b = phase->transform(new CastP2XNode(NULL, b));
4890     a = phase->transform(new OrXNode(a, b));
4891     return new CmpXNode(a, phase->MakeConX(0));
4892   }
4893   // Use new acmp
4894   return NULL;
4895 }
4896 
4897 // Auxiliary method to support randomized stressing/fuzzing.
4898 //
4899 // This method can be called an arbitrary number of times, with the current count
4900 // as the argument. The logic allows selecting a single candidate from the
4901 // running list of candidates as follows:
4902 //    int count = 0;
4903 //    Cand* selected = null;
4904 //    while(cand = cand->next()) {
4905 //      if (randomized_select(++count)) {
4906 //        selected = cand;
4907 //      }
4908 //    }
4909 //
4910 // Including count equalizes the chances any candidate is "selected".
4911 // This is useful when we don't have the complete list of candidates to choose
4912 // from uniformly. In this case, we need to adjust the randomness of the
4913 // selection, or else we will end up biasing the selection towards the latter
4914 // candidates.
4915 //
4916 // A quick back-of-the-envelope calculation shows that for a list of n candidates
< prev index next >