< prev index next >

src/hotspot/share/opto/memnode.cpp

Print this page

   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"

  26 #include "classfile/javaClasses.hpp"

  27 #include "compiler/compileLog.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/c2/barrierSetC2.hpp"
  30 #include "memory/allocation.inline.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "oops/objArrayKlass.hpp"
  33 #include "opto/addnode.hpp"
  34 #include "opto/arraycopynode.hpp"
  35 #include "opto/cfgnode.hpp"
  36 #include "opto/compile.hpp"
  37 #include "opto/connode.hpp"
  38 #include "opto/convertnode.hpp"

  39 #include "opto/loopnode.hpp"
  40 #include "opto/machnode.hpp"
  41 #include "opto/matcher.hpp"
  42 #include "opto/memnode.hpp"
  43 #include "opto/mulnode.hpp"
  44 #include "opto/narrowptrnode.hpp"
  45 #include "opto/phaseX.hpp"
  46 #include "opto/regmask.hpp"
  47 #include "opto/rootnode.hpp"
  48 #include "utilities/align.hpp"
  49 #include "utilities/copy.hpp"
  50 #include "utilities/macros.hpp"
  51 #include "utilities/powerOfTwo.hpp"
  52 #include "utilities/vmError.hpp"
  53 
  54 // Portions of code courtesy of Clifford Click
  55 
  56 // Optimization - Graph Style
  57 
  58 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st);

 220       // clone the Phi with our address type
 221       result = mphi->split_out_instance(t_adr, igvn);
 222     } else {
 223       assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
 224     }
 225   }
 226   return result;
 227 }
 228 
 229 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
 230   uint alias_idx = phase->C->get_alias_index(tp);
 231   Node *mem = mmem;
 232 #ifdef ASSERT
 233   {
 234     // Check that current type is consistent with the alias index used during graph construction
 235     assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
 236     bool consistent =  adr_check == NULL || adr_check->empty() ||
 237                        phase->C->must_alias(adr_check, alias_idx );
 238     // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
 239     if( !consistent && adr_check != NULL && !adr_check->empty() &&
 240                tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
 241         adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
 242         ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
 243           adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
 244           adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
 245       // don't assert if it is dead code.
 246       consistent = true;
 247     }
 248     if( !consistent ) {
 249       st->print("alias_idx==%d, adr_check==", alias_idx);
 250       if( adr_check == NULL ) {
 251         st->print("NULL");
 252       } else {
 253         adr_check->dump();
 254       }
 255       st->cr();
 256       print_alias_types();
 257       assert(consistent, "adr_check must match alias idx");
 258     }
 259   }
 260 #endif

 827          "use LoadKlassNode instead");
 828   assert(!(adr_type->isa_aryptr() &&
 829            adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
 830          "use LoadRangeNode instead");
 831   // Check control edge of raw loads
 832   assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
 833           // oop will be recorded in oop map if load crosses safepoint
 834           rt->isa_oopptr() || is_immutable_value(adr),
 835           "raw memory operations should have control edge");
 836   LoadNode* load = NULL;
 837   switch (bt) {
 838   case T_BOOLEAN: load = new LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 839   case T_BYTE:    load = new LoadBNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 840   case T_INT:     load = new LoadINode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 841   case T_CHAR:    load = new LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 842   case T_SHORT:   load = new LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 843   case T_LONG:    load = new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency); break;
 844   case T_FLOAT:   load = new LoadFNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
 845   case T_DOUBLE:  load = new LoadDNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
 846   case T_ADDRESS: load = new LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  mo, control_dependency); break;

 847   case T_OBJECT:
 848 #ifdef _LP64
 849     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
 850       load = new LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo, control_dependency);
 851     } else
 852 #endif
 853     {
 854       assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
 855       load = new LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr(), mo, control_dependency);
 856     }
 857     break;
 858   default:
 859     ShouldNotReachHere();
 860     break;
 861   }
 862   assert(load != NULL, "LoadNode should have been created");
 863   if (unaligned) {
 864     load->set_unaligned_access();
 865   }
 866   if (mismatched) {

 954 
 955     LoadNode* ld = clone()->as_Load();
 956     Node* addp = in(MemNode::Address)->clone();
 957     if (ac->as_ArrayCopy()->is_clonebasic()) {
 958       assert(ld_alloc != NULL, "need an alloc");
 959       assert(addp->is_AddP(), "address must be addp");
 960       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 961       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
 962       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
 963       addp->set_req(AddPNode::Base, src);
 964       addp->set_req(AddPNode::Address, src);
 965     } else {
 966       assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
 967              ac->as_ArrayCopy()->is_copyof_validated() ||
 968              ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
 969       assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
 970       addp->set_req(AddPNode::Base, src);
 971       addp->set_req(AddPNode::Address, src);
 972 
 973       const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
 974       BasicType ary_elem  = ary_t->klass()->as_array_klass()->element_type()->basic_type();
 975       uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
 976       uint shift  = exact_log2(type2aelembytes(ary_elem));




 977 
 978       Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
 979 #ifdef _LP64
 980       diff = phase->transform(new ConvI2LNode(diff));
 981 #endif
 982       diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
 983 
 984       Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
 985       addp->set_req(AddPNode::Offset, offset);
 986     }
 987     addp = phase->transform(addp);
 988 #ifdef ASSERT
 989     const TypePtr* adr_type = phase->type(addp)->is_ptr();
 990     ld->_adr_type = adr_type;
 991 #endif
 992     ld->set_req(MemNode::Address, addp);
 993     ld->set_req(0, ctl);
 994     ld->set_req(MemNode::Memory, mem);
 995     // load depends on the tests that validate the arraycopy
 996     ld->_control_dependency = UnknownControl;

1084         // the same pointer-and-offset that we stored to.
1085         // Casted version may carry a dependency and it is respected.
1086         // Thus, we are able to replace L by V.
1087       }
1088       // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
1089       if (store_Opcode() != st->Opcode())
1090         return NULL;
1091       return st->in(MemNode::ValueIn);
1092     }
1093 
1094     // A load from a freshly-created object always returns zero.
1095     // (This can happen after LoadNode::Ideal resets the load's memory input
1096     // to find_captured_store, which returned InitializeNode::zero_memory.)
1097     if (st->is_Proj() && st->in(0)->is_Allocate() &&
1098         (st->in(0) == ld_alloc) &&
1099         (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
1100       // return a zero value for the load's basic type
1101       // (This is one of the few places where a generic PhaseTransform
1102       // can create new nodes.  Think of it as lazily manifesting
1103       // virtually pre-existing constants.)






1104       return phase->zerocon(memory_type());
1105     }
1106 
1107     // A load from an initialization barrier can match a captured store.
1108     if (st->is_Proj() && st->in(0)->is_Initialize()) {
1109       InitializeNode* init = st->in(0)->as_Initialize();
1110       AllocateNode* alloc = init->allocation();
1111       if ((alloc != NULL) && (alloc == ld_alloc)) {
1112         // examine a captured store value
1113         st = init->find_captured_store(ld_off, memory_size(), phase);
1114         if (st != NULL) {
1115           continue;             // take one more trip around
1116         }
1117       }
1118     }
1119 
1120     // Load boxed value from result of valueOf() call is input parameter.
1121     if (this->is_Load() && ld_adr->is_AddP() &&
1122         (tp != NULL) && tp->is_ptr_to_boxed_value()) {
1123       intptr_t ignore = 0;

1141 //----------------------is_instance_field_load_with_local_phi------------------
1142 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1143   if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1144       in(Address)->is_AddP() ) {
1145     const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1146     // Only instances and boxed values.
1147     if( t_oop != NULL &&
1148         (t_oop->is_ptr_to_boxed_value() ||
1149          t_oop->is_known_instance_field()) &&
1150         t_oop->offset() != Type::OffsetBot &&
1151         t_oop->offset() != Type::OffsetTop) {
1152       return true;
1153     }
1154   }
1155   return false;
1156 }
1157 
1158 //------------------------------Identity---------------------------------------
1159 // Loads are identity if previous store is to same address
1160 Node* LoadNode::Identity(PhaseGVN* phase) {



























1161   // If the previous store-maker is the right kind of Store, and the store is
1162   // to the same address, then we are equal to the value stored.
1163   Node* mem = in(Memory);
1164   Node* value = can_see_stored_value(mem, phase);
1165   if( value ) {
1166     // byte, short & char stores truncate naturally.
1167     // A load has to load the truncated value which requires
1168     // some sort of masking operation and that requires an
1169     // Ideal call instead of an Identity call.
1170     if (memory_size() < BytesPerInt) {
1171       // If the input to the store does not fit with the load's result type,
1172       // it must be truncated via an Ideal call.
1173       if (!phase->type(value)->higher_equal(phase->type(this)))
1174         return this;
1175     }
1176     // (This works even when value is a Con, but LoadNode::Value
1177     // usually runs first, producing the singleton type of the Con.)
1178     return value;
1179   }
1180 

1698   // fold up, do so.
1699   Node* prev_mem = find_previous_store(phase);
1700   if (prev_mem != NULL) {
1701     Node* value = can_see_arraycopy_value(prev_mem, phase);
1702     if (value != NULL) {
1703       return value;
1704     }
1705   }
1706   // Steps (a), (b):  Walk past independent stores to find an exact match.
1707   if (prev_mem != NULL && prev_mem != in(MemNode::Memory)) {
1708     // (c) See if we can fold up on the spot, but don't fold up here.
1709     // Fold-up might require truncation (for LoadB/LoadS/LoadUS) or
1710     // just return a prior value, which is done by Identity calls.
1711     if (can_see_stored_value(prev_mem, phase)) {
1712       // Make ready for step (d):
1713       set_req(MemNode::Memory, prev_mem);
1714       return this;
1715     }
1716   }
1717 
1718   AllocateNode* alloc = is_new_object_mark_load(phase);
1719   if (alloc != NULL && alloc->Opcode() == Op_Allocate && UseBiasedLocking) {




1720     InitializeNode* init = alloc->initialization();
1721     Node* control = init->proj_out(0);
1722     return alloc->make_ideal_mark(phase, address, control, mem);
1723   }
1724 
1725   return progress ? this : NULL;
1726 }
1727 
1728 // Helper to recognize certain Klass fields which are invariant across
1729 // some group of array types (e.g., int[] or all T[] where T < Object).
1730 const Type*
1731 LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
1732                                  ciKlass* klass) const {
1733   if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) {
1734     // The field is Klass::_modifier_flags.  Return its (constant) value.
1735     // (Folds up the 2nd indirection in aClassConstant.getModifiers().)
1736     assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags");
1737     return TypeInt::make(klass->modifier_flags());
1738   }
1739   if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
1740     // The field is Klass::_access_flags.  Return its (constant) value.
1741     // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
1742     assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags");

1794       }
1795     }
1796 
1797     // Don't do this for integer types. There is only potential profit if
1798     // the element type t is lower than _type; that is, for int types, if _type is
1799     // more restrictive than t.  This only happens here if one is short and the other
1800     // char (both 16 bits), and in those cases we've made an intentional decision
1801     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
1802     // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
1803     //
1804     // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
1805     // where the _gvn.type of the AddP is wider than 8.  This occurs when an earlier
1806     // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
1807     // subsumed by p1.  If p1 is on the worklist but has not yet been re-transformed,
1808     // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
1809     // In fact, that could have been the original type of p1, and p1 could have
1810     // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
1811     // expression (LShiftL quux 3) independently optimized to the constant 8.
1812     if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
1813         && (_type->isa_vect() == NULL)

1814         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
1815       // t might actually be lower than _type, if _type is a unique
1816       // concrete subclass of abstract class t.
1817       if (off_beyond_header || off == Type::OffsetBot) {  // is the offset beyond the header?
1818         const Type* jt = t->join_speculative(_type);
1819         // In any case, do not allow the join, per se, to empty out the type.
1820         if (jt->empty() && !t->empty()) {
1821           // This can happen if a interface-typed array narrows to a class type.
1822           jt = _type;
1823         }
1824 #ifdef ASSERT
1825         if (phase->C->eliminate_boxing() && adr->is_AddP()) {
1826           // The pointers in the autobox arrays are always non-null
1827           Node* base = adr->in(AddPNode::Base);
1828           if ((base != NULL) && base->is_DecodeN()) {
1829             // Get LoadN node which loads IntegerCache.cache field
1830             base = base->in(1);
1831           }
1832           if ((base != NULL) && base->is_Con()) {
1833             const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
1834             if ((base_type != NULL) && base_type->is_autobox_cache()) {
1835               // It could be narrow oop
1836               assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
1837             }
1838           }
1839         }
1840 #endif
1841         return jt;
1842       }
1843     }
1844   } else if (tp->base() == Type::InstPtr) {
1845     assert( off != Type::OffsetBot ||
1846             // arrays can be cast to Objects
1847             tp->is_oopptr()->klass()->is_java_lang_Object() ||

1848             // unsafe field access may not have a constant offset
1849             C->has_unsafe_access(),
1850             "Field accesses must be precise" );
1851     // For oop loads, we expect the _type to be precise.
1852 
1853     // Optimize loads from constant fields.


1854     const TypeInstPtr* tinst = tp->is_instptr();
1855     ciObject* const_oop = tinst->const_oop();
1856     if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != NULL && const_oop->is_instance()) {
1857       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), memory_type());









1858       if (con_type != NULL) {
1859         return con_type;
1860       }
1861     }
1862   } else if (tp->base() == Type::KlassPtr) {
1863     assert( off != Type::OffsetBot ||
1864             // arrays can be cast to Objects

1865             tp->is_klassptr()->klass()->is_java_lang_Object() ||
1866             // also allow array-loading from the primary supertype
1867             // array during subtype checks
1868             Opcode() == Op_LoadKlass,
1869             "Field accesses must be precise" );
1870     // For klass/static loads, we expect the _type to be precise
1871   } else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
1872     /* With mirrors being an indirect in the Klass*
1873      * the VM is now using two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
1874      * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
1875      *
1876      * So check the type and klass of the node before the LoadP.
1877      */
1878     Node* adr2 = adr->in(MemNode::Address);
1879     const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
1880     if (tkls != NULL && !StressReflectiveCode) {
1881       ciKlass* klass = tkls->klass();
1882       if (klass->is_loaded() && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
1883         assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
1884         assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
1885         return TypeInstPtr::make(klass->java_mirror());
















1886       }
1887     }
1888   }
1889 
1890   const TypeKlassPtr *tkls = tp->isa_klassptr();
1891   if (tkls != NULL && !StressReflectiveCode) {
1892     ciKlass* klass = tkls->klass();
1893     if (klass->is_loaded() && tkls->klass_is_exact()) {
1894       // We are loading a field from a Klass metaobject whose identity
1895       // is known at compile time (the type is "exact" or "precise").
1896       // Check for fields we know are maintained as constants by the VM.
1897       if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
1898         // The field is Klass::_super_check_offset.  Return its (constant) value.
1899         // (Folds up type checking code.)
1900         assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
1901         return TypeInt::make(klass->super_check_offset());
1902       }
1903       // Compute index into primary_supers array
1904       juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
1905       // Check for overflowing; use unsigned compare to handle the negative case.
1906       if( depth < ciKlass::primary_super_limit() ) {
1907         // The field is an element of Klass::_primary_supers.  Return its (constant) value.
1908         // (Folds up type checking code.)
1909         assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
1910         ciKlass *ss = klass->super_of_depth(depth);
1911         return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
1912       }
1913       const Type* aift = load_array_final_field(tkls, klass);
1914       if (aift != NULL)  return aift;
1915     }
1916 
1917     // We can still check if we are loading from the primary_supers array at a
1918     // shallow enough depth.  Even though the klass is not exact, entries less
1919     // than or equal to its super depth are correct.
1920     if (klass->is_loaded() ) {
1921       ciType *inner = klass;
1922       while( inner->is_obj_array_klass() )
1923         inner = inner->as_obj_array_klass()->base_element_type();
1924       if( inner->is_instance_klass() &&
1925           !inner->as_instance_klass()->flags().is_interface() ) {
1926         // Compute index into primary_supers array
1927         juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
1928         // Check for overflowing; use unsigned compare to handle the negative case.
1929         if( depth < ciKlass::primary_super_limit() &&
1930             depth <= klass->super_depth() ) { // allow self-depth checks to handle self-check case
1931           // The field is an element of Klass::_primary_supers.  Return its (constant) value.
1932           // (Folds up type checking code.)
1933           assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
1934           ciKlass *ss = klass->super_of_depth(depth);
1935           return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
1936         }
1937       }
1938     }
1939 
1940     // If the type is enough to determine that the thing is not an array,

2105   return LoadNode::Ideal(phase, can_reshape);
2106 }
2107 
2108 const Type* LoadSNode::Value(PhaseGVN* phase) const {
2109   Node* mem = in(MemNode::Memory);
2110   Node* value = can_see_stored_value(mem,phase);
2111   if (value != NULL && value->is_Con() &&
2112       !value->bottom_type()->higher_equal(_type)) {
2113     // If the input to the store does not fit with the load's result type,
2114     // it must be truncated. We can't delay until Ideal call since
2115     // a singleton Value is needed for split_thru_phi optimization.
2116     int con = value->get_int();
2117     return TypeInt::make((con << 16) >> 16);
2118   }
2119   return LoadNode::Value(phase);
2120 }
2121 
2122 //=============================================================================
2123 //----------------------------LoadKlassNode::make------------------------------
2124 // Polymorphic factory method:
2125 Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {

2126   // sanity check the alias category against the created node type
2127   const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
2128   assert(adr_type != NULL, "expecting TypeKlassPtr");
2129 #ifdef _LP64
2130   if (adr_type->is_ptr_to_narrowklass()) {
2131     assert(UseCompressedClassPointers, "no compressed klasses");
2132     Node* load_klass = gvn.transform(new LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
2133     return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2134   }
2135 #endif
2136   assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
2137   return new LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
2138 }
2139 
//------------------------------Value------------------------------------------
// Type computation is delegated to klass_value_common() (presumably shared
// with the narrow-klass load variant — helper not visible in this chunk).
const Type* LoadKlassNode::Value(PhaseGVN* phase) const {
  return klass_value_common(phase);
}
2144 
2145 // In most cases, LoadKlassNode does not have the control input set. If the control

2192     }
2193     if( !ik->is_loaded() )
2194       return _type;             // Bail out if not loaded
2195     if (offset == oopDesc::klass_offset_in_bytes()) {
2196       if (tinst->klass_is_exact()) {
2197         return TypeKlassPtr::make(ik);
2198       }
2199       // See if we can become precise: no subklasses and no interface
2200       // (Note:  We need to support verified interfaces.)
2201       if (!ik->is_interface() && !ik->has_subklass()) {
2202         // Add a dependence; if any subclass added we need to recompile
2203         if (!ik->is_final()) {
2204           // %%% should use stronger assert_unique_concrete_subtype instead
2205           phase->C->dependencies()->assert_leaf_type(ik);
2206         }
2207         // Return precise klass
2208         return TypeKlassPtr::make(ik);
2209       }
2210 
2211       // Return root of possible klass
2212       return TypeKlassPtr::make(TypePtr::NotNull, ik, 0/*offset*/);
2213     }
2214   }
2215 
2216   // Check for loading klass from an array
2217   const TypeAryPtr *tary = tp->isa_aryptr();
2218   if( tary != NULL ) {
2219     ciKlass *tary_klass = tary->klass();
2220     if (tary_klass != NULL   // can be NULL when at BOTTOM or TOP
2221         && tary->offset() == oopDesc::klass_offset_in_bytes()) {
2222       if (tary->klass_is_exact()) {
2223         return TypeKlassPtr::make(tary_klass);
2224       }
2225       ciArrayKlass *ak = tary->klass()->as_array_klass();
2226       // If the klass is an object array, we defer the question to the
2227       // array component klass.
2228       if( ak->is_obj_array_klass() ) {
2229         assert( ak->is_loaded(), "" );
2230         ciKlass *base_k = ak->as_obj_array_klass()->base_element_klass();
2231         if( base_k->is_loaded() && base_k->is_instance_klass() ) {
2232           ciInstanceKlass* ik = base_k->as_instance_klass();
2233           // See if we can become precise: no subklasses and no interface
2234           if (!ik->is_interface() && !ik->has_subklass()) {
2235             // Add a dependence; if any subclass added we need to recompile
2236             if (!ik->is_final()) {
2237               phase->C->dependencies()->assert_leaf_type(ik);
2238             }
2239             // Return precise array klass
2240             return TypeKlassPtr::make(ak);
2241           }
2242         }
2243         return TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
2244       } else {                  // Found a type-array?
2245         assert( ak->is_type_array_klass(), "" );
2246         return TypeKlassPtr::make(ak); // These are always precise
2247       }
2248     }
2249   }
2250 
2251   // Check for loading klass from an array klass
2252   const TypeKlassPtr *tkls = tp->isa_klassptr();
2253   if (tkls != NULL && !StressReflectiveCode) {
2254     ciKlass* klass = tkls->klass();
2255     if( !klass->is_loaded() )
2256       return _type;             // Bail out if not loaded


2257     if( klass->is_obj_array_klass() &&
2258         tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2259       ciKlass* elem = klass->as_obj_array_klass()->element_klass();
2260       // // Always returning precise element type is incorrect,
2261       // // e.g., element type could be object and array may contain strings
2262       // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
2263 
2264       // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2265       // according to the element type's subclassing.
2266       return TypeKlassPtr::make(tkls->ptr(), elem, 0/*offset*/);




2267     }
2268     if( klass->is_instance_klass() && tkls->klass_is_exact() &&
2269         tkls->offset() == in_bytes(Klass::super_offset())) {
2270       ciKlass* sup = klass->as_instance_klass()->super();
2271       // The field is Klass::_super.  Return its (constant) value.
2272       // (Folds up the 2nd indirection in aClassConstant.getSuperClass().)
2273       return sup ? TypeKlassPtr::make(sup) : TypePtr::NULL_PTR;
2274     }
2275   }
2276 
2277   // Bailout case
2278   return LoadNode::Value(phase);
2279 }
2280 
//------------------------------Identity---------------------------------------
// To clean up reflective code, simplify k.java_mirror.as_klass to plain k.
// Also feed through the klass in Allocate(...klass...)._klass.
// The actual work is delegated to the shared helper klass_identity_common()
// (not visible in this chunk).
Node* LoadKlassNode::Identity(PhaseGVN* phase) {
  return klass_identity_common(phase);
}

//=============================================================================
//---------------------------StoreNode::make-----------------------------------
// Polymorphic factory method: create the StoreNode subclass matching the
// basic type 'bt' of the value being stored.  On LP64, oop and klass stores
// to narrow-pointer addresses are emitted as compressed stores with an
// explicit encode of the value.
StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) {
  // Only unordered and release memory orderings are expected from callers.
  assert((mo == unordered || mo == release), "unexpected");
  Compile* C = gvn.C;
  // Raw-memory stores must carry a control edge so they cannot float freely.
  assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
         ctl != NULL, "raw memory operations should have control edge");

  switch (bt) {
  // Booleans are normalized to 0/1 before storing as a byte.
  case T_BOOLEAN: val = gvn.transform(new AndINode(val, gvn.intcon(0x1))); // Fall through to T_BYTE case
  case T_BYTE:    return new StoreBNode(ctl, mem, adr, adr_type, val, mo);
  case T_INT:     return new StoreINode(ctl, mem, adr, adr_type, val, mo);
  case T_CHAR:
  case T_SHORT:   return new StoreCNode(ctl, mem, adr, adr_type, val, mo);
  case T_LONG:    return new StoreLNode(ctl, mem, adr, adr_type, val, mo);
  case T_FLOAT:   return new StoreFNode(ctl, mem, adr, adr_type, val, mo);
  case T_DOUBLE:  return new StoreDNode(ctl, mem, adr, adr_type, val, mo);
  // T_METADATA and T_ADDRESS share the pointer-store handling below.
  case T_METADATA:
  case T_ADDRESS:
  case T_OBJECT:
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      // Destination holds a compressed oop: encode the value first.
      val = gvn.transform(new EncodePNode(val, val->bottom_type()->make_narrowoop()));
      return new StoreNNode(ctl, mem, adr, adr_type, val, mo);
    } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
               (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
                adr->bottom_type()->isa_rawptr())) {
      // Destination holds a compressed klass pointer: encode the klass first.
      val = gvn.transform(new EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
      return new StoreNKlassNode(ctl, mem, adr, adr_type, val, mo);
    }
#endif
    {
      // Plain (uncompressed) pointer store.
      return new StorePNode(ctl, mem, adr, adr_type, val, mo);
    }
  default:
    ShouldNotReachHere();
    return (StoreNode*)NULL;
  }
}

2514   //return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address) + (uintptr_t)in(ValueIn);
2515 
2516   // Since they are not commoned, do not hash them:
2517   return NO_HASH;
2518 }
2519 
2520 //------------------------------Ideal------------------------------------------
2521 // Change back-to-back Store(, p, x) -> Store(m, p, y) to Store(m, p, x).
2522 // When a store immediately follows a relevant allocation/initialization,
2523 // try to capture it into the initialization, or hoist it above.
2524 Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2525   Node* p = MemNode::Ideal_common(phase, can_reshape);
2526   if (p)  return (p == NodeSentinel) ? NULL : p;
2527 
2528   Node* mem     = in(MemNode::Memory);
2529   Node* address = in(MemNode::Address);
2530   // Back-to-back stores to same address?  Fold em up.  Generally
2531   // unsafe if I have intervening uses...  Also disallowed for StoreCM
2532   // since they must follow each StoreP operation.  Redundant StoreCMs
2533   // are eliminated just before matching in final_graph_reshape.
2534   {
2535     Node* st = mem;
2536     // If Store 'st' has more than one use, we cannot fold 'st' away.
2537     // For example, 'st' might be the final state at a conditional
2538     // return.  Or, 'st' might be used by some node which is live at
2539     // the same time 'st' is live, which might be unschedulable.  So,
2540     // require exactly ONE user until such time as we clone 'mem' for
2541     // each of 'mem's uses (thus making the exactly-1-user-rule hold
2542     // true).
2543     while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Op_StoreCM) {
2544       // Looking at a dead closed cycle of memory?
2545       assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
2546       assert(Opcode() == st->Opcode() ||
2547              st->Opcode() == Op_StoreVector ||
2548              Opcode() == Op_StoreVector ||
2549              phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
2550              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
2551              (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy

2552              (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
2553              "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
2554 
2555       if (st->in(MemNode::Address)->eqv_uncast(address) &&
2556           st->as_Store()->memory_size() <= this->memory_size()) {
2557         Node* use = st->raw_out(0);
2558         phase->igvn_rehash_node_delayed(use);
2559         if (can_reshape) {
2560           use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase->is_IterGVN());
2561         } else {
2562           // It's OK to do this in the parser, since DU info is always accurate,
2563           // and the parser always refers to nodes via SafePointNode maps.
2564           use->set_req(MemNode::Memory, st->in(MemNode::Memory));
2565         }
2566         return this;
2567       }
2568       st = st->in(MemNode::Memory);
2569     }
2570   }
2571 

2617   // Load then Store?  Then the Store is useless
2618   if (val->is_Load() &&
2619       val->in(MemNode::Address)->eqv_uncast(adr) &&
2620       val->in(MemNode::Memory )->eqv_uncast(mem) &&
2621       val->as_Load()->store_Opcode() == Opcode()) {
2622     result = mem;
2623   }
2624 
2625   // Two stores in a row of the same value?
2626   if (result == this &&
2627       mem->is_Store() &&
2628       mem->in(MemNode::Address)->eqv_uncast(adr) &&
2629       mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
2630       mem->Opcode() == Opcode()) {
2631     result = mem;
2632   }
2633 
2634   // Store of zero anywhere into a freshly-allocated object?
2635   // Then the store is useless.
2636   // (It must already have been captured by the InitializeNode.)
2637   if (result == this &&
2638       ReduceFieldZeroing && phase->type(val)->is_zero_type()) {
2639     // a newly allocated object is already all-zeroes everywhere
2640     if (mem->is_Proj() && mem->in(0)->is_Allocate()) {


2641       result = mem;
2642     }
2643 
2644     if (result == this) {
2645       // the store may also apply to zero-bits in an earlier object
2646       Node* prev_mem = find_previous_store(phase);
2647       // Steps (a), (b):  Walk past independent stores to find an exact match.
2648       if (prev_mem != NULL) {
2649         Node* prev_val = can_see_stored_value(prev_mem, phase);
2650         if (prev_val != NULL && phase->eqv(prev_val, val)) {
2651           // prev_val and val might differ by a cast; it would be good
2652           // to keep the more informative of the two.
2653           result = mem;








2654         }
2655       }
2656     }
2657   }
2658 
2659   if (result != this && phase->is_IterGVN() != NULL) {
2660     MemBarNode* trailing = trailing_membar();
2661     if (trailing != NULL) {
2662 #ifdef ASSERT
2663       const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();
2664       assert(t_oop == NULL || t_oop->is_known_instance_field(), "only for non escaping objects");
2665 #endif
2666       PhaseIterGVN* igvn = phase->is_IterGVN();
2667       trailing->remove(igvn);
2668     }
2669   }
2670 
2671   return result;
2672 }
2673 

// Clearing a short array is faster with stores
// Replace a small constant-length ClearArray by an explicit chain of
// 8-byte zero stores; large or non-constant clears are left for the
// matcher/assembler to turn into a bulk hardware clear.
Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Already know this is a large node, do not try to ideal it
  if (!IdealizeClearArrayNode || _is_large) return NULL;

  const int unit = BytesPerLong;
  // in(2) is the clear count; only constant counts can be expanded here.
  const TypeX* t = phase->type(in(2))->isa_intptr_t();
  if (!t)  return NULL;
  if (!t->is_con())  return NULL;
  intptr_t raw_count = t->get_con();
  intptr_t size = raw_count;
  // The count may be in words or bytes depending on the platform matcher.
  if (!Matcher::init_array_count_is_in_bytes) size *= unit;
  // Clearing nothing uses the Identity call.
  // Negative clears are possible on dead ClearArrays
  // (see jck test stmt114.stmt11402.val).
  if (size <= 0 || size % unit != 0)  return NULL;
  intptr_t count = size / unit;
  // Length too long; communicate this to matchers and assemblers.
  // Assemblers are responsible to produce fast hardware clears for it.
  if (size > InitArrayShortSize) {
    return new ClearArrayNode(in(0), in(1), in(2), in(3), true);
  }
  Node *mem = in(1);
  if( phase->type(mem)==Type::TOP ) return NULL;
  Node *adr = in(3);
  const Type* at = phase->type(adr);
  if( at==Type::TOP ) return NULL;
  const TypePtr* atp = at->isa_ptr();
  // adjust atp to be the correct array element address type
  if (atp == NULL)  atp = TypePtr::BOTTOM;
  else              atp = atp->add_offset(Type::OffsetBot);
  // Get base for derived pointer purposes
  if( adr->Opcode() != Op_AddP ) Unimplemented();
  Node *base = adr->in(1);

  // Emit the first zero store, then one more per remaining double-word,
  // bumping the address by 8 bytes each iteration and chaining memory.
  Node *zero = phase->makecon(TypeLong::ZERO);
  Node *off  = phase->MakeConX(BytesPerLong);
  mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
  count--;
  while( count-- ) {
    mem = phase->transform(mem);
    adr = phase->transform(new AddPNode(base,adr,off));
    mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
  }
  // The last store is returned untransformed as this node's replacement.
  return mem;
}
2988 
2989 //----------------------------step_through----------------------------------
2990 // Return allocation input memory edge if it is different instance
2991 // or itself if it is the one we are looking for.
2992 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* phase) {
2993   Node* n = *np;
2994   assert(n->is_ClearArray(), "sanity");
2995   intptr_t offset;
2996   AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
2997   // This method is called only before Allocate nodes are expanded
2998   // during macro nodes expansion. Before that ClearArray nodes are
2999   // only generated in PhaseMacroExpand::generate_arraycopy() (before
3000   // Allocate nodes are expanded) which follows allocations.
3001   assert(alloc != NULL, "should have allocation");
3002   if (alloc->_idx == instance_id) {
3003     // Can not bypass initialization of the instance we are looking for.
3004     return false;
3005   }
3006   // Otherwise skip it.
3007   InitializeNode* init = alloc->initialization();
3008   if (init != NULL)
3009     *np = init->in(TypeFunc::Memory);
3010   else
3011     *np = alloc->in(TypeFunc::Memory);
3012   return true;
3013 }
3014 
//----------------------------clear_memory-------------------------------------
// Generate code to initialize object storage to zero.
// 'start_offset' is a compile-time constant; 'end_offset' is a node.
// If the start is only 4-byte aligned, emit a single 32-bit zero store
// first to reach 8-byte alignment, then delegate the rest to the
// node-offset overload (which emits a ClearArray).
Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                   intptr_t start_offset,
                                   Node* end_offset,
                                   PhaseGVN* phase) {
  intptr_t offset = start_offset;

  int unit = BytesPerLong;
  if ((offset % unit) != 0) {
    // Misaligned start: zero one 32-bit slot to round up to a double-word.
    Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
    adr = phase->transform(adr);
    const TypePtr* atp = TypeRawPtr::BOTTOM;
    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
    mem = phase->transform(mem);
    offset += BytesPerInt;
  }
  assert((offset % unit) == 0, "");

  // Initialize the remaining stuff, if any, with a ClearArray.
  return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
}
3037 
// Zero the bytes in [start_offset, end_offset) of 'dest' with a ClearArray
// node. Both offsets are nodes; they are assumed to be double-word aligned
// (callers establish this via the other overloads).
Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                   Node* start_offset,
                                   Node* end_offset,
                                   PhaseGVN* phase) {
  if (start_offset == end_offset) {
    // nothing to do
    // (pointer identity: the very same offset node means a zero-length span)
    return mem;
  }

  int unit = BytesPerLong;
  Node* zbase = start_offset;
  Node* zend  = end_offset;

  // Scale to the unit required by the CPU:
  // some platforms take the clear count in double-words rather than bytes.
  if (!Matcher::init_array_count_is_in_bytes) {
    Node* shift = phase->intcon(exact_log2(unit));
    zbase = phase->transform(new URShiftXNode(zbase, shift) );
    zend  = phase->transform(new URShiftXNode(zend,  shift) );
  }

  // Bulk clear double-words
  Node* zsize = phase->transform(new SubXNode(zend, zbase) );
  Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
  mem = new ClearArrayNode(ctl, mem, zsize, adr, false);
  return phase->transform(mem);
}
3064 
// Zero the bytes in [start_offset, end_offset) of 'dest', both offsets
// compile-time constants. The bulk is cleared in double-words; a trailing
// 32-bit remainder (end aligned to 4 but not 8 bytes) gets one final
// 32-bit zero store.
Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                   intptr_t start_offset,
                                   intptr_t end_offset,
                                   PhaseGVN* phase) {
  if (start_offset == end_offset) {
    // nothing to do
    return mem;
  }

  assert((end_offset % BytesPerInt) == 0, "odd end offset");
  // 'done_offset' is the end of the double-word-clearable region.
  intptr_t done_offset = end_offset;
  if ((done_offset % BytesPerLong) != 0) {
    done_offset -= BytesPerInt;
  }
  if (done_offset > start_offset) {
    mem = clear_memory(ctl, mem, dest,
                       start_offset, phase->MakeConX(done_offset), phase);
  }
  if (done_offset < end_offset) { // emit the final 32-bit store
    Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
    adr = phase->transform(adr);
    const TypePtr* atp = TypeRawPtr::BOTTOM;
    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
    mem = phase->transform(mem);
    done_offset += BytesPerInt;
  }
  assert(done_offset == end_offset, "");
  return mem;
}
3094 
3095 //=============================================================================
3096 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
3097   : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
3098     _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
3099 #ifdef ASSERT
3100   , _pair_idx(0)
3101 #endif
3102 {
3103   init_class_id(Class_MemBar);
3104   Node* top = C->top();
3105   init_req(TypeFunc::I_O,top);
3106   init_req(TypeFunc::FramePtr,top);
3107   init_req(TypeFunc::ReturnAdr,top);

3206       PhaseIterGVN* igvn = phase->is_IterGVN();
3207       remove(igvn);
3208       // Must return either the original node (now dead) or a new node
3209       // (Do not return a top here, since that would break the uniqueness of top.)
3210       return new ConINode(TypeInt::ZERO);
3211     }
3212   }
3213   return progress ? this : NULL;
3214 }
3215 
3216 //------------------------------Value------------------------------------------
3217 const Type* MemBarNode::Value(PhaseGVN* phase) const {
3218   if( !in(0) ) return Type::TOP;
3219   if( phase->type(in(0)) == Type::TOP )
3220     return Type::TOP;
3221   return TypeTuple::MEMBAR;
3222 }
3223 
3224 //------------------------------match------------------------------------------
3225 // Construct projections for memory.
3226 Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
3227   switch (proj->_con) {
3228   case TypeFunc::Control:
3229   case TypeFunc::Memory:
3230     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
3231   }
3232   ShouldNotReachHere();
3233   return NULL;
3234 }
3235 
3236 void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
3237   trailing->_kind = TrailingStore;
3238   leading->_kind = LeadingStore;
3239 #ifdef ASSERT
3240   trailing->_pair_idx = leading->_idx;
3241   leading->_pair_idx = leading->_idx;
3242 #endif
3243 }
3244 
3245 void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
3246   trailing->_kind = TrailingLoadStore;

3492   return (req() > RawStores);
3493 }
3494 
3495 void InitializeNode::set_complete(PhaseGVN* phase) {
3496   assert(!is_complete(), "caller responsibility");
3497   _is_complete = Complete;
3498 
3499   // After this node is complete, it contains a bunch of
3500   // raw-memory initializations.  There is no need for
3501   // it to have anything to do with non-raw memory effects.
3502   // Therefore, tell all non-raw users to re-optimize themselves,
3503   // after skipping the memory effects of this initialization.
3504   PhaseIterGVN* igvn = phase->is_IterGVN();
3505   if (igvn)  igvn->add_users_to_worklist(this);
3506 }
3507 
3508 // convenience function
3509 // return false if the init contains any stores already
3510 bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
3511   InitializeNode* init = initialization();
3512   if (init == NULL || init->is_complete())  return false;


3513   init->remove_extra_zeroes();
3514   // for now, if this allocation has already collected any inits, bail:
3515   if (init->is_non_zero())  return false;
3516   init->set_complete(phase);
3517   return true;
3518 }
3519 
3520 void InitializeNode::remove_extra_zeroes() {
3521   if (req() == RawStores)  return;
3522   Node* zmem = zero_memory();
3523   uint fill = RawStores;
3524   for (uint i = fill; i < req(); i++) {
3525     Node* n = in(i);
3526     if (n->is_top() || n == zmem)  continue;  // skip
3527     if (fill < i)  set_req(fill, n);          // compact
3528     ++fill;
3529   }
3530   // delete any empty spaces created:
3531   while (fill < req()) {
3532     del_req(fill);

4250         //   z's_done      12  16  16  16    12  16    12
4251         //   z's_needed    12  16  16  16    16  16    16
4252         //   zsize          0   0   0   0     4   0     4
4253         if (next_full_store < 0) {
4254           // Conservative tack:  Zero to end of current word.
4255           zeroes_needed = align_up(zeroes_needed, BytesPerInt);
4256         } else {
4257           // Zero to beginning of next fully initialized word.
4258           // Or, don't zero at all, if we are already in that word.
4259           assert(next_full_store >= zeroes_needed, "must go forward");
4260           assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
4261           zeroes_needed = next_full_store;
4262         }
4263       }
4264 
4265       if (zeroes_needed > zeroes_done) {
4266         intptr_t zsize = zeroes_needed - zeroes_done;
4267         // Do some incremental zeroing on rawmem, in parallel with inits.
4268         zeroes_done = align_down(zeroes_done, BytesPerInt);
4269         rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,


4270                                               zeroes_done, zeroes_needed,
4271                                               phase);
4272         zeroes_done = zeroes_needed;
4273         if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
4274           do_zeroing = false;   // leave the hole, next time
4275       }
4276     }
4277 
4278     // Collect the store and move on:
4279     phase->replace_input_of(st, MemNode::Memory, inits);
4280     inits = st;                 // put it on the linearized chain
4281     set_req(i, zmem);           // unhook from previous position
4282 
4283     if (zeroes_done == st_off)
4284       zeroes_done = next_init_off;
4285 
4286     assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
4287 
4288     #ifdef ASSERT
4289     // Various order invariants.  Weaker than stores_are_sane because

4309   remove_extra_zeroes();        // clear out all the zmems left over
4310   add_req(inits);
4311 
4312   if (!(UseTLAB && ZeroTLAB)) {
4313     // If anything remains to be zeroed, zero it all now.
4314     zeroes_done = align_down(zeroes_done, BytesPerInt);
4315     // if it is the last unused 4 bytes of an instance, forget about it
4316     intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
4317     if (zeroes_done + BytesPerLong >= size_limit) {
4318       AllocateNode* alloc = allocation();
4319       assert(alloc != NULL, "must be present");
4320       if (alloc != NULL && alloc->Opcode() == Op_Allocate) {
4321         Node* klass_node = alloc->in(AllocateNode::KlassNode);
4322         ciKlass* k = phase->type(klass_node)->is_klassptr()->klass();
4323         if (zeroes_done == k->layout_helper())
4324           zeroes_done = size_limit;
4325       }
4326     }
4327     if (zeroes_done < size_limit) {
4328       rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,


4329                                             zeroes_done, size_in_bytes, phase);
4330     }
4331   }
4332 
4333   set_complete(phase);
4334   return rawmem;
4335 }
4336 
4337 
4338 #ifdef ASSERT
4339 bool InitializeNode::stores_are_sane(PhaseTransform* phase) {
4340   if (is_complete())
4341     return true;                // stores could be anything at this point
4342   assert(allocation() != NULL, "must be present");
4343   intptr_t last_off = allocation()->minimum_header_size();
4344   for (uint i = InitializeNode::RawStores; i < req(); i++) {
4345     Node* st = in(i);
4346     intptr_t st_off = get_store_offset(st, phase);
4347     if (st_off < 0)  continue;  // ignore dead garbage
4348     if (last_off > st_off) {

   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciFlatArrayKlass.hpp"
  27 #include "classfile/javaClasses.hpp"
  28 #include "classfile/systemDictionary.hpp"
  29 #include "compiler/compileLog.hpp"
  30 #include "gc/shared/barrierSet.hpp"
  31 #include "gc/shared/c2/barrierSetC2.hpp"
  32 #include "memory/allocation.inline.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "oops/objArrayKlass.hpp"
  35 #include "opto/addnode.hpp"
  36 #include "opto/arraycopynode.hpp"
  37 #include "opto/cfgnode.hpp"
  38 #include "opto/compile.hpp"
  39 #include "opto/connode.hpp"
  40 #include "opto/convertnode.hpp"
  41 #include "opto/inlinetypenode.hpp"
  42 #include "opto/loopnode.hpp"
  43 #include "opto/machnode.hpp"
  44 #include "opto/matcher.hpp"
  45 #include "opto/memnode.hpp"
  46 #include "opto/mulnode.hpp"
  47 #include "opto/narrowptrnode.hpp"
  48 #include "opto/phaseX.hpp"
  49 #include "opto/regmask.hpp"
  50 #include "opto/rootnode.hpp"
  51 #include "utilities/align.hpp"
  52 #include "utilities/copy.hpp"
  53 #include "utilities/macros.hpp"
  54 #include "utilities/powerOfTwo.hpp"
  55 #include "utilities/vmError.hpp"
  56 
  57 // Portions of code courtesy of Clifford Click
  58 
  59 // Optimization - Graph Style
  60 
  61 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st);

 223       // clone the Phi with our address type
 224       result = mphi->split_out_instance(t_adr, igvn);
 225     } else {
 226       assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain");
 227     }
 228   }
 229   return result;
 230 }
 231 
 232 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem,  const TypePtr *tp, const TypePtr *adr_check, outputStream *st) {
 233   uint alias_idx = phase->C->get_alias_index(tp);
 234   Node *mem = mmem;
 235 #ifdef ASSERT
 236   {
 237     // Check that current type is consistent with the alias index used during graph construction
 238     assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx");
 239     bool consistent =  adr_check == NULL || adr_check->empty() ||
 240                        phase->C->must_alias(adr_check, alias_idx );
 241     // Sometimes dead array references collapse to a[-1], a[-2], or a[-3]
 242     if( !consistent && adr_check != NULL && !adr_check->empty() &&
 243         tp->isa_aryptr() &&        tp->offset() == Type::OffsetBot &&
 244         adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot &&
 245         ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() ||
 246           adr_check->offset() == oopDesc::klass_offset_in_bytes() ||
 247           adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) {
 248       // don't assert if it is dead code.
 249       consistent = true;
 250     }
 251     if( !consistent ) {
 252       st->print("alias_idx==%d, adr_check==", alias_idx);
 253       if( adr_check == NULL ) {
 254         st->print("NULL");
 255       } else {
 256         adr_check->dump();
 257       }
 258       st->cr();
 259       print_alias_types();
 260       assert(consistent, "adr_check must match alias idx");
 261     }
 262   }
 263 #endif

 830          "use LoadKlassNode instead");
 831   assert(!(adr_type->isa_aryptr() &&
 832            adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
 833          "use LoadRangeNode instead");
 834   // Check control edge of raw loads
 835   assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
 836           // oop will be recorded in oop map if load crosses safepoint
 837           rt->isa_oopptr() || is_immutable_value(adr),
 838           "raw memory operations should have control edge");
 839   LoadNode* load = NULL;
 840   switch (bt) {
 841   case T_BOOLEAN: load = new LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 842   case T_BYTE:    load = new LoadBNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 843   case T_INT:     load = new LoadINode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 844   case T_CHAR:    load = new LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 845   case T_SHORT:   load = new LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
 846   case T_LONG:    load = new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency); break;
 847   case T_FLOAT:   load = new LoadFNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
 848   case T_DOUBLE:  load = new LoadDNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
 849   case T_ADDRESS: load = new LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  mo, control_dependency); break;
 850   case T_INLINE_TYPE:
 851   case T_OBJECT:
 852 #ifdef _LP64
 853     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
 854       load = new LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo, control_dependency);
 855     } else
 856 #endif
 857     {
 858       assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
 859       load = new LoadPNode(ctl, mem, adr, adr_type, rt->is_ptr(), mo, control_dependency);
 860     }
 861     break;
 862   default:
 863     ShouldNotReachHere();
 864     break;
 865   }
 866   assert(load != NULL, "LoadNode should have been created");
 867   if (unaligned) {
 868     load->set_unaligned_access();
 869   }
 870   if (mismatched) {

 958 
 959     LoadNode* ld = clone()->as_Load();
 960     Node* addp = in(MemNode::Address)->clone();
 961     if (ac->as_ArrayCopy()->is_clonebasic()) {
 962       assert(ld_alloc != NULL, "need an alloc");
 963       assert(addp->is_AddP(), "address must be addp");
 964       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 965       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
 966       assert(bs->step_over_gc_barrier(addp->in(AddPNode::Address)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern");
 967       addp->set_req(AddPNode::Base, src);
 968       addp->set_req(AddPNode::Address, src);
 969     } else {
 970       assert(ac->as_ArrayCopy()->is_arraycopy_validated() ||
 971              ac->as_ArrayCopy()->is_copyof_validated() ||
 972              ac->as_ArrayCopy()->is_copyofrange_validated(), "only supported cases");
 973       assert(addp->in(AddPNode::Base) == addp->in(AddPNode::Address), "should be");
 974       addp->set_req(AddPNode::Base, src);
 975       addp->set_req(AddPNode::Address, src);
 976 
 977       const TypeAryPtr* ary_t = phase->type(in(MemNode::Address))->isa_aryptr();
 978       BasicType ary_elem = ary_t->klass()->as_array_klass()->element_type()->basic_type();
 979       uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
 980       uint shift  = exact_log2(type2aelembytes(ary_elem));
 981       if (ary_t->klass()->is_flat_array_klass()) {
 982         ciFlatArrayKlass* vak = ary_t->klass()->as_flat_array_klass();
 983         shift = vak->log2_element_size();
 984       }
 985 
 986       Node* diff = phase->transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
 987 #ifdef _LP64
 988       diff = phase->transform(new ConvI2LNode(diff));
 989 #endif
 990       diff = phase->transform(new LShiftXNode(diff, phase->intcon(shift)));
 991 
 992       Node* offset = phase->transform(new AddXNode(addp->in(AddPNode::Offset), diff));
 993       addp->set_req(AddPNode::Offset, offset);
 994     }
 995     addp = phase->transform(addp);
 996 #ifdef ASSERT
 997     const TypePtr* adr_type = phase->type(addp)->is_ptr();
 998     ld->_adr_type = adr_type;
 999 #endif
1000     ld->set_req(MemNode::Address, addp);
1001     ld->set_req(0, ctl);
1002     ld->set_req(MemNode::Memory, mem);
1003     // load depends on the tests that validate the arraycopy
1004     ld->_control_dependency = UnknownControl;

1092         // the same pointer-and-offset that we stored to.
1093         // Casted version may carry a dependency and it is respected.
1094         // Thus, we are able to replace L by V.
1095       }
1096       // Now prove that we have a LoadQ matched to a StoreQ, for some Q.
1097       if (store_Opcode() != st->Opcode())
1098         return NULL;
1099       return st->in(MemNode::ValueIn);
1100     }
1101 
1102     // A load from a freshly-created object always returns zero.
1103     // (This can happen after LoadNode::Ideal resets the load's memory input
1104     // to find_captured_store, which returned InitializeNode::zero_memory.)
1105     if (st->is_Proj() && st->in(0)->is_Allocate() &&
1106         (st->in(0) == ld_alloc) &&
1107         (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) {
1108       // return a zero value for the load's basic type
1109       // (This is one of the few places where a generic PhaseTransform
1110       // can create new nodes.  Think of it as lazily manifesting
1111       // virtually pre-existing constants.)
1112       assert(memory_type() != T_INLINE_TYPE, "should not be used for inline types");
1113       Node* default_value = ld_alloc->in(AllocateNode::DefaultValue);
1114       if (default_value != NULL) {
1115         return default_value;
1116       }
1117       assert(ld_alloc->in(AllocateNode::RawDefaultValue) == NULL, "default value may not be null");
1118       return phase->zerocon(memory_type());
1119     }
1120 
1121     // A load from an initialization barrier can match a captured store.
1122     if (st->is_Proj() && st->in(0)->is_Initialize()) {
1123       InitializeNode* init = st->in(0)->as_Initialize();
1124       AllocateNode* alloc = init->allocation();
1125       if ((alloc != NULL) && (alloc == ld_alloc)) {
1126         // examine a captured store value
1127         st = init->find_captured_store(ld_off, memory_size(), phase);
1128         if (st != NULL) {
1129           continue;             // take one more trip around
1130         }
1131       }
1132     }
1133 
1134     // Load boxed value from result of valueOf() call is input parameter.
1135     if (this->is_Load() && ld_adr->is_AddP() &&
1136         (tp != NULL) && tp->is_ptr_to_boxed_value()) {
1137       intptr_t ignore = 0;

1155 //----------------------is_instance_field_load_with_local_phi------------------
1156 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1157   if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1158       in(Address)->is_AddP() ) {
1159     const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1160     // Only instances and boxed values.
1161     if( t_oop != NULL &&
1162         (t_oop->is_ptr_to_boxed_value() ||
1163          t_oop->is_known_instance_field()) &&
1164         t_oop->offset() != Type::OffsetBot &&
1165         t_oop->offset() != Type::OffsetTop) {
1166       return true;
1167     }
1168   }
1169   return false;
1170 }
1171 
1172 //------------------------------Identity---------------------------------------
1173 // Loads are identity if previous store is to same address
1174 Node* LoadNode::Identity(PhaseGVN* phase) {
1175   // Loading from an InlineTypePtr? The InlineTypePtr has the values of
1176   // all fields as input. Look for the field with matching offset.
1177   Node* addr = in(Address);
1178   intptr_t offset;
1179   Node* base = AddPNode::Ideal_base_and_offset(addr, phase, offset);
1180   if (base != NULL && base->is_InlineTypePtr() && offset > oopDesc::klass_offset_in_bytes()) {
1181     Node* value = base->as_InlineTypePtr()->field_value_by_offset((int)offset, true);
1182     if (value->is_InlineType()) {
1183       // Non-flattened inline type field
1184       InlineTypeNode* vt = value->as_InlineType();
1185       if (vt->is_allocated(phase)) {
1186         value = vt->get_oop();
1187       } else {
1188         // Not yet allocated, bail out
1189         value = NULL;
1190       }
1191     }
1192     if (value != NULL) {
1193       if (Opcode() == Op_LoadN) {
1194         // Encode oop value if we are loading a narrow oop
1195         assert(!phase->type(value)->isa_narrowoop(), "should already be decoded");
1196         value = phase->transform(new EncodePNode(value, bottom_type()));
1197       }
1198       return value;
1199     }
1200   }
1201 
1202   // If the previous store-maker is the right kind of Store, and the store is
1203   // to the same address, then we are equal to the value stored.
1204   Node* mem = in(Memory);
1205   Node* value = can_see_stored_value(mem, phase);
1206   if( value ) {
1207     // byte, short & char stores truncate naturally.
1208     // A load has to load the truncated value which requires
1209     // some sort of masking operation and that requires an
1210     // Ideal call instead of an Identity call.
1211     if (memory_size() < BytesPerInt) {
1212       // If the input to the store does not fit with the load's result type,
1213       // it must be truncated via an Ideal call.
1214       if (!phase->type(value)->higher_equal(phase->type(this)))
1215         return this;
1216     }
1217     // (This works even when value is a Con, but LoadNode::Value
1218     // usually runs first, producing the singleton type of the Con.)
1219     return value;
1220   }
1221 

1739   // fold up, do so.
1740   Node* prev_mem = find_previous_store(phase);
1741   if (prev_mem != NULL) {
1742     Node* value = can_see_arraycopy_value(prev_mem, phase);
1743     if (value != NULL) {
1744       return value;
1745     }
1746   }
1747   // Steps (a), (b):  Walk past independent stores to find an exact match.
1748   if (prev_mem != NULL && prev_mem != in(MemNode::Memory)) {
1749     // (c) See if we can fold up on the spot, but don't fold up here.
1750     // Fold-up might require truncation (for LoadB/LoadS/LoadUS) or
1751     // just return a prior value, which is done by Identity calls.
1752     if (can_see_stored_value(prev_mem, phase)) {
1753       // Make ready for step (d):
1754       set_req(MemNode::Memory, prev_mem);
1755       return this;
1756     }
1757   }
1758 
1759   AllocateNode* alloc = AllocateNode::Ideal_allocation(address, phase);
1760   if (alloc != NULL && mem->is_Proj() &&
1761       mem->in(0) != NULL &&
1762       mem->in(0) == alloc->initialization() &&
1763       Opcode() == Op_LoadX &&
1764       alloc->initialization()->proj_out_or_null(0) != NULL) {
1765     InitializeNode* init = alloc->initialization();
1766     Node* control = init->proj_out(0);
1767     return alloc->make_ideal_mark(phase, control, mem);
1768   }
1769 
1770   return progress ? this : NULL;
1771 }
1772 
1773 // Helper to recognize certain Klass fields which are invariant across
1774 // some group of array types (e.g., int[] or all T[] where T < Object).
1775 const Type*
1776 LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
1777                                  ciKlass* klass) const {
1778   if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) {
1779     // The field is Klass::_modifier_flags.  Return its (constant) value.
1780     // (Folds up the 2nd indirection in aClassConstant.getModifiers().)
1781     assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags");
1782     return TypeInt::make(klass->modifier_flags());
1783   }
1784   if (tkls->offset() == in_bytes(Klass::access_flags_offset())) {
1785     // The field is Klass::_access_flags.  Return its (constant) value.
1786     // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).)
1787     assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags");

1839       }
1840     }
1841 
1842     // Don't do this for integer types. There is only potential profit if
1843     // the element type t is lower than _type; that is, for int types, if _type is
1844     // more restrictive than t.  This only happens here if one is short and the other
1845     // char (both 16 bits), and in those cases we've made an intentional decision
1846     // to use one kind of load over the other. See AndINode::Ideal and 4965907.
1847     // Also, do not try to narrow the type for a LoadKlass, regardless of offset.
1848     //
1849     // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8))
1850     // where the _gvn.type of the AddP is wider than 8.  This occurs when an earlier
1851     // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been
1852     // subsumed by p1.  If p1 is on the worklist but has not yet been re-transformed,
1853     // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any.
1854     // In fact, that could have been the original type of p1, and p1 could have
1855     // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
1856     // expression (LShiftL quux 3) independently optimized to the constant 8.
1857     if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
1858         && (_type->isa_vect() == NULL)
1859         && t->isa_inlinetype() == NULL
1860         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
1861       // t might actually be lower than _type, if _type is a unique
1862       // concrete subclass of abstract class t.
1863       if (off_beyond_header || off == Type::OffsetBot) {  // is the offset beyond the header?
1864         const Type* jt = t->join_speculative(_type);
1865         // In any case, do not allow the join, per se, to empty out the type.
1866         if (jt->empty() && !t->empty()) {
1867           // This can happen if a interface-typed array narrows to a class type.
1868           jt = _type;
1869         }
1870 #ifdef ASSERT
1871         if (phase->C->eliminate_boxing() && adr->is_AddP()) {
1872           // The pointers in the autobox arrays are always non-null
1873           Node* base = adr->in(AddPNode::Base);
1874           if ((base != NULL) && base->is_DecodeN()) {
1875             // Get LoadN node which loads IntegerCache.cache field
1876             base = base->in(1);
1877           }
1878           if ((base != NULL) && base->is_Con()) {
1879             const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr();
1880             if ((base_type != NULL) && base_type->is_autobox_cache()) {
1881               // It could be narrow oop
1882               assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity");
1883             }
1884           }
1885         }
1886 #endif
1887         return jt;
1888       }
1889     }
1890   } else if (tp->base() == Type::InstPtr) {
1891     assert( off != Type::OffsetBot ||
1892             // arrays can be cast to Objects
1893             tp->is_oopptr()->klass()->is_java_lang_Object() ||
1894             tp->is_oopptr()->klass() == ciEnv::current()->Class_klass() ||
1895             // unsafe field access may not have a constant offset
1896             C->has_unsafe_access(),
1897             "Field accesses must be precise" );
1898     // For oop loads, we expect the _type to be precise.
1899 
1900     const TypeInstPtr* tinst = tp->is_instptr();
1901     BasicType bt = memory_type();
1902 
1903     // Optimize loads from constant fields.
1904     ciObject* const_oop = tinst->const_oop();
1905     if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != NULL && const_oop->is_instance()) {
1906       ciType* mirror_type = const_oop->as_instance()->java_mirror_type();
1907       if (mirror_type != NULL && mirror_type->is_inlinetype()) {
1908         ciInlineKlass* vk = mirror_type->as_inline_klass();
1909         if (off == vk->default_value_offset()) {
1910           // Loading a special hidden field that contains the oop of the default inline type
1911           const Type* const_oop = TypeInstPtr::make(vk->default_instance());
1912           return (bt == T_NARROWOOP) ? const_oop->make_narrowoop() : const_oop;
1913         }
1914       }
1915       const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), bt);
1916       if (con_type != NULL) {
1917         return con_type;
1918       }
1919     }
1920   } else if (tp->base() == Type::KlassPtr) {
1921     assert( off != Type::OffsetBot ||
1922             // arrays can be cast to Objects
1923             tp->is_klassptr()->klass() == NULL ||
1924             tp->is_klassptr()->klass()->is_java_lang_Object() ||
1925             // also allow array-loading from the primary supertype
1926             // array during subtype checks
1927             Opcode() == Op_LoadKlass,
1928             "Field accesses must be precise" );
1929     // For klass/static loads, we expect the _type to be precise
1930   } else if (tp->base() == Type::RawPtr && !StressReflectiveCode) {
1931     if (adr->is_Load() && off == 0) {
1932       /* With mirrors being an indirect in the Klass*
1933        * the VM is now using two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
1934        * The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
1935        *
1936        * So check the type and klass of the node before the LoadP.
1937        */
1938       Node* adr2 = adr->in(MemNode::Address);
1939       const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
1940       if (tkls != NULL) {
1941         ciKlass* klass = tkls->klass();
1942         if (klass != NULL && klass->is_loaded() && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
1943           assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
1944           assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
1945           return TypeInstPtr::make(klass->java_mirror());
1946         }
1947       }
1948     } else {
1949       // Check for a load of the default value offset from the InlineKlassFixedBlock:
1950       // LoadI(LoadP(inline_klass, adr_inlineklass_fixed_block_offset), default_value_offset_offset)
1951       intptr_t offset = 0;
1952       Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
1953       if (base != NULL && base->is_Load() && offset == in_bytes(InlineKlass::default_value_offset_offset())) {
1954         const TypeKlassPtr* tkls = phase->type(base->in(MemNode::Address))->isa_klassptr();
1955         if (tkls != NULL && tkls->is_loaded() && tkls->klass_is_exact() && tkls->isa_inlinetype() &&
1956             tkls->offset() == in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset())) {
1957           assert(base->Opcode() == Op_LoadP, "must load an oop from klass");
1958           assert(Opcode() == Op_LoadI, "must load an int from fixed block");
1959           return TypeInt::make(tkls->klass()->as_inline_klass()->default_value_offset());
1960         }
1961       }
1962     }
1963   }
1964 
1965   const TypeKlassPtr *tkls = tp->isa_klassptr();
1966   if (tkls != NULL && !StressReflectiveCode) {
1967     ciKlass* klass = tkls->klass();
1968     if (tkls->is_loaded() && tkls->klass_is_exact()) {
1969       // We are loading a field from a Klass metaobject whose identity
1970       // is known at compile time (the type is "exact" or "precise").
1971       // Check for fields we know are maintained as constants by the VM.
1972       if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) {
1973         // The field is Klass::_super_check_offset.  Return its (constant) value.
1974         // (Folds up type checking code.)
1975         assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset");
1976         return TypeInt::make(klass->super_check_offset());
1977       }
1978       // Compute index into primary_supers array
1979       juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
1980       // Check for overflowing; use unsigned compare to handle the negative case.
1981       if( depth < ciKlass::primary_super_limit() ) {
1982         // The field is an element of Klass::_primary_supers.  Return its (constant) value.
1983         // (Folds up type checking code.)
1984         assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
1985         ciKlass *ss = klass->super_of_depth(depth);
1986         return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
1987       }
1988       const Type* aift = load_array_final_field(tkls, klass);
1989       if (aift != NULL)  return aift;
1990     }
1991 
1992     // We can still check if we are loading from the primary_supers array at a
1993     // shallow enough depth.  Even though the klass is not exact, entries less
1994     // than or equal to its super depth are correct.
1995     if (tkls->is_loaded()) {
1996       ciType *inner = klass;
1997       while( inner->is_obj_array_klass() )
1998         inner = inner->as_obj_array_klass()->base_element_type();
1999       if( inner->is_instance_klass() &&
2000           !inner->as_instance_klass()->flags().is_interface() ) {
2001         // Compute index into primary_supers array
2002         juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*);
2003         // Check for overflowing; use unsigned compare to handle the negative case.
2004         if( depth < ciKlass::primary_super_limit() &&
2005             depth <= klass->super_depth() ) { // allow self-depth checks to handle self-check case
2006           // The field is an element of Klass::_primary_supers.  Return its (constant) value.
2007           // (Folds up type checking code.)
2008           assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers");
2009           ciKlass *ss = klass->super_of_depth(depth);
2010           return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR;
2011         }
2012       }
2013     }
2014 
2015     // If the type is enough to determine that the thing is not an array,

2180   return LoadNode::Ideal(phase, can_reshape);
2181 }
2182 
2183 const Type* LoadSNode::Value(PhaseGVN* phase) const {
2184   Node* mem = in(MemNode::Memory);
2185   Node* value = can_see_stored_value(mem,phase);
2186   if (value != NULL && value->is_Con() &&
2187       !value->bottom_type()->higher_equal(_type)) {
2188     // If the input to the store does not fit with the load's result type,
2189     // it must be truncated. We can't delay until Ideal call since
2190     // a singleton Value is needed for split_thru_phi optimization.
2191     int con = value->get_int();
2192     return TypeInt::make((con << 16) >> 16);
2193   }
2194   return LoadNode::Value(phase);
2195 }
2196 
2197 //=============================================================================
2198 //----------------------------LoadKlassNode::make------------------------------
2199 // Polymorphic factory method:
2200 Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
2201                           const TypeKlassPtr* tk) {
2202   // sanity check the alias category against the created node type
2203   const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
2204   assert(adr_type != NULL, "expecting TypeKlassPtr");
2205 #ifdef _LP64
2206   if (adr_type->is_ptr_to_narrowklass()) {
2207     assert(UseCompressedClassPointers, "no compressed klasses");
2208     Node* load_klass = gvn.transform(new LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
2209     return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2210   }
2211 #endif
2212   assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
2213   return new LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
2214 }
2215 
//------------------------------Value------------------------------------------
// Compute the type of the loaded klass; delegates to the helper shared with
// LoadNKlassNode.
const Type* LoadKlassNode::Value(PhaseGVN* phase) const {
  return klass_value_common(phase);
}
2220 
2221 // In most cases, LoadKlassNode does not have the control input set. If the control

2268     }
2269     if( !ik->is_loaded() )
2270       return _type;             // Bail out if not loaded
2271     if (offset == oopDesc::klass_offset_in_bytes()) {
2272       if (tinst->klass_is_exact()) {
2273         return TypeKlassPtr::make(ik);
2274       }
2275       // See if we can become precise: no subklasses and no interface
2276       // (Note:  We need to support verified interfaces.)
2277       if (!ik->is_interface() && !ik->has_subklass()) {
2278         // Add a dependence; if any subclass added we need to recompile
2279         if (!ik->is_final()) {
2280           // %%% should use stronger assert_unique_concrete_subtype instead
2281           phase->C->dependencies()->assert_leaf_type(ik);
2282         }
2283         // Return precise klass
2284         return TypeKlassPtr::make(ik);
2285       }
2286 
2287       // Return root of possible klass
2288       return TypeKlassPtr::make(TypePtr::NotNull, ik, Type::Offset(0), tinst->flatten_array());
2289     }
2290   }
2291 
2292   // Check for loading klass from an array
2293   const TypeAryPtr *tary = tp->isa_aryptr();
2294   if (tary != NULL) {
2295     ciKlass *tary_klass = tary->klass();
2296     if (tary_klass != NULL   // can be NULL when at BOTTOM or TOP
2297         && tary->offset() == oopDesc::klass_offset_in_bytes()) {
2298       if (tary->klass_is_exact()) {
2299         return TypeKlassPtr::make(tary_klass);
2300       }
2301       ciArrayKlass* ak = tary_klass->as_array_klass();
2302       // If the klass is an object array, we defer the question to the
2303       // array component klass.
2304       if (ak->is_obj_array_klass()) {
2305         assert(ak->is_loaded(), "");
2306         ciKlass *base_k = ak->as_obj_array_klass()->base_element_klass();
2307         if (base_k->is_loaded() && base_k->is_instance_klass()) {
2308           ciInstanceKlass *ik = base_k->as_instance_klass();
2309           // See if we can become precise: no subklasses and no interface
2310           if (!ik->is_interface() && !ik->has_subklass()) {
2311             // Add a dependence; if any subclass added we need to recompile
2312             if (!ik->is_final()) {
2313               phase->C->dependencies()->assert_leaf_type(ik);
2314             }
2315             // Return precise array klass
2316             return TypeKlassPtr::make(ak);
2317           }
2318         }
2319         return TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
2320       } else if (ak->is_type_array_klass()) {

2321         return TypeKlassPtr::make(ak); // These are always precise
2322       }
2323     }
2324   }
2325 
2326   // Check for loading klass from an array klass
2327   const TypeKlassPtr *tkls = tp->isa_klassptr();
2328   if (tkls != NULL && !StressReflectiveCode) {
2329     if (!tkls->is_loaded()) {

2330       return _type;             // Bail out if not loaded
2331     }
2332     ciKlass* klass = tkls->klass();
2333     if( klass->is_obj_array_klass() &&
2334         tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2335       ciKlass* elem = klass->as_obj_array_klass()->element_klass();
2336       // // Always returning precise element type is incorrect,
2337       // // e.g., element type could be object and array may contain strings
2338       // return TypeKlassPtr::make(TypePtr::Constant, elem, 0);
2339 
2340       // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2341       // according to the element type's subclassing.
2342       return TypeKlassPtr::make(tkls->ptr(), elem, Type::Offset(0));
2343     } else if (klass->is_flat_array_klass() &&
2344                tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) {
2345       ciKlass* elem = klass->as_flat_array_klass()->element_klass();
2346       return TypeKlassPtr::make(tkls->ptr(), elem, Type::Offset(0), /* flatten_array= */ true);
2347     }
2348     if( klass->is_instance_klass() && tkls->klass_is_exact() &&
2349         tkls->offset() == in_bytes(Klass::super_offset())) {
2350       ciKlass* sup = klass->as_instance_klass()->super();
2351       // The field is Klass::_super.  Return its (constant) value.
2352       // (Folds up the 2nd indirection in aClassConstant.getSuperClass().)
2353       return sup ? TypeKlassPtr::make(sup) : TypePtr::NULL_PTR;
2354     }
2355   }
2356 
2357   // Bailout case
2358   return LoadNode::Value(phase);
2359 }
2360 
//------------------------------Identity---------------------------------------
// To clean up reflective code, simplify k.java_mirror.as_klass to plain k.
// Also feed through the klass in Allocate(...klass...)._klass.
// Delegates to the helper shared with LoadNKlassNode.
Node* LoadKlassNode::Identity(PhaseGVN* phase) {
  return klass_identity_common(phase);
}

2534 //=============================================================================
2535 //---------------------------StoreNode::make-----------------------------------
2536 // Polymorphic factory method:
2537 StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) {
2538   assert((mo == unordered || mo == release), "unexpected");
2539   Compile* C = gvn.C;
2540   assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
2541          ctl != NULL, "raw memory operations should have control edge");
2542 
2543   switch (bt) {
2544   case T_BOOLEAN: val = gvn.transform(new AndINode(val, gvn.intcon(0x1))); // Fall through to T_BYTE case
2545   case T_BYTE:    return new StoreBNode(ctl, mem, adr, adr_type, val, mo);
2546   case T_INT:     return new StoreINode(ctl, mem, adr, adr_type, val, mo);
2547   case T_CHAR:
2548   case T_SHORT:   return new StoreCNode(ctl, mem, adr, adr_type, val, mo);
2549   case T_LONG:    return new StoreLNode(ctl, mem, adr, adr_type, val, mo);
2550   case T_FLOAT:   return new StoreFNode(ctl, mem, adr, adr_type, val, mo);
2551   case T_DOUBLE:  return new StoreDNode(ctl, mem, adr, adr_type, val, mo);
2552   case T_METADATA:
2553   case T_ADDRESS:
2554   case T_INLINE_TYPE:
2555   case T_OBJECT:
2556 #ifdef _LP64
2557     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2558       val = gvn.transform(new EncodePNode(val, val->bottom_type()->make_narrowoop()));
2559       return new StoreNNode(ctl, mem, adr, adr_type, val, mo);
2560     } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
2561                (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
2562                 adr->bottom_type()->isa_rawptr())) {
2563       val = gvn.transform(new EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
2564       return new StoreNKlassNode(ctl, mem, adr, adr_type, val, mo);
2565     }
2566 #endif
2567     {
2568       return new StorePNode(ctl, mem, adr, adr_type, val, mo);
2569     }
2570   default:
2571     ShouldNotReachHere();
2572     return (StoreNode*)NULL;
2573   }
2574 }

2595   //return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address) + (uintptr_t)in(ValueIn);
2596 
2597   // Since they are not commoned, do not hash them:
2598   return NO_HASH;
2599 }
2600 
2601 //------------------------------Ideal------------------------------------------
2602 // Change back-to-back Store(, p, x) -> Store(m, p, y) to Store(m, p, x).
2603 // When a store immediately follows a relevant allocation/initialization,
2604 // try to capture it into the initialization, or hoist it above.
2605 Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2606   Node* p = MemNode::Ideal_common(phase, can_reshape);
2607   if (p)  return (p == NodeSentinel) ? NULL : p;
2608 
2609   Node* mem     = in(MemNode::Memory);
2610   Node* address = in(MemNode::Address);
2611   // Back-to-back stores to same address?  Fold em up.  Generally
2612   // unsafe if I have intervening uses...  Also disallowed for StoreCM
2613   // since they must follow each StoreP operation.  Redundant StoreCMs
2614   // are eliminated just before matching in final_graph_reshape.
2615   if (phase->C->get_adr_type(phase->C->get_alias_index(adr_type())) != TypeAryPtr::INLINES) {
2616     Node* st = mem;
2617     // If Store 'st' has more than one use, we cannot fold 'st' away.
2618     // For example, 'st' might be the final state at a conditional
2619     // return.  Or, 'st' might be used by some node which is live at
2620     // the same time 'st' is live, which might be unschedulable.  So,
2621     // require exactly ONE user until such time as we clone 'mem' for
2622     // each of 'mem's uses (thus making the exactly-1-user-rule hold
2623     // true).
2624     while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Op_StoreCM) {
2625       // Looking at a dead closed cycle of memory?
2626       assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
2627       assert(Opcode() == st->Opcode() ||
2628              st->Opcode() == Op_StoreVector ||
2629              Opcode() == Op_StoreVector ||
2630              phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
2631              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
2632              (Opcode() == Op_StoreI && st->Opcode() == Op_StoreL) || // initialization by arraycopy
2633              (Opcode() == Op_StoreL && st->Opcode() == Op_StoreN) ||
2634              (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
2635              "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);
2636 
2637       if (st->in(MemNode::Address)->eqv_uncast(address) &&
2638           st->as_Store()->memory_size() <= this->memory_size()) {
2639         Node* use = st->raw_out(0);
2640         phase->igvn_rehash_node_delayed(use);
2641         if (can_reshape) {
2642           use->set_req_X(MemNode::Memory, st->in(MemNode::Memory), phase->is_IterGVN());
2643         } else {
2644           // It's OK to do this in the parser, since DU info is always accurate,
2645           // and the parser always refers to nodes via SafePointNode maps.
2646           use->set_req(MemNode::Memory, st->in(MemNode::Memory));
2647         }
2648         return this;
2649       }
2650       st = st->in(MemNode::Memory);
2651     }
2652   }
2653 

2699   // Load then Store?  Then the Store is useless
2700   if (val->is_Load() &&
2701       val->in(MemNode::Address)->eqv_uncast(adr) &&
2702       val->in(MemNode::Memory )->eqv_uncast(mem) &&
2703       val->as_Load()->store_Opcode() == Opcode()) {
2704     result = mem;
2705   }
2706 
2707   // Two stores in a row of the same value?
2708   if (result == this &&
2709       mem->is_Store() &&
2710       mem->in(MemNode::Address)->eqv_uncast(adr) &&
2711       mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
2712       mem->Opcode() == Opcode()) {
2713     result = mem;
2714   }
2715 
2716   // Store of zero anywhere into a freshly-allocated object?
2717   // Then the store is useless.
2718   // (It must already have been captured by the InitializeNode.)
2719   if (result == this && ReduceFieldZeroing) {

2720     // a newly allocated object is already all-zeroes everywhere
2721     if (mem->is_Proj() && mem->in(0)->is_Allocate() &&
2722         (phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::DefaultValue) == val)) {
2723       assert(!phase->type(val)->is_zero_type() || mem->in(0)->in(AllocateNode::DefaultValue) == NULL, "storing null to inline type array is forbidden");
2724       result = mem;
2725     }
2726 
2727     if (result == this) {
2728       // the store may also apply to zero-bits in an earlier object
2729       Node* prev_mem = find_previous_store(phase);
2730       // Steps (a), (b):  Walk past independent stores to find an exact match.
2731       if (prev_mem != NULL) {
2732         Node* prev_val = can_see_stored_value(prev_mem, phase);
2733         if (prev_val != NULL && phase->eqv(prev_val, val)) {
2734           // prev_val and val might differ by a cast; it would be good
2735           // to keep the more informative of the two.
2736           if (phase->type(val)->is_zero_type()) {
2737             result = mem;
2738           } else if (prev_mem->is_Proj() && prev_mem->in(0)->is_Initialize()) {
2739             InitializeNode* init = prev_mem->in(0)->as_Initialize();
2740             AllocateNode* alloc = init->allocation();
2741             if (alloc != NULL && alloc->in(AllocateNode::DefaultValue) == val) {
2742               result = mem;
2743             }
2744           }
2745         }
2746       }
2747     }
2748   }
2749 
2750   if (result != this && phase->is_IterGVN() != NULL) {
2751     MemBarNode* trailing = trailing_membar();
2752     if (trailing != NULL) {
2753 #ifdef ASSERT
2754       const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr();
2755       assert(t_oop == NULL || t_oop->is_known_instance_field(), "only for non escaping objects");
2756 #endif
2757       PhaseIterGVN* igvn = phase->is_IterGVN();
2758       trailing->remove(igvn);
2759     }
2760   }
2761 
2762   return result;
2763 }
2764 

3033 // Clearing a short array is faster with stores
3034 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
3035   // Already know this is a large node, do not try to ideal it
3036   if (!IdealizeClearArrayNode || _is_large) return NULL;
3037 
3038   const int unit = BytesPerLong;
3039   const TypeX* t = phase->type(in(2))->isa_intptr_t();
3040   if (!t)  return NULL;
3041   if (!t->is_con())  return NULL;
3042   intptr_t raw_count = t->get_con();
3043   intptr_t size = raw_count;
3044   if (!Matcher::init_array_count_is_in_bytes) size *= unit;
3045   // Clearing nothing uses the Identity call.
3046   // Negative clears are possible on dead ClearArrays
3047   // (see jck test stmt114.stmt11402.val).
3048   if (size <= 0 || size % unit != 0)  return NULL;
3049   intptr_t count = size / unit;
3050   // Length too long; communicate this to matchers and assemblers.
3051   // Assemblers are responsible to produce fast hardware clears for it.
3052   if (size > InitArrayShortSize) {
3053     return new ClearArrayNode(in(0), in(1), in(2), in(3), in(4), true);
3054   }
3055   Node *mem = in(1);
3056   if( phase->type(mem)==Type::TOP ) return NULL;
3057   Node *adr = in(3);
3058   const Type* at = phase->type(adr);
3059   if( at==Type::TOP ) return NULL;
3060   const TypePtr* atp = at->isa_ptr();
3061   // adjust atp to be the correct array element address type
3062   if (atp == NULL)  atp = TypePtr::BOTTOM;
3063   else              atp = atp->add_offset(Type::OffsetBot);
3064   // Get base for derived pointer purposes
3065   if( adr->Opcode() != Op_AddP ) Unimplemented();
3066   Node *base = adr->in(1);
3067 
3068   Node *val = in(4);
3069   Node *off  = phase->MakeConX(BytesPerLong);
3070   mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
3071   count--;
3072   while( count-- ) {
3073     mem = phase->transform(mem);
3074     adr = phase->transform(new AddPNode(base,adr,off));
3075     mem = new StoreLNode(in(0), mem, adr, atp, val, MemNode::unordered, false);
3076   }
3077   return mem;
3078 }
3079 
3080 //----------------------------step_through----------------------------------
3081 // Return allocation input memory edge if it is different instance
3082 // or itself if it is the one we are looking for.
3083 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* phase) {
3084   Node* n = *np;
3085   assert(n->is_ClearArray(), "sanity");
3086   intptr_t offset;
3087   AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
3088   // This method is called only before Allocate nodes are expanded
3089   // during macro nodes expansion. Before that ClearArray nodes are
3090   // only generated in PhaseMacroExpand::generate_arraycopy() (before
3091   // Allocate nodes are expanded) which follows allocations.
3092   assert(alloc != NULL, "should have allocation");
3093   if (alloc->_idx == instance_id) {
3094     // Can not bypass initialization of the instance we are looking for.
3095     return false;
3096   }
3097   // Otherwise skip it.
3098   InitializeNode* init = alloc->initialization();
3099   if (init != NULL)
3100     *np = init->in(TypeFunc::Memory);
3101   else
3102     *np = alloc->in(TypeFunc::Memory);
3103   return true;
3104 }
3105 
3106 //----------------------------clear_memory-------------------------------------
3107 // Generate code to initialize object storage to zero.
3108 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
3109                                    Node* val,
3110                                    Node* raw_val,
3111                                    intptr_t start_offset,
3112                                    Node* end_offset,
3113                                    PhaseGVN* phase) {
3114   intptr_t offset = start_offset;
3115 
3116   int unit = BytesPerLong;
3117   if ((offset % unit) != 0) {
3118     Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
3119     adr = phase->transform(adr);
3120     const TypePtr* atp = TypeRawPtr::BOTTOM;
3121     if (val != NULL) {
3122       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
3123       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
3124     } else {
3125       assert(raw_val == NULL, "val may not be null");
3126       mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
3127     }
3128     mem = phase->transform(mem);
3129     offset += BytesPerInt;
3130   }
3131   assert((offset % unit) == 0, "");
3132 
3133   // Initialize the remaining stuff, if any, with a ClearArray.
3134   return clear_memory(ctl, mem, dest, raw_val, phase->MakeConX(offset), end_offset, phase);
3135 }
3136 
3137 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
3138                                    Node* raw_val,
3139                                    Node* start_offset,
3140                                    Node* end_offset,
3141                                    PhaseGVN* phase) {
3142   if (start_offset == end_offset) {
3143     // nothing to do
3144     return mem;
3145   }
3146 
3147   int unit = BytesPerLong;
3148   Node* zbase = start_offset;
3149   Node* zend  = end_offset;
3150 
3151   // Scale to the unit required by the CPU:
3152   if (!Matcher::init_array_count_is_in_bytes) {
3153     Node* shift = phase->intcon(exact_log2(unit));
3154     zbase = phase->transform(new URShiftXNode(zbase, shift) );
3155     zend  = phase->transform(new URShiftXNode(zend,  shift) );
3156   }
3157 
3158   // Bulk clear double-words
3159   Node* zsize = phase->transform(new SubXNode(zend, zbase) );
3160   Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
3161   if (raw_val == NULL) {
3162     raw_val = phase->MakeConX(0);
3163   }
3164   mem = new ClearArrayNode(ctl, mem, zsize, adr, raw_val, false);
3165   return phase->transform(mem);
3166 }
3167 
3168 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
3169                                    Node* val,
3170                                    Node* raw_val,
3171                                    intptr_t start_offset,
3172                                    intptr_t end_offset,
3173                                    PhaseGVN* phase) {
3174   if (start_offset == end_offset) {
3175     // nothing to do
3176     return mem;
3177   }
3178 
3179   assert((end_offset % BytesPerInt) == 0, "odd end offset");
3180   intptr_t done_offset = end_offset;
3181   if ((done_offset % BytesPerLong) != 0) {
3182     done_offset -= BytesPerInt;
3183   }
3184   if (done_offset > start_offset) {
3185     mem = clear_memory(ctl, mem, dest, val, raw_val,
3186                        start_offset, phase->MakeConX(done_offset), phase);
3187   }
3188   if (done_offset < end_offset) { // emit the final 32-bit store
3189     Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
3190     adr = phase->transform(adr);
3191     const TypePtr* atp = TypeRawPtr::BOTTOM;
3192     if (val != NULL) {
3193       assert(phase->type(val)->isa_narrowoop(), "should be narrow oop");
3194       mem = new StoreNNode(ctl, mem, adr, atp, val, MemNode::unordered);
3195     } else {
3196       assert(raw_val == NULL, "val may not be null");
3197       mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
3198     }
3199     mem = phase->transform(mem);
3200     done_offset += BytesPerInt;
3201   }
3202   assert(done_offset == end_offset, "");
3203   return mem;
3204 }
3205 
3206 //=============================================================================
3207 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
3208   : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
3209     _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone)
3210 #ifdef ASSERT
3211   , _pair_idx(0)
3212 #endif
3213 {
3214   init_class_id(Class_MemBar);
3215   Node* top = C->top();
3216   init_req(TypeFunc::I_O,top);
3217   init_req(TypeFunc::FramePtr,top);
3218   init_req(TypeFunc::ReturnAdr,top);

3317       PhaseIterGVN* igvn = phase->is_IterGVN();
3318       remove(igvn);
3319       // Must return either the original node (now dead) or a new node
3320       // (Do not return a top here, since that would break the uniqueness of top.)
3321       return new ConINode(TypeInt::ZERO);
3322     }
3323   }
3324   return progress ? this : NULL;
3325 }
3326 
3327 //------------------------------Value------------------------------------------
3328 const Type* MemBarNode::Value(PhaseGVN* phase) const {
3329   if( !in(0) ) return Type::TOP;
3330   if( phase->type(in(0)) == Type::TOP )
3331     return Type::TOP;
3332   return TypeTuple::MEMBAR;
3333 }
3334 
3335 //------------------------------match------------------------------------------
3336 // Construct projections for memory.
3337 Node *MemBarNode::match(const ProjNode *proj, const Matcher *m, const RegMask* mask) {
3338   switch (proj->_con) {
3339   case TypeFunc::Control:
3340   case TypeFunc::Memory:
3341     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
3342   }
3343   ShouldNotReachHere();
3344   return NULL;
3345 }
3346 
3347 void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) {
3348   trailing->_kind = TrailingStore;
3349   leading->_kind = LeadingStore;
3350 #ifdef ASSERT
3351   trailing->_pair_idx = leading->_idx;
3352   leading->_pair_idx = leading->_idx;
3353 #endif
3354 }
3355 
3356 void MemBarNode::set_load_store_pair(MemBarNode* leading, MemBarNode* trailing) {
3357   trailing->_kind = TrailingLoadStore;

3603   return (req() > RawStores);
3604 }
3605 
3606 void InitializeNode::set_complete(PhaseGVN* phase) {
3607   assert(!is_complete(), "caller responsibility");
3608   _is_complete = Complete;
3609 
3610   // After this node is complete, it contains a bunch of
3611   // raw-memory initializations.  There is no need for
3612   // it to have anything to do with non-raw memory effects.
3613   // Therefore, tell all non-raw users to re-optimize themselves,
3614   // after skipping the memory effects of this initialization.
3615   PhaseIterGVN* igvn = phase->is_IterGVN();
3616   if (igvn)  igvn->add_users_to_worklist(this);
3617 }
3618 
3619 // convenience function
3620 // return false if the init contains any stores already
3621 bool AllocateNode::maybe_set_complete(PhaseGVN* phase) {
3622   InitializeNode* init = initialization();
3623   if (init == NULL || init->is_complete()) {
3624     return false;
3625   }
3626   init->remove_extra_zeroes();
3627   // for now, if this allocation has already collected any inits, bail:
3628   if (init->is_non_zero())  return false;
3629   init->set_complete(phase);
3630   return true;
3631 }
3632 
3633 void InitializeNode::remove_extra_zeroes() {
3634   if (req() == RawStores)  return;
3635   Node* zmem = zero_memory();
3636   uint fill = RawStores;
3637   for (uint i = fill; i < req(); i++) {
3638     Node* n = in(i);
3639     if (n->is_top() || n == zmem)  continue;  // skip
3640     if (fill < i)  set_req(fill, n);          // compact
3641     ++fill;
3642   }
3643   // delete any empty spaces created:
3644   while (fill < req()) {
3645     del_req(fill);

4363         //   z's_done      12  16  16  16    12  16    12
4364         //   z's_needed    12  16  16  16    16  16    16
4365         //   zsize          0   0   0   0     4   0     4
4366         if (next_full_store < 0) {
4367           // Conservative tack:  Zero to end of current word.
4368           zeroes_needed = align_up(zeroes_needed, BytesPerInt);
4369         } else {
4370           // Zero to beginning of next fully initialized word.
4371           // Or, don't zero at all, if we are already in that word.
4372           assert(next_full_store >= zeroes_needed, "must go forward");
4373           assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary");
4374           zeroes_needed = next_full_store;
4375         }
4376       }
4377 
4378       if (zeroes_needed > zeroes_done) {
4379         intptr_t zsize = zeroes_needed - zeroes_done;
4380         // Do some incremental zeroing on rawmem, in parallel with inits.
4381         zeroes_done = align_down(zeroes_done, BytesPerInt);
4382         rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
4383                                               allocation()->in(AllocateNode::DefaultValue),
4384                                               allocation()->in(AllocateNode::RawDefaultValue),
4385                                               zeroes_done, zeroes_needed,
4386                                               phase);
4387         zeroes_done = zeroes_needed;
4388         if (zsize > InitArrayShortSize && ++big_init_gaps > 2)
4389           do_zeroing = false;   // leave the hole, next time
4390       }
4391     }
4392 
4393     // Collect the store and move on:
4394     phase->replace_input_of(st, MemNode::Memory, inits);
4395     inits = st;                 // put it on the linearized chain
4396     set_req(i, zmem);           // unhook from previous position
4397 
4398     if (zeroes_done == st_off)
4399       zeroes_done = next_init_off;
4400 
4401     assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any");
4402 
4403     #ifdef ASSERT
4404     // Various order invariants.  Weaker than stores_are_sane because

4424   remove_extra_zeroes();        // clear out all the zmems left over
4425   add_req(inits);
4426 
4427   if (!(UseTLAB && ZeroTLAB)) {
4428     // If anything remains to be zeroed, zero it all now.
4429     zeroes_done = align_down(zeroes_done, BytesPerInt);
4430     // if it is the last unused 4 bytes of an instance, forget about it
4431     intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
4432     if (zeroes_done + BytesPerLong >= size_limit) {
4433       AllocateNode* alloc = allocation();
4434       assert(alloc != NULL, "must be present");
4435       if (alloc != NULL && alloc->Opcode() == Op_Allocate) {
4436         Node* klass_node = alloc->in(AllocateNode::KlassNode);
4437         ciKlass* k = phase->type(klass_node)->is_klassptr()->klass();
4438         if (zeroes_done == k->layout_helper())
4439           zeroes_done = size_limit;
4440       }
4441     }
4442     if (zeroes_done < size_limit) {
4443       rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
4444                                             allocation()->in(AllocateNode::DefaultValue),
4445                                             allocation()->in(AllocateNode::RawDefaultValue),
4446                                             zeroes_done, size_in_bytes, phase);
4447     }
4448   }
4449 
4450   set_complete(phase);
4451   return rawmem;
4452 }
4453 
4454 
4455 #ifdef ASSERT
4456 bool InitializeNode::stores_are_sane(PhaseTransform* phase) {
4457   if (is_complete())
4458     return true;                // stores could be anything at this point
4459   assert(allocation() != NULL, "must be present");
4460   intptr_t last_off = allocation()->minimum_header_size();
4461   for (uint i = InitializeNode::RawStores; i < req(); i++) {
4462     Node* st = in(i);
4463     intptr_t st_off = get_store_offset(st, phase);
4464     if (st_off < 0)  continue;  // ignore dead garbage
4465     if (last_off > st_off) {
< prev index next >