/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciFlatArrayKlass.hpp"
#include "ci/ciInlineKlass.hpp"
#include "ci/ciUtilities.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subtypenode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/powerOfTwo.hpp"

//----------------------------GraphKit-----------------------------------------
// Main utility constructor.
GraphKit::GraphKit(JVMState* jvms, PhaseGVN* gvn)
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn((gvn != NULL) ? *gvn : *C->initial_gvn()),
    _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
{
  assert(gvn == NULL || !gvn->is_IterGVN() || gvn->is_IterGVN()->delay_transform(), "delay transform should be enabled");
  _exceptions = jvms->map()->next_exception();
  if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
  set_jvms(jvms);
#ifdef ASSERT
  if (_gvn.is_IterGVN() != NULL) {
    assert(_gvn.is_IterGVN()->delay_transform(), "Transformation must be delayed if IterGVN is used");
    // Save the initial size of _for_igvn worklist for verification (see ~GraphKit)
    _worklist_size = _gvn.C->for_igvn()->size();
  }
#endif
}

// Private constructor for parser.
GraphKit::GraphKit()
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn(*C->initial_gvn()),
    _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
{
  _exceptions = NULL;
  set_map(NULL);
  debug_only(_sp = -99);
  debug_only(set_bci(-99));
}



//---------------------------clean_stack---------------------------------------
// Clear away rubbish from the stack area of the JVM state.
// This destroys any arguments that may be waiting on the stack.
void GraphKit::clean_stack(int from_sp) {
  SafePointNode* map      = this->map();
  JVMState*      jvms     = this->jvms();
  int            stk_size = jvms->stk_size();
  int            stkoff   = jvms->stkoff();
  Node*          top      = this->top();
  for (int i = from_sp; i < stk_size; i++) {
    if (map->in(stkoff + i) != top) {
      map->set_req(stkoff + i, top);
    }
  }
}
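
// Illustrative usage (hypothetical call site, not taken from this file): a
// caller that has already popped its outgoing arguments can discard any
// stale slots above the new stack pointer with
//   clean_stack(sp());
// so that later safepoints carry only live values in their debug info.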


//--------------------------------sync_jvms-----------------------------------
// Make sure our current jvms agrees with our parse state.
JVMState* GraphKit::sync_jvms() const {
  JVMState* jvms = this->jvms();
  jvms->set_bci(bci());       // Record the new bci in the JVMState
  jvms->set_sp(sp());         // Record the new sp in the JVMState
  assert(jvms_in_sync(), "jvms is now in sync");
  return jvms;
}

//--------------------------------sync_jvms_for_reexecute---------------------
// Make sure our current jvms agrees with our parse state.  This version
// uses the reexecute_sp for reexecuting bytecodes.
JVMState* GraphKit::sync_jvms_for_reexecute() {
  JVMState* jvms = this->jvms();
  jvms->set_bci(bci());          // Record the new bci in the JVMState
  jvms->set_sp(reexecute_sp());  // Record the new sp in the JVMState
  return jvms;
}

#ifdef ASSERT
bool GraphKit::jvms_in_sync() const {
  Parse* parse = is_Parse();
  if (parse == NULL) {
    if (bci() !=      jvms()->bci())          return false;
    if (sp()  != (int)jvms()->sp())           return false;
    return true;
  }
  if (jvms()->method() != parse->method())    return false;
  if (jvms()->bci()    != parse->bci())       return false;
  int jvms_sp = jvms()->sp();
  if (jvms_sp          != parse->sp())        return false;
  int jvms_depth = jvms()->depth();
  if (jvms_depth       != parse->depth())     return false;
  return true;
}

// Local helper checks for special internal merge points
// used to accumulate and merge exception states.
// They are marked by the region's in(0) edge being the root node.
// Such merge points must never "escape" into the parser at large,
// until they have been handed to gvn.transform.
static bool is_hidden_merge(Node* reg) {
  if (reg == NULL)  return false;
  if (reg->is_Phi()) {
    reg = reg->in(0);
    if (reg == NULL)  return false;
  }
  return reg->is_Region() && reg->in(0) != NULL && reg->in(0)->is_Root();
}

void GraphKit::verify_map() const {
  if (map() == NULL)  return;  // null map is OK
  assert(map()->req() <= jvms()->endoff(), "no extra garbage on map");
  assert(!map()->has_exceptions(),    "call add_exception_states_from 1st");
  assert(!is_hidden_merge(control()), "call use_exception_state, not set_map");
}

void GraphKit::verify_exception_state(SafePointNode* ex_map) {
  assert(ex_map->next_exception() == NULL, "not already part of a chain");
  assert(has_saved_ex_oop(ex_map), "every exception state has an ex_oop");
}
#endif

//---------------------------stop_and_kill_map---------------------------------
// Set _map to NULL, signalling a stop to further bytecode execution.
// First smash the current map's control to a constant, to mark it dead.
void GraphKit::stop_and_kill_map() {
  SafePointNode* dead_map = stop();
  if (dead_map != NULL) {
    dead_map->disconnect_inputs(NULL, C); // Mark the map as killed.
    assert(dead_map->is_killed(), "must be so marked");
  }
}


//--------------------------------stopped--------------------------------------
// Tell if _map is NULL, or control is top.
bool GraphKit::stopped() {
  if (map() == NULL)           return true;
  else if (control() == top()) return true;
  else                         return false;
}


//-----------------------------has_ex_handler----------------------------------
// Tell if this method or any caller method has exception handlers.
bool GraphKit::has_ex_handler() {
  for (JVMState* jvmsp = jvms(); jvmsp != NULL; jvmsp = jvmsp->caller()) {
    if (jvmsp->has_method() && jvmsp->method()->has_exception_handlers()) {
      return true;
    }
  }
  return false;
}

//------------------------------save_ex_oop------------------------------------
// Save an exception without blowing stack contents or other JVM state.
void GraphKit::set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop) {
  assert(!has_saved_ex_oop(ex_map), "clear ex-oop before setting again");
  ex_map->add_req(ex_oop);
  debug_only(verify_exception_state(ex_map));
}

inline static Node* common_saved_ex_oop(SafePointNode* ex_map, bool clear_it) {
  assert(GraphKit::has_saved_ex_oop(ex_map), "ex_oop must be there");
  Node* ex_oop = ex_map->in(ex_map->req()-1);
  if (clear_it)  ex_map->del_req(ex_map->req()-1);
  return ex_oop;
}

//-----------------------------saved_ex_oop------------------------------------
// Recover a saved exception from its map.
Node* GraphKit::saved_ex_oop(SafePointNode* ex_map) {
  return common_saved_ex_oop(ex_map, false);
}

//--------------------------clear_saved_ex_oop---------------------------------
// Erase a previously saved exception from its map.
Node* GraphKit::clear_saved_ex_oop(SafePointNode* ex_map) {
  return common_saved_ex_oop(ex_map, true);
}
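
// Note on the encoding used by the helpers above: the exception oop rides as
// one extra precision edge appended just past the end of the JVMS proper, so
// a map carries a saved ex_oop exactly when req() == jvms()->endoff() + 1.
// That is the invariant checked by has_saved_ex_oop() below.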

#ifdef ASSERT
//---------------------------has_saved_ex_oop----------------------------------
// Tell whether an exception oop is currently saved in the given map.
bool GraphKit::has_saved_ex_oop(SafePointNode* ex_map) {
  return ex_map->req() == ex_map->jvms()->endoff()+1;
}
#endif

//-------------------------make_exception_state--------------------------------
// Turn the current JVM state into an exception state, appending the ex_oop.
SafePointNode* GraphKit::make_exception_state(Node* ex_oop) {
  sync_jvms();
  SafePointNode* ex_map = stop();  // do not manipulate this map any more
  set_saved_ex_oop(ex_map, ex_oop);
  return ex_map;
}


//--------------------------add_exception_state--------------------------------
// Add an exception to my list of exceptions.
void GraphKit::add_exception_state(SafePointNode* ex_map) {
  if (ex_map == NULL || ex_map->control() == top()) {
    return;
  }
#ifdef ASSERT
  verify_exception_state(ex_map);
  if (has_exceptions()) {
    assert(ex_map->jvms()->same_calls_as(_exceptions->jvms()), "all collected exceptions must come from the same place");
  }
#endif

  // If there is already an exception of exactly this type, merge with it.
  // In particular, null-checks and other low-level exceptions common up here.
  Node*       ex_oop  = saved_ex_oop(ex_map);
  const Type* ex_type = _gvn.type(ex_oop);
  if (ex_oop == top()) {
    // No action needed.
    return;
  }
  assert(ex_type->isa_instptr(), "exception must be an instance");
  for (SafePointNode* e2 = _exceptions; e2 != NULL; e2 = e2->next_exception()) {
    const Type* ex_type2 = _gvn.type(saved_ex_oop(e2));
    // We check sp also because call bytecodes can generate exceptions
    // both before and after arguments are popped!
    if (ex_type2 == ex_type
        && e2->_jvms->sp() == ex_map->_jvms->sp()) {
      combine_exception_states(ex_map, e2);
      return;
    }
  }

  // No pre-existing exception of the same type.  Chain it on the list.
  push_exception_state(ex_map);
}

//-----------------------add_exception_states_from-----------------------------
void GraphKit::add_exception_states_from(JVMState* jvms) {
  SafePointNode* ex_map = jvms->map()->next_exception();
  if (ex_map != NULL) {
    jvms->map()->set_next_exception(NULL);
    for (SafePointNode* next_map; ex_map != NULL; ex_map = next_map) {
      next_map = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      add_exception_state(ex_map);
    }
  }
}

//-----------------------transfer_exceptions_into_jvms-------------------------
JVMState* GraphKit::transfer_exceptions_into_jvms() {
  if (map() == NULL) {
    // We need a JVMS to carry the exceptions, but the map has gone away.
    // Create a scratch JVMS, cloned from any of the exception states...
    if (has_exceptions()) {
      _map = _exceptions;
      _map = clone_map();
      _map->set_next_exception(NULL);
      clear_saved_ex_oop(_map);
      debug_only(verify_map());
    } else {
      // ...or created from scratch
      JVMState* jvms = new (C) JVMState(_method, NULL);
      jvms->set_bci(_bci);
      jvms->set_sp(_sp);
      jvms->set_map(new SafePointNode(TypeFunc::Parms, jvms));
      set_jvms(jvms);
      for (uint i = 0; i < map()->req(); i++)  map()->init_req(i, top());
      set_all_memory(top());
      while (map()->req() < jvms->endoff())  map()->add_req(top());
    }
    // (This is a kludge, in case you didn't notice.)
    set_control(top());
  }
  JVMState* jvms = sync_jvms();
  assert(!jvms->map()->has_exceptions(), "no exceptions on this map yet");
  jvms->map()->set_next_exception(_exceptions);
  _exceptions = NULL;   // done with this set of exceptions
  return jvms;
}

static inline void add_n_reqs(Node* dstphi, Node* srcphi) {
  assert(is_hidden_merge(dstphi), "must be a special merge node");
  assert(is_hidden_merge(srcphi), "must be a special merge node");
  uint limit = srcphi->req();
  for (uint i = PhiNode::Input; i < limit; i++) {
    dstphi->add_req(srcphi->in(i));
  }
}
static inline void add_one_req(Node* dstphi, Node* src) {
  assert(is_hidden_merge(dstphi), "must be a special merge node");
  assert(!is_hidden_merge(src), "must not be a special merge node");
  dstphi->add_req(src);
}
//-----------------------combine_exception_states------------------------------
// This helper function combines exception states by building phis on a
// specially marked state-merging region.  These regions and phis are
// untransformed, and can build up gradually.  The region is marked by
// having the root node as its control input, rather than NULL.  Such
// regions do not appear except in this function, and in use_exception_state.
void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map) {
  if (failing())  return;  // dying anyway...
  JVMState* ex_jvms = ex_map->_jvms;
  assert(ex_jvms->same_calls_as(phi_map->_jvms), "consistent call chains");
  assert(ex_jvms->stkoff() == phi_map->_jvms->stkoff(), "matching locals");
  assert(ex_jvms->sp() == phi_map->_jvms->sp(), "matching stack sizes");
  assert(ex_jvms->monoff() == phi_map->_jvms->monoff(), "matching JVMS");
  assert(ex_jvms->scloff() == phi_map->_jvms->scloff(), "matching scalar replaced objects");
  assert(ex_map->req() == phi_map->req(), "matching maps");
  uint tos = ex_jvms->stkoff() + ex_jvms->sp();
  Node*         hidden_merge_mark = root();
  Node*         region  = phi_map->control();
  MergeMemNode* phi_mem = phi_map->merged_memory();
  MergeMemNode* ex_mem  = ex_map->merged_memory();
  if (region->in(0) != hidden_merge_mark) {
    // The control input is not (yet) a specially-marked region in phi_map.
    // Make it so, and build some phis.
    region = new RegionNode(2);
    _gvn.set_type(region, Type::CONTROL);
    region->set_req(0, hidden_merge_mark);  // marks an internal ex-state
    region->init_req(1, phi_map->control());
    phi_map->set_control(region);
    Node* io_phi = PhiNode::make(region, phi_map->i_o(), Type::ABIO);
    record_for_igvn(io_phi);
    _gvn.set_type(io_phi, Type::ABIO);
    phi_map->set_i_o(io_phi);
    for (MergeMemStream mms(phi_mem); mms.next_non_empty(); ) {
      Node* m = mms.memory();
      Node* m_phi = PhiNode::make(region, m, Type::MEMORY, mms.adr_type(C));
      record_for_igvn(m_phi);
      _gvn.set_type(m_phi, Type::MEMORY);
      mms.set_memory(m_phi);
    }
  }

  // Either or both of phi_map and ex_map might already be converted into phis.
  Node* ex_control = ex_map->control();
  // if there is special marking on ex_map also, we add multiple edges from src
  bool add_multiple = (ex_control->in(0) == hidden_merge_mark);
  // how wide was the destination phi_map, originally?
  uint orig_width = region->req();

  if (add_multiple) {
    add_n_reqs(region, ex_control);
    add_n_reqs(phi_map->i_o(), ex_map->i_o());
  } else {
    // ex_map has no merges, so we just add single edges everywhere
    add_one_req(region, ex_control);
    add_one_req(phi_map->i_o(), ex_map->i_o());
  }
  for (MergeMemStream mms(phi_mem, ex_mem); mms.next_non_empty2(); ) {
    if (mms.is_empty()) {
      // get a copy of the base memory, and patch some inputs into it
      const TypePtr* adr_type = mms.adr_type(C);
      Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
      assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
      mms.set_memory(phi);
      // Prepare to append interesting stuff onto the newly sliced phi:
      while (phi->req() > orig_width)  phi->del_req(phi->req()-1);
    }
    // Append stuff from ex_map:
    if (add_multiple) {
      add_n_reqs(mms.memory(), mms.memory2());
    } else {
      add_one_req(mms.memory(), mms.memory2());
    }
  }
  uint limit = ex_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip everything in the JVMS after tos.  (The ex_oop follows.)
    if (i == tos)  i = ex_jvms->monoff();
    Node* src = ex_map->in(i);
    Node* dst = phi_map->in(i);
    if (src != dst) {
      PhiNode* phi;
      if (dst->in(0) != region) {
        dst = phi = PhiNode::make(region, dst, _gvn.type(dst));
        record_for_igvn(phi);
        _gvn.set_type(phi, phi->type());
        phi_map->set_req(i, dst);
        // Prepare to append interesting stuff onto the new phi:
        while (dst->req() > orig_width)  dst->del_req(dst->req()-1);
      } else {
        assert(dst->is_Phi(), "nobody else uses a hidden region");
        phi = dst->as_Phi();
      }
      if (add_multiple && src->in(0) == ex_control) {
        // Both are phis.
        add_n_reqs(dst, src);
      } else {
        while (dst->req() < region->req())  add_one_req(dst, src);
      }
      const Type* srctype = _gvn.type(src);
      if (phi->type() != srctype) {
        const Type* dsttype = phi->type()->meet_speculative(srctype);
        if (phi->type() != dsttype) {
          phi->set_type(dsttype);
          _gvn.set_type(phi, dsttype);
        }
      }
    }
  }
  phi_map->merge_replaced_nodes_with(ex_map);
}
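
// A sketch of the merge built above (not a literal node dump): the hidden
// region keeps the root node in slot 0 as its marker, and every phi hanging
// off it grows in lockstep with the region:
//
//   Region[ root | c1 | c2 | ... ]   <- control of phi_map
//   Phi   [ reg  | v1 | v2 | ... ]   <- i_o, each memory slice, each JVMS slot
//
// use_exception_state() below replaces the marker with a self-reference,
// turning it into an ordinary region, and then transforms the phis.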

//--------------------------use_exception_state--------------------------------
Node* GraphKit::use_exception_state(SafePointNode* phi_map) {
  if (failing()) { stop(); return top(); }
  Node* region = phi_map->control();
  Node* hidden_merge_mark = root();
  assert(phi_map->jvms()->map() == phi_map, "sanity: 1-1 relation");
  Node* ex_oop = clear_saved_ex_oop(phi_map);
  if (region->in(0) == hidden_merge_mark) {
    // Special marking for internal ex-states.  Process the phis now.
    region->set_req(0, region);  // now it's an ordinary region
    set_jvms(phi_map->jvms());   // ...so now we can use it as a map
    // Note: Setting the jvms also sets the bci and sp.
    set_control(_gvn.transform(region));
    uint tos = jvms()->stkoff() + sp();
    for (uint i = 1; i < tos; i++) {
      Node* x = phi_map->in(i);
      if (x->in(0) == region) {
        assert(x->is_Phi(), "expected a special phi");
        phi_map->set_req(i, _gvn.transform(x));
      }
    }
    for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
      Node* x = mms.memory();
      if (x->in(0) == region) {
        assert(x->is_Phi(), "nobody else uses a hidden region");
        mms.set_memory(_gvn.transform(x));
      }
    }
    if (ex_oop->in(0) == region) {
      assert(ex_oop->is_Phi(), "expected a special phi");
      ex_oop = _gvn.transform(ex_oop);
    }
  } else {
    set_jvms(phi_map->jvms());
  }

  assert(!is_hidden_merge(phi_map->control()), "hidden ex. states cleared");
  assert(!is_hidden_merge(phi_map->i_o()), "hidden ex. states cleared");
  return ex_oop;
}

//---------------------------------java_bc-------------------------------------
Bytecodes::Code GraphKit::java_bc() const {
  ciMethod* method = this->method();
  int       bci    = this->bci();
  if (method != NULL && bci != InvocationEntryBci)
    return method->java_code_at_bci(bci);
  else
    return Bytecodes::_illegal;
}

void GraphKit::uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                          bool must_throw) {
    // if the exception capability is set, then we will generate code
    // to check the JavaThread.should_post_on_exceptions flag to see
    // if we actually need to report exception events (for this
    // thread).  If we don't need to report exception events, we will
    // take the normal fast path provided by add_exception_events.  If
    // exception event reporting is enabled for this thread, we will
    // take the uncommon_trap in the BuildCutout below.

    // first must access the should_post_on_exceptions_flag in this thread's JavaThread
    Node* jthread = _gvn.transform(new ThreadLocalNode());
    Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
    Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, MemNode::unordered);

    // Test the should_post_on_exceptions_flag vs. 0
    Node* chk = _gvn.transform( new CmpINode(should_post_flag, intcon(0)) );
    Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );

    // Branch to slow_path if should_post_on_exceptions_flag was true
    { BuildCutout unless(this, tst, PROB_MAX);
      // Do not try anything fancy if we're notifying the VM on every throw.
      // Cf. case Bytecodes::_athrow in parse2.cpp.
      uncommon_trap(reason, Deoptimization::Action_none,
                    (ciKlass*)NULL, (char*)NULL, must_throw);
    }

}

//------------------------------builtin_throw----------------------------------
void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
  bool must_throw = true;

  if (env()->jvmti_can_post_on_exceptions()) {
    // check if we must post exception events, take uncommon trap if so
    uncommon_trap_if_should_post_on_exceptions(reason, must_throw);
    // here if should_post_on_exceptions is false
    // continue on with the normal codegen
  }

  // If this particular condition has not yet happened at this
  // bytecode, then use the uncommon trap mechanism, and allow for
  // a future recompilation if several traps occur here.
  // If the throw is hot, try to use a more complicated inline mechanism
  // which keeps execution inside the compiled code.
  bool treat_throw_as_hot = false;
  ciMethodData* md = method()->method_data();

  if (ProfileTraps) {
    if (too_many_traps(reason)) {
      treat_throw_as_hot = true;
    }
    // (If there is no MDO at all, assume it is early in
    // execution, and that any deopts are part of the
    // startup transient, and don't need to be remembered.)

    // Also, if there is a local exception handler, treat all throws
    // as hot if there has been at least one in this method.
    if (C->trap_count(reason) != 0
        && method()->method_data()->trap_count(reason) != 0
        && has_ex_handler()) {
        treat_throw_as_hot = true;
    }
  }

  // If this throw happens frequently, an uncommon trap might cause
  // a performance pothole.  If there is a local exception handler,
  // and if this particular bytecode appears to be deoptimizing often,
  // let us handle the throw inline, with a preconstructed instance.
  // Note:   If the deopt count has blown up, the uncommon trap
  // runtime is going to flush this nmethod, no matter what.
  if (treat_throw_as_hot
      && (!StackTraceInThrowable || OmitStackTraceInFastThrow)) {
    // If the throw is local, we use a pre-existing instance and
    // punt on the backtrace.  This would lead to a missing backtrace
    // (a repeat of 4292742) if the backtrace object is ever asked
    // for its backtrace.
    // Fixing this remaining case of 4292742 requires some flavor of
    // escape analysis.  Leave that for the future.
    ciInstance* ex_obj = NULL;
    switch (reason) {
    case Deoptimization::Reason_null_check:
      ex_obj = env()->NullPointerException_instance();
      break;
    case Deoptimization::Reason_div0_check:
      ex_obj = env()->ArithmeticException_instance();
      break;
    case Deoptimization::Reason_range_check:
      ex_obj = env()->ArrayIndexOutOfBoundsException_instance();
      break;
    case Deoptimization::Reason_class_check:
      if (java_bc() == Bytecodes::_aastore) {
        ex_obj = env()->ArrayStoreException_instance();
      } else {
        ex_obj = env()->ClassCastException_instance();
      }
      break;
    default:
      break;
    }
    if (failing()) { stop(); return; }  // exception allocation might fail
    if (ex_obj != NULL) {
      // Cheat with a preallocated exception object.
      if (C->log() != NULL)
        C->log()->elem("hot_throw preallocated='1' reason='%s'",
                       Deoptimization::trap_reason_name(reason));
      const TypeInstPtr* ex_con  = TypeInstPtr::make(ex_obj);
      Node*              ex_node = _gvn.transform(ConNode::make(ex_con));

      // Clear the detail message of the preallocated exception object.
      // Weblogic sometimes mutates the detail message of exceptions
      // using reflection.
      int offset = java_lang_Throwable::get_detailMessage_offset();
      const TypePtr* adr_typ = ex_con->add_offset(offset);

      Node *adr = basic_plus_adr(ex_node, ex_node, offset);
      const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
      Node *store = access_store_at(ex_node, adr, adr_typ, null(), val_type, T_OBJECT, IN_HEAP);

      add_exception_state(make_exception_state(ex_node));
      return;
    }
  }

  // %%% Maybe add entry to OptoRuntime which directly throws the exc.?
  // It won't be much cheaper than bailing to the interp., since we'll
  // have to pass up all the debug-info, and the runtime will have to
  // create the stack trace.

  // Usual case:  Bail to interpreter.
  // Reserve the right to recompile if we haven't seen anything yet.

  ciMethod* m = Deoptimization::reason_is_speculate(reason) ? C->method() : NULL;
  Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
  if (treat_throw_as_hot
      && (method()->method_data()->trap_recompiled_at(bci(), m)
          || C->too_many_traps(reason))) {
    // We cannot afford to take more traps here.  Suffer in the interpreter.
    if (C->log() != NULL)
      C->log()->elem("hot_throw preallocated='0' reason='%s' mcount='%d'",
                     Deoptimization::trap_reason_name(reason),
                     C->trap_count(reason));
    action = Deoptimization::Action_none;
  }

  // "must_throw" prunes the JVM state to include only the stack, if there
  // are no local exception handlers.  This should cut down on register
  // allocation time and code size, by drastically reducing the number
  // of in-edges on the call to the uncommon trap.

  uncommon_trap(reason, action, (ciKlass*)NULL, (char*)NULL, must_throw);
}
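
// In short: a hot throw whose backtrace may be omitted is satisfied with a
// preallocated, message-cleared exception instance kept inside compiled
// code; every other throw bails out to the interpreter, downgrading the
// deopt action from Action_maybe_recompile to Action_none once this bci has
// already trapped too often.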


//----------------------------PreserveJVMState---------------------------------
PreserveJVMState::PreserveJVMState(GraphKit* kit, bool clone_map) {
  debug_only(kit->verify_map());
  _kit    = kit;
  _map    = kit->map();   // preserve the map
  _sp     = kit->sp();
  kit->set_map(clone_map ? kit->clone_map() : NULL);
#ifdef ASSERT
  _bci    = kit->bci();
  Parse* parser = kit->is_Parse();
  int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
  _block  = block;
#endif
}
PreserveJVMState::~PreserveJVMState() {
  GraphKit* kit = _kit;
#ifdef ASSERT
  assert(kit->bci() == _bci, "bci must not shift");
  Parse* parser = kit->is_Parse();
  int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
  assert(block == _block,    "block must not shift");
#endif
  kit->set_map(_map);
  kit->set_sp(_sp);
}


//-----------------------------BuildCutout-------------------------------------
BuildCutout::BuildCutout(GraphKit* kit, Node* p, float prob, float cnt)
  : PreserveJVMState(kit)
{
  assert(p->is_Con() || p->is_Bool(), "test must be a bool");
  SafePointNode* outer_map = _map;   // preserved map is caller's
  SafePointNode* inner_map = kit->map();
  IfNode* iff = kit->create_and_map_if(outer_map->control(), p, prob, cnt);
  outer_map->set_control(kit->gvn().transform( new IfTrueNode(iff) ));
  inner_map->set_control(kit->gvn().transform( new IfFalseNode(iff) ));
}
BuildCutout::~BuildCutout() {
  GraphKit* kit = _kit;
  assert(kit->stopped(), "cutout code must stop, throw, return, etc.");
}

//---------------------------PreserveReexecuteState----------------------------
PreserveReexecuteState::PreserveReexecuteState(GraphKit* kit) {
  assert(!kit->stopped(), "must call stopped() before");
  _kit    =    kit;
  _sp     =    kit->sp();
  _reexecute = kit->jvms()->_reexecute;
}
PreserveReexecuteState::~PreserveReexecuteState() {
  if (_kit->stopped()) return;
  _kit->jvms()->_reexecute = _reexecute;
  _kit->set_sp(_sp);
}
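
// Typical (illustrative) use of the RAII helper above, as seen at intrinsic
// call sites that must restart a bytecode after deoptimization:
//
//   { PreserveReexecuteState preexecs(this);
//     jvms()->set_should_reexecute(true);
//     // ... push the consumed arguments back and emit the runtime call ...
//   }  // destructor restores the old sp and reexecute bit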

//------------------------------clone_map--------------------------------------
// Implementation of PreserveJVMState
//
// Only clone_map(...) here. If this function is only used in the
// PreserveJVMState class we may want to get rid of this extra
// function eventually and do it all there.

SafePointNode* GraphKit::clone_map() {
  if (map() == NULL)  return NULL;

  // Clone the memory edge first
  Node* mem = MergeMemNode::make(map()->memory());
  gvn().set_type_bottom(mem);

  SafePointNode *clonemap = (SafePointNode*)map()->clone();
  JVMState* jvms = this->jvms();
  JVMState* clonejvms = jvms->clone_shallow(C);
  clonemap->set_memory(mem);
  clonemap->set_jvms(clonejvms);
  clonejvms->set_map(clonemap);
  record_for_igvn(clonemap);
  gvn().set_type_bottom(clonemap);
  return clonemap;
}


//-----------------------------set_map_clone-----------------------------------
void GraphKit::set_map_clone(SafePointNode* m) {
  _map = m;
  _map = clone_map();
  _map->set_next_exception(NULL);
  debug_only(verify_map());
}


//----------------------------kill_dead_locals---------------------------------
// Detect any locals which are known to be dead, and force them to top.
void GraphKit::kill_dead_locals() {
  // Consult the liveness information for the locals.  If any
  // of them are unused, then they can be replaced by top().  This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.

  // This call is made from many of the bytecode handling
  // subroutines called from the Big Switch in do_one_bytecode.
  // Every bytecode which might include a slow path is responsible
  // for killing its dead locals.  The more consistent we
  // are about killing deads, the fewer useless phis will be
  // constructed for them at various merge points.

  // bci can be -1 (InvocationEntryBci).  We return the entry
  // liveness for the method.

  if (method() == NULL || method()->code_size() == 0) {
    // We are building a graph for a call to a native method.
    // All locals are live.
    return;
  }

  ResourceMark rm;

  // Consult the liveness information for the locals.  If any
  // of them are unused, then they can be replaced by top().  This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.
  MethodLivenessResult live_locals = method()->liveness_at_bci(bci());

  int len = (int)live_locals.size();
  assert(len <= jvms()->loc_size(), "too many live locals");
  for (int local = 0; local < len; local++) {
    if (!live_locals.at(local)) {
      set_local(local, top());
    }
  }
}

#ifdef ASSERT
//-------------------------dead_locals_are_killed------------------------------
// Return true if all dead locals are set to top in the map.
// Used to assert "clean" debug info at various points.
bool GraphKit::dead_locals_are_killed() {
  if (method() == NULL || method()->code_size() == 0) {
    // No locals need to be dead, so all is as it should be.
    return true;
  }

  // Make sure somebody called kill_dead_locals upstream.
  ResourceMark rm;
  for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
    if (jvms->loc_size() == 0)  continue;  // no locals to consult
    SafePointNode* map = jvms->map();
    ciMethod* method = jvms->method();
    int       bci    = jvms->bci();
    if (jvms == this->jvms()) {
      bci = this->bci();  // it might not yet be synched
    }
    MethodLivenessResult live_locals = method->liveness_at_bci(bci);
    int len = (int)live_locals.size();
    if (!live_locals.is_valid() || len == 0)
      // This method is trivial, or is poisoned by a breakpoint.
      return true;
    assert(len == jvms->loc_size(), "live map consistent with locals map");
    for (int local = 0; local < len; local++) {
      if (!live_locals.at(local) && map->local(jvms, local) != top()) {
        if (PrintMiscellaneous && (Verbose || WizardMode)) {
          tty->print_cr("Zombie local %d: ", local);
          jvms->dump();
        }
        return false;
      }
    }
  }
  return true;
}

#endif //ASSERT

// Helper function for enforcing certain bytecodes to reexecute if
// deoptimization happens
static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
  ciMethod* cur_method = jvms->method();
  int       cur_bci   = jvms->bci();
  if (cur_method != NULL && cur_bci != InvocationEntryBci) {
    Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
    return Interpreter::bytecode_should_reexecute(code) ||
           (is_anewarray && (code == Bytecodes::_multianewarray));
    // Reexecute the _multianewarray bytecode, which was replaced with a
    // sequence of [a]newarray. See Parse::do_multianewarray().
    //
    // Note: the interpreter should not have the bit set, since this
    // optimization is limited by dimensions and guarded by a flag, so in
    // some cases multianewarray() runtime calls will be generated and
    // the bytecode should not be reexecuted (stack will not be reset).
  } else {
    return false;
  }
}

// Helper function for adding JVMState and debug information to node
void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
  // Add the safepoint edges to the call (or other safepoint).

  // Make sure dead locals are set to top.  This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.
  assert(dead_locals_are_killed(), "garbage in debug info before safepoint");

  // Walk the inline list to fill in the correct set of JVMState's
  // Also fill in the associated edges for each JVMState.

  // If the bytecode needs to be reexecuted we need to put
  // the arguments back on the stack.
  const bool should_reexecute = jvms()->should_reexecute();
  JVMState* youngest_jvms = should_reexecute ? sync_jvms_for_reexecute() : sync_jvms();

  // NOTE: set_bci (called from sync_jvms) might reset the reexecute bit to
  // undefined if the bci is different.  This is normal for Parse but it
  // should not happen for LibraryCallKit because only one bci is processed.
  assert(!is_LibraryCallKit() || (jvms()->should_reexecute() == should_reexecute),
         "in LibraryCallKit the reexecute bit should not change");

  // If we are guaranteed to throw, we can prune everything but the
  // input to the current bytecode.
  bool can_prune_locals = false;
  uint stack_slots_not_pruned = 0;
  int inputs = 0, depth = 0;
  if (must_throw) {
    assert(method() == youngest_jvms->method(), "sanity");
    if (compute_stack_effects(inputs, depth)) {
      can_prune_locals = true;
      stack_slots_not_pruned = inputs;
    }
  }

  if (env()->should_retain_local_variables()) {
    // At any safepoint, this method can get breakpointed, which would
    // then require an immediate deoptimization.
    can_prune_locals = false;  // do not prune locals
    stack_slots_not_pruned = 0;
  }

  // do not scribble on the input jvms
  JVMState* out_jvms = youngest_jvms->clone_deep(C);
  call->set_jvms(out_jvms); // Start jvms list for call node

  // For a known set of bytecodes, the interpreter should reexecute them if
  // deoptimization happens. We set the reexecute state for them here
  if (out_jvms->is_reexecute_undefined() && //don't change if already specified
      should_reexecute_implied_by_bytecode(out_jvms, call->is_AllocateArray())) {
    out_jvms->set_should_reexecute(true); //NOTE: youngest_jvms not changed
  }

  // Presize the call:
  DEBUG_ONLY(uint non_debug_edges = call->req());
  call->add_req_batch(top(), youngest_jvms->debug_depth());
  assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");

  // Set up edges so that the call looks like this:
  //  Call [state:] ctl io mem fptr retadr
  //       [parms:] parm0 ... parmN
  //       [root:]  loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
  //    [...mid:]   loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
  //       [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
  // Note that caller debug info precedes callee debug info.

  // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
  uint debug_ptr = call->req();

  // Loop over the map input edges associated with jvms, add them
  // to the call node, & reset all offsets to match call node array.
  for (JVMState* in_jvms = youngest_jvms; in_jvms != NULL; ) {
    uint debug_end   = debug_ptr;
    uint debug_start = debug_ptr - in_jvms->debug_size();
    debug_ptr = debug_start;  // back up the ptr

    uint p = debug_start;  // walks forward in [debug_start, debug_end)
    uint j, k, l;
    SafePointNode* in_map = in_jvms->map();
    out_jvms->set_map(call);

    if (can_prune_locals) {
      assert(in_jvms->method() == out_jvms->method(), "sanity");
      // If the current throw can reach an exception handler in this JVMS,
      // then we must keep everything live that can reach that handler.
      // As a quick and dirty approximation, we look for any handlers at all.
      if (in_jvms->method()->has_exception_handlers()) {
        can_prune_locals = false;
      }
    }

    // Add the Locals
    k = in_jvms->locoff();
    l = in_jvms->loc_size();
    out_jvms->set_locoff(p);
    if (!can_prune_locals) {
      for (j = 0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else {
      p += l;  // already set to top above by add_req_batch
    }

    // Add the Expression Stack
    k = in_jvms->stkoff();
    l = in_jvms->sp();
    out_jvms->set_stkoff(p);
    if (!can_prune_locals) {
      for (j = 0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else if (can_prune_locals && stack_slots_not_pruned != 0) {
      // Divide stack into {S0,...,S1}, where S0 is set to top.
      uint s1 = stack_slots_not_pruned;
      stack_slots_not_pruned = 0;  // for next iteration
      if (s1 > l)  s1 = l;
      uint s0 = l - s1;
      p += s0;  // skip the tops preinstalled by add_req_batch
      for (j = s0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else {
      p += l;  // already set to top above by add_req_batch
    }

    // Add the Monitors
    k = in_jvms->monoff();
    l = in_jvms->mon_size();
    out_jvms->set_monoff(p);
    for (j = 0; j < l; j++)
      call->set_req(p++, in_map->in(k+j));

    // Copy any scalar object fields.
    k = in_jvms->scloff();
    l = in_jvms->scl_size();
    out_jvms->set_scloff(p);
    for (j = 0; j < l; j++)
      call->set_req(p++, in_map->in(k+j));

    // Finish the new jvms.
    out_jvms->set_endoff(p);

    assert(out_jvms->endoff()     == debug_end,             "fill ptr must match");
    assert(out_jvms->depth()      == in_jvms->depth(),      "depth must match");
    assert(out_jvms->loc_size()   == in_jvms->loc_size(),   "size must match");
    assert(out_jvms->mon_size()   == in_jvms->mon_size(),   "size must match");
    assert(out_jvms->scl_size()   == in_jvms->scl_size(),   "size must match");
    assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");

    // Update the two tail pointers in parallel.
    out_jvms = out_jvms->caller();
    in_jvms  = in_jvms->caller();
  }

  assert(debug_ptr == non_debug_edges, "debug info must fit exactly");

  // Test the correctness of JVMState::debug_xxx accessors:
  assert(call->jvms()->debug_start() == non_debug_edges, "");
  assert(call->jvms()->debug_end()   == call->req(), "");
  assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
}

bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
  Bytecodes::Code code = java_bc();
  if (code == Bytecodes::_wide) {
    code = method()->java_code_at_bci(bci() + 1);
  }

  BasicType rtype = T_ILLEGAL;
  int       rsize = 0;

  if (code != Bytecodes::_illegal) {
    depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
    rtype = Bytecodes::result_type(code); // checkcast=P, athrow=V
    if (rtype < T_CONFLICT)
      rsize = type2size[rtype];
  }

  switch (code) {
  case Bytecodes::_illegal:
    return false;

  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
  case Bytecodes::_ldc2_w:
    inputs = 0;
    break;

  case Bytecodes::_dup:         inputs = 1;  break;
  case Bytecodes::_dup_x1:      inputs = 2;  break;
  case Bytecodes::_dup_x2:      inputs = 3;  break;
  case Bytecodes::_dup2:        inputs = 2;  break;
  case Bytecodes::_dup2_x1:     inputs = 3;  break;
  case Bytecodes::_dup2_x2:     inputs = 4;  break;
  case Bytecodes::_swap:        inputs = 2;  break;
  case Bytecodes::_arraylength: inputs = 1;  break;

  case Bytecodes::_getstatic:
  case Bytecodes::_putstatic:
  case Bytecodes::_getfield:
  case Bytecodes::_putfield:
    {
      bool ignored_will_link;
      ciField* field = method()->get_field_at_bci(bci(), ignored_will_link);
      int      size  = field->type()->size();
      bool is_get = (depth >= 0), is_static = (depth & 1);
      inputs = (is_static ? 0 : 1);
      if (is_get) {
        depth = size - inputs;
      } else {
        inputs += size;        // putxxx pops the value from the stack
        depth = - inputs;
      }
    }
    break;

  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokeinterface:
    {
      bool ignored_will_link;
      ciSignature* declared_signature = NULL;
      ciMethod* ignored_callee = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
      assert(declared_signature != NULL, "cannot be null");
      inputs   = declared_signature->arg_size_for_bc(code);
      int size = declared_signature->return_type()->size();
      depth = size - inputs;
    }
    break;

  case Bytecodes::_multianewarray:
    {
      ciBytecodeStream iter(method());
      iter.reset_to_bci(bci());
      iter.next();
      inputs = iter.get_dimensions();
      assert(rsize == 1, "");
      depth = rsize - inputs;
    }
    break;

  case Bytecodes::_withfield: {
    bool ignored_will_link;
    ciField* field = method()->get_field_at_bci(bci(), ignored_will_link);
    int      size  = field->type()->size();
    inputs = size+1;
    depth = rsize - inputs;
    break;
  }

  case Bytecodes::_ireturn:
  case Bytecodes::_lreturn:
  case Bytecodes::_freturn:
  case Bytecodes::_dreturn:
  case Bytecodes::_areturn:
    assert(rsize == -depth, "");
    inputs = rsize;
    break;

  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    inputs = 0;
    depth  = 1;                  // S.B. depth=1, not zero
    break;

  default:
    // bytecode produces a typed result
    inputs = rsize - depth;
    assert(inputs >= 0, "");
    break;
  }

#ifdef ASSERT
  // spot check
  int outputs = depth + inputs;
  assert(outputs >= 0, "sanity");
  switch (code) {
  case Bytecodes::_checkcast: assert(inputs == 1 && outputs == 1, ""); break;
  case Bytecodes::_athrow:    assert(inputs == 1 && outputs == 0, ""); break;
  case Bytecodes::_aload_0:   assert(inputs == 0 && outputs == 1, ""); break;
  case Bytecodes::_return:    assert(inputs == 0 && outputs == 0, ""); break;
  case Bytecodes::_drem:      assert(inputs == 4 && outputs == 2, ""); break;
  default:                    break;
  }
#endif //ASSERT

  return true;
}
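
// Worked example (illustrative): for a getfield of a long field, rsize = 2
// and only the receiver is popped, so inputs = 1 and depth = size - inputs
// = 1; the bytecode thus pops 1 slot and pushes inputs + depth = 2 slots.
// For a putstatic of an int, inputs = 1 and depth = -1.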



//------------------------------basic_plus_adr---------------------------------
Node* GraphKit::basic_plus_adr(Node* base, Node* ptr, Node* offset) {
  // short-circuit a common case
  if (offset == intcon(0))  return ptr;
  return _gvn.transform( new AddPNode(base, ptr, offset) );
}

Node* GraphKit::ConvI2L(Node* offset) {
  // short-circuit a common case
  jint offset_con = find_int_con(offset, Type::OffsetBot);
  if (offset_con != Type::OffsetBot) {
    return longcon((jlong) offset_con);
  }
  return _gvn.transform( new ConvI2LNode(offset));
}

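// Note: unlike ConvI2L above, ConvI2UL below zero-extends; the AndL against
// max_juint clears the high 32 bits, so the result equals
// (julong)(juint)offset.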
Node* GraphKit::ConvI2UL(Node* offset) {
  juint offset_con = (juint) find_int_con(offset, Type::OffsetBot);
  if (offset_con != (juint) Type::OffsetBot) {
    return longcon((julong) offset_con);
  }
  Node* conv = _gvn.transform( new ConvI2LNode(offset));
  Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
  return _gvn.transform( new AndLNode(conv, mask) );
}

Node* GraphKit::ConvL2I(Node* offset) {
  // short-circuit a common case
  jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
  if (offset_con != (jlong)Type::OffsetBot) {
    return intcon((int) offset_con);
  }
  return _gvn.transform( new ConvL2INode(offset));
}

//-------------------------load_object_klass-----------------------------------
Node* GraphKit::load_object_klass(Node* obj) {
  // Special-case a fresh allocation to avoid building nodes:
  Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
  if (akls != NULL)  return akls;
  Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
  return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));
}

//-------------------------load_array_length-----------------------------------
Node* GraphKit::load_array_length(Node* array) {
  // Special-case a fresh allocation to avoid building nodes:
  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn);
  Node *alen;
  if (alloc == NULL) {
    Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
    alen = _gvn.transform( new LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
  } else {
    alen = alloc->Ideal_length();
    Node* ccast = alloc->make_ideal_length(_gvn.type(array)->is_oopptr(), &_gvn);
    if (ccast != alen) {
      alen = _gvn.transform(ccast);
    }
  }
  return alen;
}

//------------------------------do_null_check----------------------------------
// Helper function to do a NULL pointer check.  Returned value is
// the incoming address with NULL cast away.  You are allowed to use the
// not-null value only if you are control dependent on the test.
#ifndef PRODUCT
extern int explicit_null_checks_inserted,
           explicit_null_checks_elided;
#endif
Node* GraphKit::null_check_common(Node* value, BasicType type,
                                  // optional arguments for variations:
                                  bool assert_null,
                                  Node* *null_control,
                                  bool speculative) {
  assert(!assert_null || null_control == NULL, "not both at once");
  if (stopped())  return top();
  NOT_PRODUCT(explicit_null_checks_inserted++);

  // Construct NULL check
  Node *chk = NULL;
  switch(type) {
    case T_LONG   : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
    case T_INT    : chk = new CmpINode(value, _gvn.intcon(0)); break;
    case T_INLINE_TYPE : // fall through
    case T_ARRAY  : // fall through
      type = T_OBJECT;  // simplify further tests
    case T_OBJECT : {
      const Type *t = _gvn.type( value );

      const TypeOopPtr* tp = t->isa_oopptr();
      if (tp != NULL && tp->klass() != NULL && !tp->klass()->is_loaded()
          // Only for do_null_check, not any of its siblings:
          && !assert_null && null_control == NULL) {
        // Usually, any field access or invocation on an unloaded oop type
        // will simply fail to link, since the statically linked class is
        // likely also to be unloaded.  However, in -Xcomp mode, sometimes
        // the static class is loaded but the sharper oop type is not.
        // Rather than checking for this obscure case in lots of places,
        // we simply observe that a null check on an unloaded class
        // will always be followed by a nonsense operation, so we
        // can just issue the uncommon trap here.
        // Our access to the unloaded class will only be correct
        // after it has been loaded and initialized, which requires
        // a trip through the interpreter.
#ifndef PRODUCT
        if (WizardMode) { tty->print("Null check of unloaded "); tp->klass()->print(); tty->cr(); }
#endif
        uncommon_trap(Deoptimization::Reason_unloaded,
                      Deoptimization::Action_reinterpret,
                      tp->klass(), "!loaded");
        return top();
      }

      if (assert_null) {
        // See if the type is contained in NULL_PTR.
        // If so, then the value is already null.
        if (t->higher_equal(TypePtr::NULL_PTR)) {
          NOT_PRODUCT(explicit_null_checks_elided++);
          return value;           // Elided null assert quickly!
        }
      } else {
        // See if mixing in the NULL pointer changes type.
        // If so, then the NULL pointer was not allowed in the original
        // type.  In other words, "value" was not-null.
        if (t->meet(TypePtr::NULL_PTR) != t->remove_speculative()) {
          // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
          NOT_PRODUCT(explicit_null_checks_elided++);
          return value;           // Elided null check quickly!
        }
      }
      chk = new CmpPNode( value, null() );
      break;
    }

    default:
      fatal("unexpected type: %s", type2name(type));
  }
  assert(chk != NULL, "sanity check");
  chk = _gvn.transform(chk);

  BoolTest::mask btest = assert_null ? BoolTest::eq : BoolTest::ne;
  BoolNode *btst = new BoolNode( chk, btest);
  Node   *tst = _gvn.transform( btst );

  //-----------
  // if peephole optimizations occurred, a prior test existed.
  // If a prior test existed, maybe it dominates as we can avoid this test.
  if (tst != btst && type == T_OBJECT) {
    // At this point we want to scan up the CFG to see if we can
    // find an identical test (and so avoid this test altogether).
    Node *cfg = control();
    int depth = 0;
    while( depth < 16 ) {       // Limit search depth for speed
      if( cfg->Opcode() == Op_IfTrue &&
          cfg->in(0)->in(1) == tst ) {
        // Found prior test.  Use "cast_not_null" to construct an identical
        // CastPP (and hence hash to) as already exists for the prior test.
        // Return that cast value.
        if (assert_null) {
          replace_in_map(value, null());
          return null();  // do not issue the redundant test
        }
        Node *oldcontrol = control();
        set_control(cfg);
        Node *res = cast_not_null(value);
        set_control(oldcontrol);
        NOT_PRODUCT(explicit_null_checks_elided++);
        return res;
      }
      cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
      if (cfg == NULL)  break;  // Quit at region nodes
      depth++;
    }
  }

  //-----------
  // Branch to failure if null
  float ok_prob = PROB_MAX;  // a priori estimate:  nulls never happen
  Deoptimization::DeoptReason reason;
  if (assert_null) {
    reason = Deoptimization::reason_null_assert(speculative);
  } else if (type == T_OBJECT) {
    reason = Deoptimization::reason_null_check(speculative);
  } else {
    reason = Deoptimization::Reason_div0_check;
  }
  // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
  // ciMethodData::has_trap_at will return a conservative -1 if any
  // must-be-null assertion has failed.  This could cause performance
  // problems for a method after its first do_null_assert failure.
  // Consider using 'Reason_class_check' instead?

  // To cause an implicit null check, we set the not-null probability
  // to the maximum (PROB_MAX).  For an explicit check the probability
  // is set to a smaller value.
  if (null_control != NULL || too_many_traps(reason)) {
    // probability is less likely
    ok_prob =  PROB_LIKELY_MAG(3);
  } else if (!assert_null &&
             (ImplicitNullCheckThreshold > 0) &&
             method() != NULL &&
             (method()->method_data()->trap_count(reason)
              >= (uint)ImplicitNullCheckThreshold)) {
    ok_prob =  PROB_LIKELY_MAG(3);
  }

  if (null_control != NULL) {
    IfNode* iff = create_and_map_if(control(), tst, ok_prob, COUNT_UNKNOWN);
    Node* null_true = _gvn.transform( new IfFalseNode(iff));
    set_control(      _gvn.transform( new IfTrueNode(iff)));
#ifndef PRODUCT
    if (null_true == top()) {
      explicit_null_checks_elided++;
    }
#endif
    (*null_control) = null_true;
  } else {
    BuildCutout unless(this, tst, ok_prob);
    // Check for optimizer eliding test at parse time
    if (stopped()) {
      // Failure not possible; do not bother making uncommon trap.
      NOT_PRODUCT(explicit_null_checks_elided++);
    } else if (assert_null) {
      uncommon_trap(reason,
                    Deoptimization::Action_make_not_entrant,
                    NULL, "assert_null");
    } else {
      replace_in_map(value, zerocon(type));
      builtin_throw(reason);
    }
  }

  // Must throw exception, fall-thru not possible?
  if (stopped()) {
    return top();               // No result
  }

  if (assert_null) {
    // Cast obj to null on this path.
    replace_in_map(value, zerocon(type));
    return zerocon(type);
  }

  // Cast obj to not-null on this path, if there is no null_control.
  // (If there is a null_control, a non-null value may come back to haunt us.)
  if (type == T_OBJECT) {
    Node* cast = cast_not_null(value, false);
    if (null_control == NULL || (*null_control) == top())
1395       replace_in_map(value, cast);
1396     value = cast;
1397   }
1398 
1399   return value;
1400 }
1401 
1402 Node* GraphKit::null2default(Node* value, ciInlineKlass* vk) {
  assert(!vk->is_scalarizable(), "Should only be used for non-scalarizable inline klasses");
1404   Node* null_ctl = top();
1405   value = null_check_oop(value, &null_ctl);
1406   if (!null_ctl->is_top()) {
1407     // Return default value if oop is null
1408     Node* region = new RegionNode(3);
1409     region->init_req(1, control());
1410     region->init_req(2, null_ctl);
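    // Phi merges the non-null oop (path 1) with the default value oop (path 2)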
1411     value = PhiNode::make(region, value, TypeInstPtr::make(TypePtr::BotPTR, vk));
1412     value->set_req(2, InlineTypeNode::default_oop(gvn(), vk));
1413     set_control(gvn().transform(region));
1414     value = gvn().transform(value);
1415   }
1416   return value;
1417 }
1418 
1419 //------------------------------cast_not_null----------------------------------
1420 // Cast obj to not-null on this path
1421 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
1422   if (obj->is_InlineType()) {
1423     return obj;
1424   }
1425   const Type *t = _gvn.type(obj);
1426   const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
1427   // Object is already not-null?
1428   if( t == t_not_null ) return obj;
1429 
1430   Node *cast = new CastPPNode(obj,t_not_null);
1431   cast->init_req(0, control());
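  // Pin the cast to the current control so it cannot float above the null check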
1432   cast = _gvn.transform( cast );
1433 
1434   if (t->is_inlinetypeptr() && t->inline_klass()->is_scalarizable()) {
1435     // Scalarize inline type now that we know it's non-null
1436     cast = InlineTypeNode::make_from_oop(this, cast, t->inline_klass())->as_ptr(&gvn());
1437   }
1438 
1439   // Scan for instances of 'obj' in the current JVM mapping.
1440   // These instances are known to be not-null after the test.
1441   if (do_replace_in_map)
1442     replace_in_map(obj, cast);
1443 
1444   return cast;                  // Return casted value
1445 }
1446 
1447 // Sometimes in intrinsics, we implicitly know an object is not null
1448 // (there's no actual null check) so we can cast it to not null. In
1449 // the course of optimizations, the input to the cast can become null.
// In that case, that data path will die and we need the control path
1451 // to become dead as well to keep the graph consistent. So we have to
1452 // add a check for null for which one branch can't be taken. It uses
1453 // an Opaque4 node that will cause the check to be removed after loop
1454 // opts so the test goes away and the compiled code doesn't execute a
1455 // useless check.
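//
// Schematically (illustrative sketch, not exact node names):
//
//   CmpP(value, NULL) --> Bool(ne) --> Opaque4(bool, true) --> If
//                                                             /    \
//                                                        IfTrue    IfFalse
//                                                      (continue)   Halt
//
// After loop opts the Opaque4 node collapses to its constant-true input,
// the If folds, and the Halt branch is removed.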
1456 Node* GraphKit::must_be_not_null(Node* value, bool do_replace_in_map) {
1457   if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(value))) {
1458     return value;
1459   }
1460   Node* chk = _gvn.transform(new CmpPNode(value, null()));
1461   Node *tst = _gvn.transform(new BoolNode(chk, BoolTest::ne));
1462   Node* opaq = _gvn.transform(new Opaque4Node(C, tst, intcon(1)));
1463   IfNode *iff = new IfNode(control(), opaq, PROB_MAX, COUNT_UNKNOWN);
1464   _gvn.set_type(iff, iff->Value(&_gvn));
1465   Node *if_f = _gvn.transform(new IfFalseNode(iff));
1466   Node *frame = _gvn.transform(new ParmNode(C->start(), TypeFunc::FramePtr));
1467   Node* halt = _gvn.transform(new HaltNode(if_f, frame, "unexpected null in intrinsic"));
1468   C->root()->add_req(halt);
1469   Node *if_t = _gvn.transform(new IfTrueNode(iff));
1470   set_control(if_t);
1471   return cast_not_null(value, do_replace_in_map);
1472 }
1473 
1474 
1475 //--------------------------replace_in_map-------------------------------------
1476 void GraphKit::replace_in_map(Node* old, Node* neww) {
1477   if (old == neww) {
1478     return;
1479   }
1480 
1481   map()->replace_edge(old, neww);
1482 
1483   // Note: This operation potentially replaces any edge
1484   // on the map.  This includes locals, stack, and monitors
1485   // of the current (innermost) JVM state.
1486 
  // Don't let inconsistent types from profiling escape this method.
1489 
1490   const Type* told = _gvn.type(old);
1491   const Type* tnew = _gvn.type(neww);
1492 
1493   if (!tnew->higher_equal(told)) {
1494     return;
1495   }
1496 
1497   map()->record_replaced_node(old, neww);
1498 }
1499 
1500 
1501 //=============================================================================
1502 //--------------------------------memory---------------------------------------
1503 Node* GraphKit::memory(uint alias_idx) {
1504   MergeMemNode* mem = merged_memory();
1505   Node* p = mem->memory_at(alias_idx);
1506   _gvn.set_type(p, Type::MEMORY);  // must be mapped
1507   return p;
1508 }
1509 
1510 //-----------------------------reset_memory------------------------------------
1511 Node* GraphKit::reset_memory() {
1512   Node* mem = map()->memory();
1513   // do not use this node for any more parsing!
1514   debug_only( map()->set_memory((Node*)NULL) );
1515   return _gvn.transform( mem );
1516 }
1517 
1518 //------------------------------set_all_memory---------------------------------
1519 void GraphKit::set_all_memory(Node* newmem) {
1520   Node* mergemem = MergeMemNode::make(newmem);
1521   gvn().set_type_bottom(mergemem);
1522   map()->set_memory(mergemem);
1523 }
1524 
1525 //------------------------------set_all_memory_call----------------------------
1526 void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
1527   Node* newmem = _gvn.transform( new ProjNode(call, TypeFunc::Memory, separate_io_proj) );
1528   set_all_memory(newmem);
1529 }
1530 
1531 //=============================================================================
1532 //
1533 // parser factory methods for MemNodes
1534 //
1535 // These are layered on top of the factory methods in LoadNode and StoreNode,
1536 // and integrate with the parser's memory state and _gvn engine.
1537 //
1538 
1539 // factory methods in "int adr_idx"
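//
// Illustrative use (cf. increment_counter() below, which loads a raw-memory
// counter through this factory):
//
//   Node* cnt = make_load(control(), counter_addr, TypeInt::INT, T_INT,
//                         Compile::AliasIdxRaw, MemNode::unordered);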
1540 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1541                           int adr_idx,
1542                           MemNode::MemOrd mo,
1543                           LoadNode::ControlDependency control_dependency,
1544                           bool require_atomic_access,
1545                           bool unaligned,
1546                           bool mismatched,
1547                           bool unsafe,
1548                           uint8_t barrier_data) {
1549   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1550   const TypePtr* adr_type = NULL; // debug-mode-only argument
1551   debug_only(adr_type = C->get_adr_type(adr_idx));
1552   Node* mem = memory(adr_idx);
1553   Node* ld;
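  // Atomic 64-bit (long/double) accesses need dedicated nodes so they are
  // not split into two 32-bit accesses on platforms without native 64-bit
  // memory operations.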
1554   if (require_atomic_access && bt == T_LONG) {
1555     ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
1556   } else if (require_atomic_access && bt == T_DOUBLE) {
1557     ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
1558   } else {
1559     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
1560   }
1561   ld = _gvn.transform(ld);
1562 
1563   if (((bt == T_OBJECT || bt == T_INLINE_TYPE) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1564     // Improve graph before escape analysis and boxing elimination.
1565     record_for_igvn(ld);
1566   }
1567   return ld;
1568 }
1569 
1570 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1571                                 int adr_idx,
1572                                 MemNode::MemOrd mo,
1573                                 bool require_atomic_access,
1574                                 bool unaligned,
1575                                 bool mismatched,
1576                                 bool unsafe) {
1577   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1578   const TypePtr* adr_type = NULL;
1579   debug_only(adr_type = C->get_adr_type(adr_idx));
1580   Node *mem = memory(adr_idx);
1581   Node* st;
1582   if (require_atomic_access && bt == T_LONG) {
1583     st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
1584   } else if (require_atomic_access && bt == T_DOUBLE) {
1585     st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
1586   } else {
1587     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
1588   }
1589   if (unaligned) {
1590     st->as_Store()->set_unaligned_access();
1591   }
1592   if (mismatched) {
1593     st->as_Store()->set_mismatched_access();
1594   }
1595   if (unsafe) {
1596     st->as_Store()->set_unsafe_access();
1597   }
1598   st = _gvn.transform(st);
1599   set_memory(st, adr_idx);
  // Back-to-back stores can only remove an intermediate store with DU info,
  // so push on the worklist for the optimizer.
1602   if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1603     record_for_igvn(st);
1604 
1605   return st;
1606 }
1607 
1608 Node* GraphKit::access_store_at(Node* obj,
1609                                 Node* adr,
1610                                 const TypePtr* adr_type,
1611                                 Node* val,
1612                                 const Type* val_type,
1613                                 BasicType bt,
1614                                 DecoratorSet decorators,
1615                                 bool safe_for_replace) {
1616   // Transformation of a value which could be NULL pointer (CastPP #NULL)
1617   // could be delayed during Parse (for example, in adjust_map_after_if()).
1618   // Execute transformation here to avoid barrier generation in such case.
1619   if (_gvn.type(val) == TypePtr::NULL_PTR) {
1620     val = _gvn.makecon(TypePtr::NULL_PTR);
1621   }
1622 
1623   if (stopped()) {
1624     return top(); // Dead path ?
1625   }
1626 
1627   assert(val != NULL, "not dead path");
1628   if (val->is_InlineType()) {
1629     // Store to non-flattened field. Buffer the inline type and make sure
1630     // the store is re-executed if the allocation triggers deoptimization.
1631     PreserveReexecuteState preexecs(this);
1632     jvms()->set_should_reexecute(true);
1633     val = val->as_InlineType()->buffer(this, safe_for_replace);
1634   }
1635 
1636   C2AccessValuePtr addr(adr, adr_type);
1637   C2AccessValue value(val, val_type);
1638   C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
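  // Raw accesses dispatch non-virtually to the base BarrierSetC2
  // implementation, bypassing any GC-specific barriers.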
1639   if (access.is_raw()) {
1640     return _barrier_set->BarrierSetC2::store_at(access, value);
1641   } else {
1642     return _barrier_set->store_at(access, value);
1643   }
1644 }
1645 
1646 Node* GraphKit::access_load_at(Node* obj,   // containing obj
                               Node* adr,   // actual address to load val from
1648                                const TypePtr* adr_type,
1649                                const Type* val_type,
1650                                BasicType bt,
1651                                DecoratorSet decorators,
1652                                Node* ctl) {
1653   if (stopped()) {
1654     return top(); // Dead path ?
1655   }
1656 
1657   C2AccessValuePtr addr(adr, adr_type);
1658   C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr, ctl);
1659   if (access.is_raw()) {
1660     return _barrier_set->BarrierSetC2::load_at(access, val_type);
1661   } else {
1662     return _barrier_set->load_at(access, val_type);
1663   }
1664 }
1665 
Node* GraphKit::access_load(Node* adr,   // actual address to load val from
1667                             const Type* val_type,
1668                             BasicType bt,
1669                             DecoratorSet decorators) {
1670   if (stopped()) {
1671     return top(); // Dead path ?
1672   }
1673 
1674   C2AccessValuePtr addr(adr, NULL);
1675   C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, NULL, addr);
1676   if (access.is_raw()) {
1677     return _barrier_set->BarrierSetC2::load_at(access, val_type);
1678   } else {
1679     return _barrier_set->load_at(access, val_type);
1680   }
1681 }
1682 
1683 Node* GraphKit::access_atomic_cmpxchg_val_at(Node* obj,
1684                                              Node* adr,
1685                                              const TypePtr* adr_type,
1686                                              int alias_idx,
1687                                              Node* expected_val,
1688                                              Node* new_val,
1689                                              const Type* value_type,
1690                                              BasicType bt,
1691                                              DecoratorSet decorators) {
1692   C2AccessValuePtr addr(adr, adr_type);
1693   C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
1694                         bt, obj, addr, alias_idx);
1695   if (access.is_raw()) {
1696     return _barrier_set->BarrierSetC2::atomic_cmpxchg_val_at(access, expected_val, new_val, value_type);
1697   } else {
1698     return _barrier_set->atomic_cmpxchg_val_at(access, expected_val, new_val, value_type);
1699   }
1700 }
1701 
1702 Node* GraphKit::access_atomic_cmpxchg_bool_at(Node* obj,
1703                                               Node* adr,
1704                                               const TypePtr* adr_type,
1705                                               int alias_idx,
1706                                               Node* expected_val,
1707                                               Node* new_val,
1708                                               const Type* value_type,
1709                                               BasicType bt,
1710                                               DecoratorSet decorators) {
1711   C2AccessValuePtr addr(adr, adr_type);
1712   C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
1713                         bt, obj, addr, alias_idx);
1714   if (access.is_raw()) {
1715     return _barrier_set->BarrierSetC2::atomic_cmpxchg_bool_at(access, expected_val, new_val, value_type);
1716   } else {
1717     return _barrier_set->atomic_cmpxchg_bool_at(access, expected_val, new_val, value_type);
1718   }
1719 }
1720 
1721 Node* GraphKit::access_atomic_xchg_at(Node* obj,
1722                                       Node* adr,
1723                                       const TypePtr* adr_type,
1724                                       int alias_idx,
1725                                       Node* new_val,
1726                                       const Type* value_type,
1727                                       BasicType bt,
1728                                       DecoratorSet decorators) {
1729   C2AccessValuePtr addr(adr, adr_type);
1730   C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
1731                         bt, obj, addr, alias_idx);
1732   if (access.is_raw()) {
1733     return _barrier_set->BarrierSetC2::atomic_xchg_at(access, new_val, value_type);
1734   } else {
1735     return _barrier_set->atomic_xchg_at(access, new_val, value_type);
1736   }
1737 }
1738 
1739 Node* GraphKit::access_atomic_add_at(Node* obj,
1740                                      Node* adr,
1741                                      const TypePtr* adr_type,
1742                                      int alias_idx,
1743                                      Node* new_val,
1744                                      const Type* value_type,
1745                                      BasicType bt,
1746                                      DecoratorSet decorators) {
1747   C2AccessValuePtr addr(adr, adr_type);
1748   C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
1749   if (access.is_raw()) {
1750     return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
1751   } else {
1752     return _barrier_set->atomic_add_at(access, new_val, value_type);
1753   }
1754 }
1755 
1756 void GraphKit::access_clone(Node* src_base, Node* dst_base, Node* countx, bool is_array) {
1757   return _barrier_set->clone(this, src_base, dst_base, countx, is_array);
1758 }
1759 
1760 //-------------------------array_element_address-------------------------
1761 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1762                                       const TypeInt* sizetype, Node* ctrl) {
1763   uint shift  = exact_log2(type2aelembytes(elembt));
1764   ciKlass* arytype_klass = _gvn.type(ary)->is_aryptr()->klass();
1765   if (arytype_klass != NULL && arytype_klass->is_flat_array_klass()) {
1766     ciFlatArrayKlass* vak = arytype_klass->as_flat_array_klass();
1767     shift = vak->log2_element_size();
1768   }
1769   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1770 
1771   // short-circuit a common case (saves lots of confusing waste motion)
1772   jint idx_con = find_int_con(idx, -1);
1773   if (idx_con >= 0) {
1774     intptr_t offset = header + ((intptr_t)idx_con << shift);
1775     return basic_plus_adr(ary, offset);
1776   }
1777 
1778   // must be correct type for alignment purposes
1779   Node* base  = basic_plus_adr(ary, header);
1780   idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1781   Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1782   return basic_plus_adr(ary, base, scale);
1783 }
1784 
1785 //-------------------------load_array_element-------------------------
1786 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
1787   const Type* elemtype = arytype->elem();
1788   BasicType elembt = elemtype->array_element_basic_type();
1789   assert(elembt != T_INLINE_TYPE, "inline types are not supported by this method");
1790   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1791   if (elembt == T_NARROWOOP) {
1792     elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1793   }
1794   Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1795   return ld;
1796 }
1797 
1798 //-------------------------set_arguments_for_java_call-------------------------
1799 // Arguments (pre-popped from the stack) are taken from the JVMS.
1800 void GraphKit::set_arguments_for_java_call(CallJavaNode* call, bool is_late_inline) {
1801   PreserveReexecuteState preexecs(this);
1802   if (EnableValhalla) {
1803     // Make sure the call is re-executed, if buffering of inline type arguments triggers deoptimization
1804     jvms()->set_should_reexecute(true);
1805     int arg_size = method()->get_declared_signature_at_bci(bci())->arg_size_for_bc(java_bc());
1806     inc_sp(arg_size);
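    // The arguments were pre-popped from the stack (see the comment above);
    // restoring sp keeps them in the JVM state for re-execution.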
1807   }
1808   // Add the call arguments
1809   const TypeTuple* domain = call->tf()->domain_sig();
1810   ExtendedSignature sig_cc = ExtendedSignature(call->method()->get_sig_cc(), SigEntryFilter());
1811   uint nargs = domain->cnt();
1812   for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
1813     Node* arg = argument(i-TypeFunc::Parms);
1814     const Type* t = domain->field_at(i);
1815     if (call->method()->has_scalarized_args() && t->is_inlinetypeptr() && !t->maybe_null()) {
1816       // We don't pass inline type arguments by reference but instead pass each field of the inline type
1817       InlineTypeNode* vt = arg->as_InlineType();
1818       vt->pass_fields(this, call, sig_cc, idx);
1819       // If an inline type argument is passed as fields, attach the Method* to the call site
1820       // to be able to access the extended signature later via attached_method_before_pc().
1821       // For example, see CompiledMethod::preserve_callee_argument_oops().
1822       call->set_override_symbolic_info(true);
1823       continue;
1824     } else if (arg->is_InlineType()) {
1825       // Pass inline type argument via oop to callee
1826       arg = arg->as_InlineType()->buffer(this);
1827       if (!is_late_inline) {
1828         arg = arg->as_InlineTypePtr()->get_oop();
1829       }
1830     }
1831     call->init_req(idx++, arg);
1832     // Skip reserved arguments
1833     BasicType bt = t->basic_type();
1834     while (SigEntry::next_is_reserved(sig_cc, bt, true)) {
1835       call->init_req(idx++, top());
1836       if (type2size[bt] == 2) {
1837         call->init_req(idx++, top());
1838       }
1839     }
1840   }
1841 }
1842 
1843 //---------------------------set_edges_for_java_call---------------------------
1844 // Connect a newly created call into the current JVMS.
// (The return value, if any, is produced later by set_results_for_java_call.)
1846 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1847 
1848   // Add the predefined inputs:
1849   call->init_req( TypeFunc::Control, control() );
1850   call->init_req( TypeFunc::I_O    , i_o() );
1851   call->init_req( TypeFunc::Memory , reset_memory() );
1852   call->init_req( TypeFunc::FramePtr, frameptr() );
1853   call->init_req( TypeFunc::ReturnAdr, top() );
1854 
1855   add_safepoint_edges(call, must_throw);
1856 
1857   Node* xcall = _gvn.transform(call);
1858 
1859   if (xcall == top()) {
1860     set_control(top());
1861     return;
1862   }
1863   assert(xcall == call, "call identity is stable");
1864 
1865   // Re-use the current map to produce the result.
1866 
1867   set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
1868   set_i_o(    _gvn.transform(new ProjNode(call, TypeFunc::I_O    , separate_io_proj)));
1869   set_all_memory_call(xcall, separate_io_proj);
1870 
1871   //return xcall;   // no need, caller already has it
1872 }
1873 
1874 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {
1875   if (stopped())  return top();  // maybe the call folded up?
1876 
1877   // Note:  Since any out-of-line call can produce an exception,
1878   // we always insert an I_O projection from the call into the result.
1879 
1880   make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);
1881 
1882   if (separate_io_proj) {
1883     // The caller requested separate projections be used by the fall
1884     // through and exceptional paths, so replace the projections for
1885     // the fall through path.
1886     set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1887     set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1888   }
1889 
1890   // Capture the return value, if any.
1891   Node* ret;
1892   if (call->method() == NULL || call->method()->return_type()->basic_type() == T_VOID) {
1893     ret = top();
1894   } else if (call->tf()->returns_inline_type_as_fields()) {
    // Return of multiple values (inline type fields): we create an
    // InlineType node; each field is a projection from the call.
1897     ciInlineKlass* vk = call->method()->return_type()->as_inline_klass();
1898     const Array<SigEntry>* sig_array = vk->extended_sig();
1899     GrowableArray<SigEntry> sig = GrowableArray<SigEntry>(sig_array->length());
1900     sig.appendAll(sig_array);
1901     ExtendedSignature sig_cc = ExtendedSignature(&sig, SigEntryFilter());
1902     uint base_input = TypeFunc::Parms + 1;
1903     ret = InlineTypeNode::make_from_multi(this, call, sig_cc, vk, base_input, false);
1904   } else {
1905     ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1906   }
1907 
1908   return ret;
1909 }
1910 
1911 //--------------------set_predefined_input_for_runtime_call--------------------
1912 // Reading and setting the memory state is way conservative here.
1913 // The real problem is that I am not doing real Type analysis on memory,
1914 // so I cannot distinguish card mark stores from other stores.  Across a GC
1915 // point the Store Barrier and the card mark memory has to agree.  I cannot
1916 // have a card mark store and its barrier split across the GC point from
1917 // either above or below.  Here I get that to happen by reading ALL of memory.
1918 // A better answer would be to separate out card marks from other memory.
1919 // For now, return the input memory state, so that it can be reused
1920 // after the call, if this call has restricted memory effects.
1921 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {
1922   // Set fixed predefined input arguments
1923   Node* memory = reset_memory();
1924   Node* m = narrow_mem == NULL ? memory : narrow_mem;
1925   call->init_req( TypeFunc::Control,   control()  );
1926   call->init_req( TypeFunc::I_O,       top()      ); // does no i/o
1927   call->init_req( TypeFunc::Memory,    m          ); // may gc ptrs
1928   call->init_req( TypeFunc::FramePtr,  frameptr() );
1929   call->init_req( TypeFunc::ReturnAdr, top()      );
1930   return memory;
1931 }
1932 
1933 //-------------------set_predefined_output_for_runtime_call--------------------
1934 // Set control and memory (not i_o) from the call.
1935 // If keep_mem is not NULL, use it for the output state,
1936 // except for the RawPtr output of the call, if hook_mem is TypeRawPtr::BOTTOM.
1937 // If hook_mem is NULL, this call produces no memory effects at all.
1938 // If hook_mem is a Java-visible memory slice (such as arraycopy operands),
1939 // then only that memory slice is taken from the call.
1940 // In the last case, we must put an appropriate memory barrier before
1941 // the call, so as to create the correct anti-dependencies on loads
1942 // preceding the call.
1943 void GraphKit::set_predefined_output_for_runtime_call(Node* call,
1944                                                       Node* keep_mem,
1945                                                       const TypePtr* hook_mem) {
1946   // no i/o
1947   set_control(_gvn.transform( new ProjNode(call,TypeFunc::Control) ));
1948   if (keep_mem) {
1949     // First clone the existing memory state
1950     set_all_memory(keep_mem);
1951     if (hook_mem != NULL) {
1952       // Make memory for the call
1953       Node* mem = _gvn.transform( new ProjNode(call, TypeFunc::Memory) );
1954       // Set the RawPtr memory state only.  This covers all the heap top/GC stuff
1955       // We also use hook_mem to extract specific effects from arraycopy stubs.
1956       set_memory(mem, hook_mem);
1957     }
1958     // ...else the call has NO memory effects.
1959 
1960     // Make sure the call advertises its memory effects precisely.
1961     // This lets us build accurate anti-dependences in gcm.cpp.
1962     assert(C->alias_type(call->adr_type()) == C->alias_type(hook_mem),
1963            "call node must be constructed correctly");
1964   } else {
1965     assert(hook_mem == NULL, "");
1966     // This is not a "slow path" call; all memory comes from the call.
1967     set_all_memory_call(call);
1968   }
1969 }
1970 
1971 // Keep track of MergeMems feeding into other MergeMems
1972 static void add_mergemem_users_to_worklist(Unique_Node_List& wl, Node* mem) {
1973   if (!mem->is_MergeMem()) {
1974     return;
1975   }
1976   for (SimpleDUIterator i(mem); i.has_next(); i.next()) {
1977     Node* use = i.get();
1978     if (use->is_MergeMem()) {
1979       wl.push(use);
1980     }
1981   }
1982 }
1983 
1984 // Replace the call with the current state of the kit.
1985 void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) {
1986   JVMState* ejvms = NULL;
1987   if (has_exceptions()) {
1988     ejvms = transfer_exceptions_into_jvms();
1989   }
1990 
1991   ReplacedNodes replaced_nodes = map()->replaced_nodes();
1992   ReplacedNodes replaced_nodes_exception;
1993   Node* ex_ctl = top();
1994 
1995   SafePointNode* final_state = stop();
1996 
1997   // Find all the needed outputs of this call
1998   CallProjections* callprojs = call->extract_projections(true);
1999 
2000   Unique_Node_List wl;
2001   Node* init_mem = call->in(TypeFunc::Memory);
2002   Node* final_mem = final_state->in(TypeFunc::Memory);
2003   Node* final_ctl = final_state->in(TypeFunc::Control);
2004   Node* final_io = final_state->in(TypeFunc::I_O);
2005 
2006   // Replace all the old call edges with the edges from the inlining result
2007   if (callprojs->fallthrough_catchproj != NULL) {
2008     C->gvn_replace_by(callprojs->fallthrough_catchproj, final_ctl);
2009   }
2010   if (callprojs->fallthrough_memproj != NULL) {
2011     if (final_mem->is_MergeMem()) {
      // The parser's exit-state MergeMem was not transformed but may be optimized
2013       final_mem = _gvn.transform(final_mem);
2014     }
2015     C->gvn_replace_by(callprojs->fallthrough_memproj,   final_mem);
2016     add_mergemem_users_to_worklist(wl, final_mem);
2017   }
2018   if (callprojs->fallthrough_ioproj != NULL) {
2019     C->gvn_replace_by(callprojs->fallthrough_ioproj,    final_io);
2020   }
2021 
2022   // Replace the result with the new result if it exists and is used
2023   if (callprojs->resproj[0] != NULL && result != NULL) {
2024     assert(callprojs->nb_resproj == 1, "unexpected number of results");
2025     C->gvn_replace_by(callprojs->resproj[0], result);
2026   }
2027 
2028   if (ejvms == NULL) {
    // No exception edges; simply kill off those paths
2030     if (callprojs->catchall_catchproj != NULL) {
2031       C->gvn_replace_by(callprojs->catchall_catchproj, C->top());
2032     }
2033     if (callprojs->catchall_memproj != NULL) {
2034       C->gvn_replace_by(callprojs->catchall_memproj,   C->top());
2035     }
2036     if (callprojs->catchall_ioproj != NULL) {
2037       C->gvn_replace_by(callprojs->catchall_ioproj,    C->top());
2038     }
2039     // Replace the old exception object with top
2040     if (callprojs->exobj != NULL) {
2041       C->gvn_replace_by(callprojs->exobj, C->top());
2042     }
2043   } else {
2044     GraphKit ekit(ejvms);
2045 
2046     // Load my combined exception state into the kit, with all phis transformed:
2047     SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
2048     replaced_nodes_exception = ex_map->replaced_nodes();
2049 
2050     Node* ex_oop = ekit.use_exception_state(ex_map);
2051 
2052     if (callprojs->catchall_catchproj != NULL) {
2053       C->gvn_replace_by(callprojs->catchall_catchproj, ekit.control());
2054       ex_ctl = ekit.control();
2055     }
2056     if (callprojs->catchall_memproj != NULL) {
2057       Node* ex_mem = ekit.reset_memory();
2058       C->gvn_replace_by(callprojs->catchall_memproj,   ex_mem);
2059       add_mergemem_users_to_worklist(wl, ex_mem);
2060     }
2061     if (callprojs->catchall_ioproj != NULL) {
2062       C->gvn_replace_by(callprojs->catchall_ioproj,    ekit.i_o());
2063     }
2064 
2065     // Replace the old exception object with the newly created one
2066     if (callprojs->exobj != NULL) {
2067       C->gvn_replace_by(callprojs->exobj, ex_oop);
2068     }
2069   }
2070 
2071   // Disconnect the call from the graph
2072   call->disconnect_inputs(NULL, C);
2073   C->gvn_replace_by(call, C->top());
2074 
2075   // Clean up any MergeMems that feed other MergeMems since the
2076   // optimizer doesn't like that.
2077   while (wl.size() > 0) {
2078     _gvn.transform(wl.pop());
2079   }
2080 
2081   if (callprojs->fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) {
2082     replaced_nodes.apply(C, final_ctl);
2083   }
2084   if (!ex_ctl->is_top() && do_replaced_nodes) {
2085     replaced_nodes_exception.apply(C, ex_ctl);
2086   }
2087 }
2088 
2089 
2090 //------------------------------increment_counter------------------------------
2091 // for statistics: increment a VM counter by 1
2092 
2093 void GraphKit::increment_counter(address counter_addr) {
2094   Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
2095   increment_counter(adr1);
2096 }
2097 
2098 void GraphKit::increment_counter(Node* counter_addr) {
2099   int adr_type = Compile::AliasIdxRaw;
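  // The counter lives in raw (off-heap) memory, hence the raw alias index.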
2100   Node* ctrl = control();
2101   Node* cnt  = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
2102   Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(1)));
2103   store_to_memory(ctrl, counter_addr, incr, T_INT, adr_type, MemNode::unordered);
2104 }
2105 
2106 
2107 //------------------------------uncommon_trap----------------------------------
2108 // Bail out to the interpreter in mid-method.  Implemented by calling the
2109 // uncommon_trap blob.  This helper function inserts a runtime call with the
2110 // right debug info.
2111 void GraphKit::uncommon_trap(int trap_request,
2112                              ciKlass* klass, const char* comment,
2113                              bool must_throw,
2114                              bool keep_exact_action) {
2115   if (failing())  stop();
2116   if (stopped())  return; // trap reachable?
2117 
2118   // Note:  If ProfileTraps is true, and if a deopt. actually
2119   // occurs here, the runtime will make sure an MDO exists.  There is
2120   // no need to call method()->ensure_method_data() at this point.
2121 
2122   // Set the stack pointer to the right value for reexecution:
2123   set_sp(reexecute_sp());
2124 
2125 #ifdef ASSERT
2126   if (!must_throw) {
2127     // Make sure the stack has at least enough depth to execute
2128     // the current bytecode.
2129     int inputs, ignored_depth;
2130     if (compute_stack_effects(inputs, ignored_depth)) {
2131       assert(sp() >= inputs, "must have enough JVMS stack to execute %s: sp=%d, inputs=%d",
2132              Bytecodes::name(java_bc()), sp(), inputs);
2133     }
2134   }
2135 #endif
2136 
2137   Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
2138   Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
2139 
2140   switch (action) {
2141   case Deoptimization::Action_maybe_recompile:
2142   case Deoptimization::Action_reinterpret:
2143     // Temporary fix for 6529811 to allow virtual calls to be sure they
2144     // get the chance to go from mono->bi->mega
2145     if (!keep_exact_action &&
2146         Deoptimization::trap_request_index(trap_request) < 0 &&
2147         too_many_recompiles(reason)) {
2148       // This BCI is causing too many recompilations.
2149       if (C->log() != NULL) {
2150         C->log()->elem("observe that='trap_action_change' reason='%s' from='%s' to='none'",
2151                 Deoptimization::trap_reason_name(reason),
2152                 Deoptimization::trap_action_name(action));
2153       }
2154       action = Deoptimization::Action_none;
2155       trap_request = Deoptimization::make_trap_request(reason, action);
2156     } else {
2157       C->set_trap_can_recompile(true);
2158     }
2159     break;
2160   case Deoptimization::Action_make_not_entrant:
2161     C->set_trap_can_recompile(true);
2162     break;
2163   case Deoptimization::Action_none:
2164   case Deoptimization::Action_make_not_compilable:
2165     break;
2166   default:
2167 #ifdef ASSERT
2168     fatal("unknown action %d: %s", action, Deoptimization::trap_action_name(action));
2169 #endif
2170     break;
2171   }
2172 
2173   if (TraceOptoParse) {
2174     char buf[100];
2175     tty->print_cr("Uncommon trap %s at bci:%d",
2176                   Deoptimization::format_trap_request(buf, sizeof(buf),
2177                                                       trap_request), bci());
2178   }
2179 
2180   CompileLog* log = C->log();
2181   if (log != NULL) {
2182     int kid = (klass == NULL)? -1: log->identify(klass);
2183     log->begin_elem("uncommon_trap bci='%d'", bci());
2184     char buf[100];
2185     log->print(" %s", Deoptimization::format_trap_request(buf, sizeof(buf),
2186                                                           trap_request));
2187     if (kid >= 0)         log->print(" klass='%d'", kid);
2188     if (comment != NULL)  log->print(" comment='%s'", comment);
2189     log->end_elem();
2190   }
2191 
2192   // Make sure any guarding test views this path as very unlikely
2193   Node *i0 = control()->in(0);
2194   if (i0 != NULL && i0->is_If()) {        // Found a guarding if test?
2195     IfNode *iff = i0->as_If();
2196     float f = iff->_prob;   // Get prob
2197     if (control()->Opcode() == Op_IfTrue) {
2198       if (f > PROB_UNLIKELY_MAG(4))
2199         iff->_prob = PROB_MIN;
2200     } else {
2201       if (f < PROB_LIKELY_MAG(4))
2202         iff->_prob = PROB_MAX;
2203     }
2204   }
2205 
2206   // Clear out dead values from the debug info.
2207   kill_dead_locals();
2208 
2209   // Now insert the uncommon trap subroutine call
2210   address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();
2211   const TypePtr* no_memory_effects = NULL;
2212   // Pass the index of the class to be loaded
2213   Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON |
2214                                  (must_throw ? RC_MUST_THROW : 0),
2215                                  OptoRuntime::uncommon_trap_Type(),
2216                                  call_addr, "uncommon_trap", no_memory_effects,
2217                                  intcon(trap_request));
2218   assert(call->as_CallStaticJava()->uncommon_trap_request() == trap_request,
2219          "must extract request correctly from the graph");
2220   assert(trap_request != 0, "zero value reserved by uncommon_trap_request");
2221 
2222   call->set_req(TypeFunc::ReturnAdr, returnadr());
2223   // The debug info is the only real input to this call.
2224 
2225   // Halt-and-catch fire here.  The above call should never return!
2226   HaltNode* halt = new HaltNode(control(), frameptr(), "uncommon trap returned which should never happen"
2227                                                        PRODUCT_ONLY(COMMA /*reachable*/false));
2228   _gvn.set_type_bottom(halt);
2229   root()->add_req(halt);
2230 
2231   stop_and_kill_map();
2232 }
2233 
2234 
2235 //--------------------------just_allocated_object------------------------------
2236 // Report the object that was just allocated.
2237 // It must be the case that there are no intervening safepoints.
2238 // We use this to determine if an object is so "fresh" that
2239 // it does not require card marks.
2240 Node* GraphKit::just_allocated_object(Node* current_control) {
2241   Node* ctrl = current_control;
  // Object::<init> is invoked after allocation; most of the invoke nodes
  // will be reduced, but a region node is kept at parse time. We check
  // the pattern and skip the region node if it degenerated to a copy.
2245   if (ctrl != NULL && ctrl->is_Region() && ctrl->req() == 2 &&
2246       ctrl->as_Region()->is_copy()) {
2247     ctrl = ctrl->as_Region()->is_copy();
2248   }
2249   if (C->recent_alloc_ctl() == ctrl) {
2250    return C->recent_alloc_obj();
2251   }
2252   return NULL;
2253 }
2254 
2255 
2256 /**
2257  * Record profiling data exact_kls for Node n with the type system so
2258  * that it can propagate it (speculation)
2259  *
2260  * @param n          node that the type applies to
2261  * @param exact_kls  type from profiling
 * @param ptr_kind   did profiling see null?
2263  *
2264  * @return           node with improved type
2265  */
2266 Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind) {
2267   const Type* current_type = _gvn.type(n);
2268   assert(UseTypeSpeculation, "type speculation must be on");
2269 
2270   const TypePtr* speculative = current_type->speculative();
2271 
2272   // Should the klass from the profile be recorded in the speculative type?
2273   if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
2274     const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);
2275     const TypeOopPtr* xtype = tklass->as_instance_type();
2276     assert(xtype->klass_is_exact(), "Should be exact");
2277     // Any reason to believe n is not null (from this profiling or a previous one)?
2278     assert(ptr_kind != ProfileAlwaysNull, "impossible here");
2279     const TypePtr* ptr = (ptr_kind == ProfileMaybeNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
2280     // record the new speculative type's depth
2281     speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
2282     speculative = speculative->with_inline_depth(jvms()->depth());
2283   } else if (current_type->would_improve_ptr(ptr_kind)) {
    // Profiling tells us whether null was always or never seen, so we
    // can sharpen the speculative pointer type accordingly.
2286     if (ptr_kind == ProfileAlwaysNull) {
2287       speculative = TypePtr::NULL_PTR;
2288     } else {
2289       assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement");
2290       const TypePtr* ptr = TypePtr::NOTNULL;
2291       if (speculative != NULL) {
2292         speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
2293       } else {
2294         speculative = ptr;
2295       }
2296     }
2297   }
2298 
2299   if (speculative != current_type->speculative()) {
2300     // Build a type with a speculative type (what we think we know
2301     // about the type but will need a guard when we use it)
2302     const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, speculative);
2303     // We're changing the type, we need a new CheckCast node to carry
2304     // the new type. The new type depends on the control: what
2305     // profiling tells us is only valid from here as far as we can
2306     // tell.
2307     Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
2308     cast = _gvn.transform(cast);
2309     replace_in_map(n, cast);
2310     n = cast;
2311   }
2312 
2313   return n;
2314 }
2315 
2316 /**
2317  * Record profiling data from receiver profiling at an invoke with the
2318  * type system so that it can propagate it (speculation)
2319  *
2320  * @param n  receiver node
2321  *
2322  * @return   node with improved type
2323  */
2324 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
2325   if (!UseTypeSpeculation) {
2326     return n;
2327   }
2328   ciKlass* exact_kls = profile_has_unique_klass();
2329   ProfilePtrKind ptr_kind = ProfileMaybeNull;
2330   if ((java_bc() == Bytecodes::_checkcast ||
2331        java_bc() == Bytecodes::_instanceof ||
2332        java_bc() == Bytecodes::_aastore) &&
2333       method()->method_data()->is_mature()) {
2334     ciProfileData* data = method()->method_data()->bci_to_data(bci());
2335     if (data != NULL) {
2336       if (java_bc() == Bytecodes::_aastore) {
2337         ciKlass* array_type = NULL;
2338         ciKlass* element_type = NULL;
2339         ProfilePtrKind element_ptr = ProfileMaybeNull;
2340         bool flat_array = true;
2341         bool null_free_array = true;
2342         method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
2343         exact_kls = element_type;
2344         ptr_kind = element_ptr;
2345       } else {
2346         if (!data->as_BitData()->null_seen()) {
2347           ptr_kind = ProfileNeverNull;
2348         } else {
2349           assert(data->is_ReceiverTypeData(), "bad profile data type");
2350           ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
2351           uint i = 0;
2352           for (; i < call->row_limit(); i++) {
2353             ciKlass* receiver = call->receiver(i);
2354             if (receiver != NULL) {
2355               break;
2356             }
2357           }
2358           ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
2359         }
2360       }
2361     }
2362   }
2363   return record_profile_for_speculation(n, exact_kls, ptr_kind);
2364 }
2365 
2366 /**
2367  * Record profiling data from argument profiling at an invoke with the
2368  * type system so that it can propagate it (speculation)
2369  *
2370  * @param dest_method  target method for the call
2371  * @param bc           what invoke bytecode is this?
2372  */
2373 void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
2374   if (!UseTypeSpeculation) {
2375     return;
2376   }
2377   const TypeFunc* tf    = TypeFunc::make(dest_method);
2378   int             nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
2379   int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
2380   for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
2381     const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
2382     if (is_reference_type(targ->basic_type())) {
2383       ProfilePtrKind ptr_kind = ProfileMaybeNull;
2384       ciKlass* better_type = NULL;
2385       if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
2386         record_profile_for_speculation(argument(j), better_type, ptr_kind);
2387       }
2388       i++;
2389     }
2390   }
2391 }
2392 
2393 /**
2394  * Record profiling data from parameter profiling at an invoke with
2395  * the type system so that it can propagate it (speculation)
2396  */
2397 void GraphKit::record_profiled_parameters_for_speculation() {
2398   if (!UseTypeSpeculation) {
2399     return;
2400   }
2401   for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
2402     if (_gvn.type(local(i))->isa_oopptr()) {
2403       ProfilePtrKind ptr_kind = ProfileMaybeNull;
2404       ciKlass* better_type = NULL;
2405       if (method()->parameter_profiled_type(j, better_type, ptr_kind)) {
2406         record_profile_for_speculation(local(i), better_type, ptr_kind);
2407       }
2408       j++;
2409     }
2410   }
2411 }
2412 
2413 /**
2414  * Record profiling data from return value profiling at an invoke with
2415  * the type system so that it can propagate it (speculation)
2416  */
2417 void GraphKit::record_profiled_return_for_speculation() {
2418   if (!UseTypeSpeculation) {
2419     return;
2420   }
2421   ProfilePtrKind ptr_kind = ProfileMaybeNull;
2422   ciKlass* better_type = NULL;
2423   if (method()->return_profiled_type(bci(), better_type, ptr_kind)) {
2424     // If profiling reports a single type for the return value,
2425     // feed it to the type system so it can propagate it as a
2426     // speculative type
2427     record_profile_for_speculation(stack(sp()-1), better_type, ptr_kind);
2428   }
2429 }
2430 
2431 void GraphKit::round_double_result(ciMethod* dest_method) {
2432   if (Matcher::strict_fp_requires_explicit_rounding) {
2433     // If a strict caller invokes a non-strict callee, round a double result.
2434     // A non-strict method may return a double value which has an extended exponent,
2435     // but this must not be visible in a caller which is strict.
2436     BasicType result_type = dest_method->return_type()->basic_type();
2437     assert(method() != NULL, "must have caller context");
2438     if( result_type == T_DOUBLE && method()->is_strict() && !dest_method->is_strict() ) {
2439       // Destination method's return value is on top of stack
2440       // dstore_rounding() does gvn.transform
2441       Node *result = pop_pair();
2442       result = dstore_rounding(result);
2443       push_pair(result);
2444     }
2445   }
2446 }
2447 
2448 void GraphKit::round_double_arguments(ciMethod* dest_method) {
2449   if (Matcher::strict_fp_requires_explicit_rounding) {
2450     // (Note:  TypeFunc::make has a cache that makes this fast.)
2451     const TypeFunc* tf    = TypeFunc::make(dest_method);
2452     int             nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
2453     for (int j = 0; j < nargs; j++) {
2454       const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
2455       if (targ->basic_type() == T_DOUBLE) {
2456         // If any parameters are doubles, they must be rounded before
2457         // the call, dstore_rounding does gvn.transform
2458         Node *arg = argument(j);
2459         arg = dstore_rounding(arg);
2460         set_argument(j, arg);
2461       }
2462     }
2463   }
2464 }
2465 
2466 // rounding for strict float precision conformance
2467 Node* GraphKit::precision_rounding(Node* n) {
2468   if (Matcher::strict_fp_requires_explicit_rounding) {
2469 #ifdef IA32
2470     if (_method->flags().is_strict() && UseSSE == 0) {
2471       return _gvn.transform(new RoundFloatNode(0, n));
2472     }
2473 #else
2474     Unimplemented();
2475 #endif // IA32
2476   }
2477   return n;
2478 }
2479 
2480 // rounding for strict double precision conformance
2481 Node* GraphKit::dprecision_rounding(Node *n) {
2482   if (Matcher::strict_fp_requires_explicit_rounding) {
2483 #ifdef IA32
2484     if (_method->flags().is_strict() && UseSSE < 2) {
2485       return _gvn.transform(new RoundDoubleNode(0, n));
2486     }
2487 #else
2488     Unimplemented();
2489 #endif // IA32
2490   }
2491   return n;
2492 }
2493 
2494 // rounding for non-strict double stores
2495 Node* GraphKit::dstore_rounding(Node* n) {
2496   if (Matcher::strict_fp_requires_explicit_rounding) {
2497 #ifdef IA32
2498     if (UseSSE < 2) {
2499       return _gvn.transform(new RoundDoubleNode(0, n));
2500     }
2501 #else
2502     Unimplemented();
2503 #endif // IA32
2504   }
2505   return n;
2506 }
2507 
2508 //=============================================================================
2509 // Generate a fast path/slow path idiom.  Graph looks like:
2510 // [foo] indicates that 'foo' is a parameter
2511 //
2512 //              [in]     NULL
2513 //                 \    /
2514 //                  CmpP
2515 //                  Bool ne
2516 //                   If
2517 //                  /  \
2518 //              True    False-<2>
2519 //              / |
2520 //             /  cast_not_null
2521 //           Load  |    |   ^
2522 //        [fast_test]   |   |
2523 // gvn to   opt_test    |   |
2524 //          /    \      |  <1>
2525 //      True     False  |
2526 //        |         \\  |
2527 //   [slow_call]     \[fast_result]
2528 //    Ctl   Val       \      \
2529 //     |               \      \
2530 //    Catch       <1>   \      \
2531 //   /    \        ^     \      \
2532 //  Ex    No_Ex    |      \      \
2533 //  |       \   \  |       \ <2>  \
2534 //  ...      \  [slow_res] |  |    \   [null_result]
2535 //            \         \--+--+---  |  |
2536 //             \           | /    \ | /
2537 //              --------Region     Phi
2538 //
2539 //=============================================================================
2540 // Code is structured as a series of driver functions all called 'do_XXX' that
2541 // call a set of helper functions.  Helper functions first, then drivers.
2542 
2543 //------------------------------null_check_oop---------------------------------
2544 // Null check oop.  Set null-path control into Region in slot 3.
// Make the cast-not-null result use the other, not-null control.  Return the cast.
2546 Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
2547                                bool never_see_null,
2548                                bool safe_for_replace,
2549                                bool speculative) {
2550   // Initial NULL check taken path
2551   (*null_control) = top();
2552   Node* cast = null_check_common(value, T_OBJECT, false, null_control, speculative);
2553 
2554   // Generate uncommon_trap:
2555   if (never_see_null && (*null_control) != top()) {
2556     // If we see an unexpected null at a check-cast we record it and force a
2557     // recompile; the offending check-cast will be compiled to handle NULLs.
2558     // If we see more than one offending BCI, then all checkcasts in the
2559     // method will be compiled to handle NULLs.
2560     PreserveJVMState pjvms(this);
2561     set_control(*null_control);
2562     replace_in_map(value, null());
2563     Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculative);
2564     uncommon_trap(reason,
2565                   Deoptimization::Action_make_not_entrant);
2566     (*null_control) = top();    // NULL path is dead
2567   }
2568   if ((*null_control) == top() && safe_for_replace) {
2569     replace_in_map(value, cast);
2570   }
2571 
2572   // Cast away null-ness on the result
2573   return cast;
2574 }
2575 
2576 //------------------------------opt_iff----------------------------------------
2577 // Optimize the fast-check IfNode.  Set the fast-path region slot 2.
2578 // Return slow-path control.
2579 Node* GraphKit::opt_iff(Node* region, Node* iff) {
2580   IfNode *opt_iff = _gvn.transform(iff)->as_If();
2581 
2582   // Fast path taken; set region slot 2
2583   Node *fast_taken = _gvn.transform( new IfFalseNode(opt_iff) );
2584   region->init_req(2,fast_taken); // Capture fast-control
2585 
2586   // Fast path not-taken, i.e. slow path
2587   Node *slow_taken = _gvn.transform( new IfTrueNode(opt_iff) );
2588   return slow_taken;
2589 }
2590 
2591 //-----------------------------make_runtime_call-------------------------------
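// (Illustrative use, taken from uncommon_trap() above:
//    make_runtime_call(RC_NO_LEAF | RC_UNCOMMON | (must_throw ? RC_MUST_THROW : 0),
//                      OptoRuntime::uncommon_trap_Type(), call_addr,
//                      "uncommon_trap", no_memory_effects, intcon(trap_request));)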
2592 Node* GraphKit::make_runtime_call(int flags,
2593                                   const TypeFunc* call_type, address call_addr,
2594                                   const char* call_name,
2595                                   const TypePtr* adr_type,
2596                                   // The following parms are all optional.
2597                                   // The first NULL ends the list.
2598                                   Node* parm0, Node* parm1,
2599                                   Node* parm2, Node* parm3,
2600                                   Node* parm4, Node* parm5,
2601                                   Node* parm6, Node* parm7) {
2602   assert(call_addr != NULL, "must not call NULL targets");
2603 
2604   // Slow-path call
2605   bool is_leaf = !(flags & RC_NO_LEAF);
2606   bool has_io  = (!is_leaf && !(flags & RC_NO_IO));
2607   if (call_name == NULL) {
2608     assert(!is_leaf, "must supply name for leaf");
2609     call_name = OptoRuntime::stub_name(call_addr);
2610   }
2611   CallNode* call;
2612   if (!is_leaf) {
2613     call = new CallStaticJavaNode(call_type, call_addr, call_name,
2614                                            bci(), adr_type);
2615   } else if (flags & RC_NO_FP) {
2616     call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
2617   } else {
2618     call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
2619   }
2620 
2621   // The following is similar to set_edges_for_java_call,
2622   // except that the memory effects of the call are restricted to AliasIdxRaw.
2623 
2624   // Slow path call has no side-effects, uses few values
2625   bool wide_in  = !(flags & RC_NARROW_MEM);
2626   bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);
2627 
2628   Node* prev_mem = NULL;
2629   if (wide_in) {
2630     prev_mem = set_predefined_input_for_runtime_call(call);
2631   } else {
2632     assert(!wide_out, "narrow in => narrow out");
2633     Node* narrow_mem = memory(adr_type);
2634     prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem);
2635   }
2636 
2637   // Hook each parm in order.  Stop looking at the first NULL.
2638   if (parm0 != NULL) { call->init_req(TypeFunc::Parms+0, parm0);
2639   if (parm1 != NULL) { call->init_req(TypeFunc::Parms+1, parm1);
2640   if (parm2 != NULL) { call->init_req(TypeFunc::Parms+2, parm2);
2641   if (parm3 != NULL) { call->init_req(TypeFunc::Parms+3, parm3);
2642   if (parm4 != NULL) { call->init_req(TypeFunc::Parms+4, parm4);
2643   if (parm5 != NULL) { call->init_req(TypeFunc::Parms+5, parm5);
2644   if (parm6 != NULL) { call->init_req(TypeFunc::Parms+6, parm6);
2645   if (parm7 != NULL) { call->init_req(TypeFunc::Parms+7, parm7);
2646     /* close each nested if ===> */  } } } } } } } }
2647   assert(call->in(call->req()-1) != NULL, "must initialize all parms");
2648 
2649   if (!is_leaf) {
2650     // Non-leaves can block and take safepoints:
2651     add_safepoint_edges(call, ((flags & RC_MUST_THROW) != 0));
2652   }
2653   // Non-leaves can throw exceptions:
2654   if (has_io) {
2655     call->set_req(TypeFunc::I_O, i_o());
2656   }
2657 
2658   if (flags & RC_UNCOMMON) {
2659     // Set the count to a tiny probability.  Cf. Estimate_Block_Frequency.
2660     // (An "if" probability corresponds roughly to an unconditional count.
2661     // Sort of.)
2662     call->set_cnt(PROB_UNLIKELY_MAG(4));
2663   }
2664 
2665   Node* c = _gvn.transform(call);
2666   assert(c == call, "cannot disappear");
2667 
2668   if (wide_out) {
2669     // Slow path call has full side-effects.
2670     set_predefined_output_for_runtime_call(call);
2671   } else {
2672     // Slow path call has few side-effects, and/or sets few values.
2673     set_predefined_output_for_runtime_call(call, prev_mem, adr_type);
2674   }
2675 
2676   if (has_io) {
2677     set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O)));
2678   }
2679   return call;
2681 }
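
// Illustrative use (sketch only; the TypeFunc, target address and name below
// are placeholders, not real stubs):
//   Node* call = make_runtime_call(RC_NARROW_MEM,       // leaf is the default
//                                  call_type, stub_addr, "stub_name",
//                                  TypeRawPtr::BOTTOM,  // touch only the raw slice
//                                  parm0, parm1);       // later parms stay NULL
// Passing RC_NO_LEAF instead produces a CallStaticJavaNode with full
// safepoint edges and (unless RC_NO_IO) a hooked I/O projection.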
2682 
2683 //------------------------------merge_memory-----------------------------------
2684 // Merge memory from one path into the current memory state.
2685 void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {
2686   for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) {
2687     Node* old_slice = mms.force_memory();
2688     Node* new_slice = mms.memory2();
2689     if (old_slice != new_slice) {
2690       PhiNode* phi;
2691       if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) {
2692         if (mms.is_empty()) {
2693           // clone base memory Phi's inputs for this memory slice
2694           assert(old_slice == mms.base_memory(), "sanity");
2695           phi = PhiNode::make(region, NULL, Type::MEMORY, mms.adr_type(C));
2696           _gvn.set_type(phi, Type::MEMORY);
2697           for (uint i = 1; i < phi->req(); i++) {
2698             phi->init_req(i, old_slice->in(i));
2699           }
2700         } else {
2701           phi = old_slice->as_Phi(); // Phi was generated already
2702         }
2703       } else {
2704         phi = PhiNode::make(region, old_slice, Type::MEMORY, mms.adr_type(C));
2705         _gvn.set_type(phi, Type::MEMORY);
2706       }
2707       phi->set_req(new_path, new_slice);
2708       mms.set_memory(phi);
2709     }
2710   }
2711 }
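
// Sketch of a typical use: after a fast/slow split that merges at 'region',
// fold the slow path's memory state back into the current one:
//   region->init_req(slow_path_idx, slow_ctl);
//   merge_memory(slow_merged_mem, region, slow_path_idx);
// A memory Phi is created (or extended) only for slices that actually differ.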
2712 
2713 //------------------------------make_slow_call_ex------------------------------
2714 // Make the exception handler hookups for the slow call
2715 void GraphKit::make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize) {
2716   if (stopped())  return;
2717 
2718   // Make a catch node with just two handlers:  fall-through and catch-all
2719   Node* i_o  = _gvn.transform( new ProjNode(call, TypeFunc::I_O, separate_io_proj) );
2720   Node* catc = _gvn.transform( new CatchNode(control(), i_o, 2) );
2721   Node* norm = _gvn.transform( new CatchProjNode(catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci) );
2722   Node* excp = _gvn.transform( new CatchProjNode(catc, CatchProjNode::catch_all_index,    CatchProjNode::no_handler_bci) );
2723 
2724   { PreserveJVMState pjvms(this);
2725     set_control(excp);
2726     set_i_o(i_o);
2727 
2728     if (excp != top()) {
2729       if (deoptimize) {
2730         // Deoptimize if an exception is caught. Don't construct exception state in this case.
2731         uncommon_trap(Deoptimization::Reason_unhandled,
2732                       Deoptimization::Action_none);
2733       } else {
2734         // Create an exception state also.
2735         // Use an exact type if the caller has a specific exception.
2736         const Type* ex_type = TypeOopPtr::make_from_klass_unique(ex_klass)->cast_to_ptr_type(TypePtr::NotNull);
2737         Node*       ex_oop  = new CreateExNode(ex_type, control(), i_o);
2738         add_exception_state(make_exception_state(_gvn.transform(ex_oop)));
2739       }
2740     }
2741   }
2742 
2743   // Get the no-exception control from the CatchNode.
2744   set_control(norm);
2745 }
2746 
2747 static IfNode* gen_subtype_check_compare(Node* ctrl, Node* in1, Node* in2, BoolTest::mask test, float p, PhaseGVN& gvn, BasicType bt) {
2748   Node* cmp = NULL;
2749   switch(bt) {
2750   case T_INT: cmp = new CmpINode(in1, in2); break;
2751   case T_ADDRESS: cmp = new CmpPNode(in1, in2); break;
2752   default: fatal("unexpected comparison type %s", type2name(bt));
2753   }
2754   gvn.transform(cmp);
2755   Node* bol = gvn.transform(new BoolNode(cmp, test));
2756   IfNode* iff = new IfNode(ctrl, bol, p, COUNT_UNKNOWN);
2757   gvn.transform(iff);
2758   if (!bol->is_Con()) gvn.record_for_igvn(iff);
2759   return iff;
2760 }
2761 
2762 //-------------------------------gen_subtype_check-----------------------------
2763 // Generate a subtyping check.  Takes as input the subtype and supertype.
2764 // Returns 2 values: sets the default control() to the true path and returns
2765 // the false path.  Only reads invariant memory; sets no (visible) memory.
2766 // The PartialSubtypeCheckNode sets the hidden 1-word cache in the encoding
2767 // but that's not exposed to the optimizer.  This call also doesn't take in an
2768 // Object; if you wish to check an Object you need to load the Object's class
2769 // prior to coming here.
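// The emitted cascade is roughly (pseudocode sketch of the tests below):
//   if (sub->super_check[chk_off] == super)      goto ok;   // ~83% hit
//   if (chk_off != secondary_super_cache_offset) goto fail; // display miss
//   if (sub == super)                            goto ok;   // self check
//   if (PartialSubtypeCheck(sub, super) != 0)    goto fail; // secondary scan
//   goto ok;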
2770 Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, Node* mem, PhaseGVN& gvn) {
2771   Compile* C = gvn.C;
2772   if ((*ctrl)->is_top()) {
2773     return C->top();
2774   }
2775 
2776   // Fast check for identical types, perhaps identical constants.
2777   // The types can even be identical non-constants, in cases
2778   // involving Array.newInstance, Object.clone, etc.
2779   if (subklass == superklass)
2780     return C->top();             // false path is dead; no test needed.
2781 
2782   if (gvn.type(superklass)->singleton()) {
2783     ciKlass* superk = gvn.type(superklass)->is_klassptr()->klass();
2784     ciKlass* subk   = gvn.type(subklass)->is_klassptr()->klass();
2785 
2786     // In the common case of an exact superklass, try to fold up the
2787     // test before generating code.  You may ask, why not just generate
2788     // the code and then let it fold up?  The answer is that the generated
2789     // code will necessarily include null checks, which do not always
2790     // completely fold away.  If they are also needless, then they turn
2791     // into a performance loss.  Example:
2792     //    Foo[] fa = blah(); Foo x = fa[0]; fa[1] = x;
2793     // Here, the type of 'fa' is often exact, so the store check
2794     // of fa[1]=x will fold up, without testing the nullness of x.
2795     switch (C->static_subtype_check(superk, subk)) {
2796     case Compile::SSC_always_false:
2797       {
2798         Node* always_fail = *ctrl;
2799         *ctrl = gvn.C->top();
2800         return always_fail;
2801       }
2802     case Compile::SSC_always_true:
2803       return C->top();
2804     case Compile::SSC_easy_test:
2805       {
2806         // Just do a direct pointer compare and be done.
2807         IfNode* iff = gen_subtype_check_compare(*ctrl, subklass, superklass, BoolTest::eq, PROB_STATIC_FREQUENT, gvn, T_ADDRESS);
2808         *ctrl = gvn.transform(new IfTrueNode(iff));
2809         return gvn.transform(new IfFalseNode(iff));
2810       }
2811     case Compile::SSC_full_test:
2812       break;
2813     default:
2814       ShouldNotReachHere();
2815     }
2816   }
2817 
2818   // %%% Possible further optimization:  Even if the superklass is not exact,
2819   // if the subklass is the unique subtype of the superklass, the check
2820   // will always succeed.  We could leave a dependency behind to ensure this.
2821 
2822   // First load the super-klass's check-offset
2823   Node *p1 = gvn.transform(new AddPNode(superklass, superklass, gvn.MakeConX(in_bytes(Klass::super_check_offset_offset()))));
2824   Node* m = C->immutable_memory();
2825   Node *chk_off = gvn.transform(new LoadINode(NULL, m, p1, gvn.type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered));
2826   int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset());
2827   bool might_be_cache = (gvn.find_int_con(chk_off, cacheoff_con) == cacheoff_con);
2828 
2829   // Load from the sub-klass's super-class display list, or a 1-word cache of
2830   // the secondary superclass list, or a failing value with a sentinel offset
2831   // if the super-klass is an interface or exceptionally deep in the Java
2832   // hierarchy and we have to scan the secondary superclass list the hard way.
2833   // Worst-case type is a little odd: NULL is allowed as a result (usually
2834   // klass loads can never produce a NULL).
2835   Node *chk_off_X = chk_off;
2836 #ifdef _LP64
2837   chk_off_X = gvn.transform(new ConvI2LNode(chk_off_X));
2838 #endif
2839   Node *p2 = gvn.transform(new AddPNode(subklass, subklass, chk_off_X));
2840   // For some types like interfaces the following loadKlass is from a 1-word
2841   // cache which is mutable so can't use immutable memory.  Other
2842   // types load from the super-class display table which is immutable.
2843   Node *kmem = C->immutable_memory();
2844   // secondary_super_cache is not immutable but can be treated as such because:
2845   // - no ideal node writes to it in a way that could cause an
2846   //   incorrect/missed optimization of the following Load.
2847   // - it's a cache so, worse case, not reading the latest value
2848   //   wouldn't cause incorrect execution
2849   if (might_be_cache && mem != NULL) {
2850     kmem = mem->is_MergeMem() ? mem->as_MergeMem()->memory_at(C->get_alias_index(gvn.type(p2)->is_ptr())) : mem;
2851   }
2852   Node *nkls = gvn.transform(LoadKlassNode::make(gvn, NULL, kmem, p2, gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL));
2853 
2854   // Compile speed common case: ARE a subtype and we canNOT fail
2855   if( superklass == nkls )
2856     return C->top();             // false path is dead; no test needed.
2857 
2858   // See if we get an immediate positive hit.  Happens roughly 83% of the
2859   // time.  Test to see if the value loaded just previously from the subklass
2860   // is exactly the superklass.
2861   IfNode *iff1 = gen_subtype_check_compare(*ctrl, superklass, nkls, BoolTest::eq, PROB_LIKELY(0.83f), gvn, T_ADDRESS);
2862   Node *iftrue1 = gvn.transform( new IfTrueNode (iff1));
2863   *ctrl = gvn.transform(new IfFalseNode(iff1));
2864 
2865   // Compile speed common case: Check for being deterministic right now.  If
2866   // chk_off is a constant and not equal to cacheoff then we are NOT a
2867   // subklass.  In this case we need exactly the 1 test above and we can
2868   // return those results immediately.
2869   if (!might_be_cache) {
2870     Node* not_subtype_ctrl = *ctrl;
2871     *ctrl = iftrue1; // We need exactly the 1 test above
2872     return not_subtype_ctrl;
2873   }
2874 
2875   // Gather the various success & failures here
2876   RegionNode *r_ok_subtype = new RegionNode(4);
2877   gvn.record_for_igvn(r_ok_subtype);
2878   RegionNode *r_not_subtype = new RegionNode(3);
2879   gvn.record_for_igvn(r_not_subtype);
2880 
2881   r_ok_subtype->init_req(1, iftrue1);
2882 
2883   // Check for immediate negative hit.  Happens roughly 11% of the time (which
2884   // is roughly 63% of the remaining cases).  Test to see if the loaded
2885   // check-offset points into the subklass display list or the 1-element
2886   // cache.  If it points to the display (and NOT the cache) and the display
2887   // missed then it's not a subtype.
2888   Node *cacheoff = gvn.intcon(cacheoff_con);
2889   IfNode *iff2 = gen_subtype_check_compare(*ctrl, chk_off, cacheoff, BoolTest::ne, PROB_LIKELY(0.63f), gvn, T_INT);
2890   r_not_subtype->init_req(1, gvn.transform(new IfTrueNode (iff2)));
2891   *ctrl = gvn.transform(new IfFalseNode(iff2));
2892 
2893   // Check for self.  Very rare to get here, but it is taken 1/3 the time.
2894   // No performance impact (too rare) but allows sharing of secondary arrays
2895   // which has some footprint reduction.
2896   IfNode *iff3 = gen_subtype_check_compare(*ctrl, subklass, superklass, BoolTest::eq, PROB_LIKELY(0.36f), gvn, T_ADDRESS);
2897   r_ok_subtype->init_req(2, gvn.transform(new IfTrueNode(iff3)));
2898   *ctrl = gvn.transform(new IfFalseNode(iff3));
2899 
2900   // -- Roads not taken here: --
2901   // We could also have chosen to perform the self-check at the beginning
2902   // of this code sequence, as the assembler does.  This would not pay off
2903   // the same way, since the optimizer, unlike the assembler, can perform
2904   // static type analysis to fold away many successful self-checks.
2905   // Non-foldable self checks work better here in second position, because
2906   // the initial primary superclass check subsumes a self-check for most
2907   // types.  An exception would be a secondary type like array-of-interface,
2908   // which does not appear in its own primary supertype display.
2909   // Finally, we could have chosen to move the self-check into the
2910   // PartialSubtypeCheckNode, and from there out-of-line in a platform
2911   // dependent manner.  But it is worthwhile to have the check here,
2912   // where it can perhaps be optimized.  The cost in code space is
2913   // small (register compare, branch).
2914 
2915   // Now do a linear scan of the secondary super-klass array.  Again, no real
2916   // performance impact (too rare) but it's gotta be done.
2917   // Since the code is rarely used, there is no penalty for moving it
2918   // out of line, and it can only improve I-cache density.
2919   // The decision to inline or out-of-line this final check is platform
2920   // dependent, and is found in the AD file definition of PartialSubtypeCheck.
2921   Node* psc = gvn.transform(
2922     new PartialSubtypeCheckNode(*ctrl, subklass, superklass));
2923 
2924   IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn.zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);
2925   r_not_subtype->init_req(2, gvn.transform(new IfTrueNode (iff4)));
2926   r_ok_subtype ->init_req(3, gvn.transform(new IfFalseNode(iff4)));
2927 
2928   // Return false path; set default control to true path.
2929   *ctrl = gvn.transform(r_ok_subtype);
2930   return gvn.transform(r_not_subtype);
2931 }
2932 
2933 Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {
2934   const Type* sub_t = _gvn.type(obj_or_subklass);
2935   if (sub_t->isa_inlinetype()) {
2936     obj_or_subklass = makecon(TypeKlassPtr::make(sub_t->inline_klass()));
2937   }
2938   if (ExpandSubTypeCheckAtParseTime) {
2939     MergeMemNode* mem = merged_memory();
2940     Node* ctrl = control();
2941     Node* subklass = obj_or_subklass;
2942     if (!sub_t->isa_klassptr()) {
2943       subklass = load_object_klass(obj_or_subklass);
2944     }
2945     Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn);
2946     set_control(ctrl);
2947     return n;
2948   }
2949 
2950   Node* check = _gvn.transform(new SubTypeCheckNode(C, obj_or_subklass, superklass));
2951   Node* bol = _gvn.transform(new BoolNode(check, BoolTest::eq));
2952   IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
2953   set_control(_gvn.transform(new IfTrueNode(iff)));
2954   return _gvn.transform(new IfFalseNode(iff));
2955 }
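
// With ExpandSubTypeCheckAtParseTime the full cascade above is built during
// parsing; otherwise a single SubTypeCheckNode is emitted here and left for
// later phases (e.g. macro expansion) to fold or lower.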
2956 
2957 // Profile-driven exact type check:
2958 Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
2959                                     float prob, Node* *casted_receiver) {
2960   Node* fail = top();
2961   const Type* rec_t = _gvn.type(receiver);
2962   if (false && rec_t->isa_inlinetype()) { // shortcut for inline type receivers, currently disabled
2963     if (klass->equals(rec_t->inline_klass())) {
2964       (*casted_receiver) = receiver; // Always passes
2965     } else {
2966       (*casted_receiver) = top();    // Always fails
2967       fail = control();
2968       set_control(top());
2969     }
2970     return fail;
2971   }
2972   const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
2973   Node* recv_klass = load_object_klass(receiver);
2974   fail = type_check(recv_klass, tklass, prob);
2975   const TypeOopPtr* recv_xtype = tklass->as_instance_type();
2976   assert(recv_xtype->klass_is_exact(), "");
2977 
2978   // Subsume downstream occurrences of receiver with a cast to
2979   // recv_xtype, since now we know what the type will be.
2980   Node* cast = new CheckCastPPNode(control(), receiver, recv_xtype);
2981   Node* res = _gvn.transform(cast);
2982   if (recv_xtype->is_inlinetypeptr() && recv_xtype->inline_klass()->is_scalarizable()) {
2983     assert(!gvn().type(res)->maybe_null(), "receiver should never be null");
2984     res = InlineTypeNode::make_from_oop(this, res, recv_xtype->inline_klass());
2985   }
2986 
2987   (*casted_receiver) = res;
2988   // (User must make the replace_in_map call.)
2989 
2990   return fail;
2991 }
2992 
2993 Node* GraphKit::type_check(Node* recv_klass, const TypeKlassPtr* tklass,
2994                            float prob) {
2995   Node* want_klass = makecon(tklass);
2996   Node* cmp = _gvn.transform( new CmpPNode(recv_klass, want_klass));
2997   Node* bol = _gvn.transform( new BoolNode(cmp, BoolTest::eq) );
2998   IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
2999   set_control(  _gvn.transform( new IfTrueNode (iff)));
3000   Node* fail = _gvn.transform( new IfFalseNode(iff));
3001   return fail;
3002 }
3003 
3004 //------------------------------subtype_check_receiver-------------------------
3005 Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
3006                                        Node** casted_receiver) {
3007   const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
3008   Node* want_klass = makecon(tklass);
3009 
3010   Node* slow_ctl = gen_subtype_check(receiver, want_klass);
3011 
3012   // Cast receiver after successful check
3013   const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();
3014   Node* cast = new CheckCastPPNode(control(), receiver, recv_type);
3015   (*casted_receiver) = _gvn.transform(cast);
3016 
3017   return slow_ctl;
3018 }
3019 
3020 //------------------------------seems_never_null-------------------------------
3021 // Use null_seen information if it is available from the profile.
3022 // If we see an unexpected null at a type check we record it and force a
3023 // recompile; the offending check will be recompiled to handle NULLs.
3024 // If we see several offending BCIs, then all checks in the
3025 // method will be recompiled.
3026 bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {
3027   speculating = !_gvn.type(obj)->speculative_maybe_null();
3028   Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
3029   if (UncommonNullCast               // Cutout for this technique
3030       && obj != null()               // And not the -Xcomp stupid case?
3031       && !too_many_traps(reason)
3032       ) {
3033     if (speculating) {
3034       return true;
3035     }
3036     if (data == NULL)
3037       // Edge case:  no mature data.  Be optimistic here.
3038       return true;
3039     // If the profile has not seen a null, assume it won't happen.
3040     assert(java_bc() == Bytecodes::_checkcast ||
3041            java_bc() == Bytecodes::_instanceof ||
3042            java_bc() == Bytecodes::_aastore, "MDO must collect null_seen bit here");
3043     if (java_bc() == Bytecodes::_aastore) {
3044       return data->as_ArrayLoadStoreData()->element()->ptr_kind() == ProfileNeverNull;
3045     }
3046     return !data->as_BitData()->null_seen();
3047   }
3048   speculating = false;
3049   return false;
3050 }
3051 
3052 void GraphKit::guard_klass_being_initialized(Node* klass) {
3053   int init_state_off = in_bytes(InstanceKlass::init_state_offset());
3054   Node* adr = basic_plus_adr(top(), klass, init_state_off);
3055   Node* init_state = LoadNode::make(_gvn, NULL, immutable_memory(), adr,
3056                                     adr->bottom_type()->is_ptr(), TypeInt::BYTE,
3057                                     T_BYTE, MemNode::unordered);
3058   init_state = _gvn.transform(init_state);
3059 
3060   Node* being_initialized_state = makecon(TypeInt::make(InstanceKlass::being_initialized));
3061 
3062   Node* chk = _gvn.transform(new CmpINode(being_initialized_state, init_state));
3063   Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::eq));
3064 
3065   { BuildCutout unless(this, tst, PROB_MAX);
3066     uncommon_trap(Deoptimization::Reason_initialized, Deoptimization::Action_reinterpret);
3067   }
3068 }
3069 
3070 void GraphKit::guard_init_thread(Node* klass) {
3071   int init_thread_off = in_bytes(InstanceKlass::init_thread_offset());
3072   Node* adr = basic_plus_adr(top(), klass, init_thread_off);
3073 
3074   Node* init_thread = LoadNode::make(_gvn, NULL, immutable_memory(), adr,
3075                                      adr->bottom_type()->is_ptr(), TypePtr::NOTNULL,
3076                                      T_ADDRESS, MemNode::unordered);
3077   init_thread = _gvn.transform(init_thread);
3078 
3079   Node* cur_thread = _gvn.transform(new ThreadLocalNode());
3080 
3081   Node* chk = _gvn.transform(new CmpPNode(cur_thread, init_thread));
3082   Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::eq));
3083 
3084   { BuildCutout unless(this, tst, PROB_MAX);
3085     uncommon_trap(Deoptimization::Reason_uninitialized, Deoptimization::Action_none);
3086   }
3087 }
3088 
3089 void GraphKit::clinit_barrier(ciInstanceKlass* ik, ciMethod* context) {
3090   if (ik->is_being_initialized()) {
3091     if (C->needs_clinit_barrier(ik, context)) {
3092       Node* klass = makecon(TypeKlassPtr::make(ik));
3093       guard_klass_being_initialized(klass);
3094       guard_init_thread(klass);
3095       insert_mem_bar(Op_MemBarCPUOrder);
3096     }
3097   } else if (ik->is_initialized()) {
3098     return; // no barrier needed
3099   } else {
3100     uncommon_trap(Deoptimization::Reason_uninitialized,
3101                   Deoptimization::Action_reinterpret,
3102                   NULL);
3103   }
3104 }
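
// Example scenario (Java): a call into class K compiled while K is still
// being initialized may only proceed on the initializer thread; any other
// thread fails the guards above and deoptimizes, re-entering the interpreter
// which performs the blocking initialization check.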
3105 
3106 //------------------------maybe_cast_profiled_receiver-------------------------
3107 // If the profile has seen exactly one type, narrow to exactly that type.
3108 // Subsequent type checks will always fold up.
3109 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
3110                                              ciKlass* require_klass,
3111                                              ciKlass* spec_klass,
3112                                              bool safe_for_replace) {
3113   if (!UseTypeProfile || !TypeProfileCasts) return NULL;
3114 
3115   Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != NULL);
3116 
3117   // Make sure we haven't already deoptimized from this tactic.
3118   if (too_many_traps_or_recompiles(reason))
3119     return NULL;
3120 
3121   // (No, this isn't a call, but it's enough like a virtual call
3122   // to use the same ciMethod accessor to get the profile info...)
3123   // If we have a speculative type use it instead of profiling (which
3124   // may not help us)
3125   ciKlass* exact_kls = spec_klass;
3126   if (exact_kls == NULL) {
3127     if (java_bc() == Bytecodes::_aastore) {
3128       ciKlass* array_type = NULL;
3129       ciKlass* element_type = NULL;
3130       ProfilePtrKind element_ptr = ProfileMaybeNull;
3131       bool flat_array = true;
3132       bool null_free_array = true;
3133       method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
3134       exact_kls = element_type;
3135     } else {
3136       exact_kls = profile_has_unique_klass();
3137     }
3138   }
3139   if (exact_kls != NULL) { // no cast failures here
3140     if (require_klass == NULL ||
3141         C->static_subtype_check(require_klass, exact_kls) == Compile::SSC_always_true) {
3142       // If we narrow the type to match what the type profile sees or
3143       // the speculative type, we can then remove the rest of the
3144       // cast.
3145       // This is a win, even if the exact_kls is very specific,
3146       // because downstream operations, such as method calls,
3147       // will often benefit from the sharper type.
3148       Node* exact_obj = not_null_obj; // will get updated in place...
3149       Node* slow_ctl  = type_check_receiver(exact_obj, exact_kls, 1.0,
3150                                             &exact_obj);
3151       { PreserveJVMState pjvms(this);
3152         set_control(slow_ctl);
3153         uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
3154       }
3155       if (safe_for_replace) {
3156         replace_in_map(not_null_obj, exact_obj);
3157       }
3158       return exact_obj;
3159     }
3160     // assert(ssc == Compile::SSC_always_true)... except maybe the profile lied to us.
3161   }
3162 
3163   return NULL;
3164 }
3165 
3166 /**
3167  * Cast obj to type and emit guard unless we had too many traps here
3168  * already
3169  *
3170  * @param obj       node being cast
3171  * @param type      type to cast the node to
3172  * @param not_null  true if we know node cannot be null
3173  */
3174 Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
3175                                         ciKlass* type,
3176                                         bool not_null) {
3177   if (stopped()) {
3178     return obj;
3179   }
3180 
3181   // type == NULL if profiling tells us this object is always null
3182   if (type != NULL) {
3183     Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check;
3184     Deoptimization::DeoptReason null_reason = Deoptimization::Reason_speculate_null_check;
3185 
3186     if (!too_many_traps_or_recompiles(null_reason) &&
3187         !too_many_traps_or_recompiles(class_reason)) {
3188       Node* not_null_obj = NULL;
3189       // not_null is true if we know the object is not null and
3190       // there's no need for a null check
3191       if (!not_null) {
3192         Node* null_ctl = top();
3193         not_null_obj = null_check_oop(obj, &null_ctl, true, true, true);
3194         assert(null_ctl->is_top(), "no null control here");
3195       } else {
3196         not_null_obj = obj;
3197       }
3198 
3199       Node* exact_obj = not_null_obj;
3200       ciKlass* exact_kls = type;
3201       Node* slow_ctl  = type_check_receiver(exact_obj, exact_kls, 1.0,
3202                                             &exact_obj);
3203       {
3204         PreserveJVMState pjvms(this);
3205         set_control(slow_ctl);
3206         uncommon_trap_exact(class_reason, Deoptimization::Action_maybe_recompile);
3207       }
3208       replace_in_map(not_null_obj, exact_obj);
3209       obj = exact_obj;
3210     }
3211   } else {
3212     if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
3213       Node* exact_obj = null_assert(obj);
3214       replace_in_map(obj, exact_obj);
3215       obj = exact_obj;
3216     }
3217   }
3218   return obj;
3219 }
3220 
3221 //-------------------------------gen_instanceof--------------------------------
3222 // Generate an instance-of idiom.  Used by both the instance-of bytecode
3223 // and the reflective instance-of call.
3224 Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replace) {
3225   kill_dead_locals();           // Benefit all the uncommon traps
3226   assert( !stopped(), "dead parse path should be checked in callers" );
3227   assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()),
3228          "must check for not-null not-dead klass in callers");
3229 
3230   // Make the merge point
3231   enum { _obj_path = 1, _fail_path, _null_path, PATH_LIMIT };
3232   RegionNode* region = new RegionNode(PATH_LIMIT);
3233   Node*       phi    = new PhiNode(region, TypeInt::BOOL);
3234   C->set_has_split_ifs(true); // Has chance for split-if optimization
3235 
3236   ciProfileData* data = NULL;
3237   if (java_bc() == Bytecodes::_instanceof) {  // Only for the bytecode
3238     data = method()->method_data()->bci_to_data(bci());
3239   }
3240   bool speculative_not_null = false;
3241   bool never_see_null = (ProfileDynamicTypes  // aggressive use of profile
3242                          && seems_never_null(obj, data, speculative_not_null));
3243   bool is_value = obj->is_InlineType();
3244 
3245   // Null check; get casted pointer; set region slot 3
3246   Node* null_ctl = top();
3247   Node* not_null_obj = is_value ? obj : null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3248 
3249   // If not_null_obj is dead, only null-path is taken
3250   if (stopped()) {              // Doing instance-of on a NULL?
3251     set_control(null_ctl);
3252     return intcon(0);
3253   }
3254   region->init_req(_null_path, null_ctl);
3255   phi   ->init_req(_null_path, intcon(0)); // Set null path value
3256   if (null_ctl == top()) {
3257     // Do this eagerly, so that pattern matches like is_diamond_phi
3258     // will work even during parsing.
3259     assert(_null_path == PATH_LIMIT-1, "delete last");
3260     region->del_req(_null_path);
3261     phi   ->del_req(_null_path);
3262   }
3263 
3264   // Do we know the type check always succeeds?
3265   if (!is_value) {
3266     bool known_statically = false;
3267     if (_gvn.type(superklass)->singleton()) {
3268       ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
3269       ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass();
3270       if (subk != NULL && subk->is_loaded()) {
3271         int static_res = C->static_subtype_check(superk, subk);
3272         known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
3273       }
3274     }
3275 
3276     if (!known_statically) {
3277       const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3278       // We may not have profiling here or it may not help us. If we
3279       // have a speculative type use it to perform an exact cast.
3280       ciKlass* spec_obj_type = obj_type->speculative_type();
3281       if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) {
3282         Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace);
3283         if (stopped()) {            // Profile disagrees with this path.
3284           set_control(null_ctl);    // Null is the only remaining possibility.
3285           return intcon(0);
3286         }
3287         if (cast_obj != NULL &&
3288             // A value that's sometimes null is not something we can optimize well
3289             !(cast_obj->is_InlineType() && null_ctl != top())) {
3290           not_null_obj = cast_obj;
3291           is_value = not_null_obj->is_InlineType();
3292         }
3293       }
3294     }
3295   }
3296 
3297   // Generate the subtype check
3298   Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass);
3299 
3300   // Plug in the success path to the general merge in slot 1.
3301   region->init_req(_obj_path, control());
3302   phi   ->init_req(_obj_path, intcon(1));
3303 
3304   // Plug in the failing path to the general merge in slot 2.
3305   region->init_req(_fail_path, not_subtype_ctrl);
3306   phi   ->init_req(_fail_path, intcon(0));
3307 
3308   // Return final merged results
3309   set_control( _gvn.transform(region) );
3310   record_for_igvn(region);
3311 
3312   // If we know the type check always succeeds then we don't use the
3313   // profiling data at this bytecode. Don't lose it, feed it to the
3314   // type system as a speculative type.
3315   if (safe_for_replace && !is_value) {
3316     Node* casted_obj = record_profiled_receiver_for_speculation(obj);
3317     replace_in_map(obj, casted_obj);
3318   }
3319 
3320   return _gvn.transform(phi);
3321 }
3322 
3323 //-------------------------------gen_checkcast---------------------------------
3324 // Generate a checkcast idiom.  Used by both the checkcast bytecode and the
3325 // array store bytecode.  Stack must be as-if BEFORE doing the bytecode so the
3326 // uncommon-trap paths work.  Adjust stack after this call.
3327 // If failure_control is supplied and not null, it is filled in with
3328 // the control edge for the cast failure.  Otherwise, an appropriate
3329 // uncommon trap or exception is thrown.
3330 Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, Node* *failure_control) {
3331   kill_dead_locals();           // Benefit all the uncommon traps
3332   const TypeKlassPtr* tk = _gvn.type(superklass)->is_klassptr();
3333   const TypeOopPtr* toop = TypeOopPtr::make_from_klass(tk->klass());
3334 
3335   // Check if inline types are involved
3336   bool from_inline = obj->is_InlineType();
3337   bool to_inline = tk->klass()->is_inlinetype();
3338 
3339   // Fast cutout:  Check the case that the cast is vacuously true.
3340   // This detects the common cases where the test will short-circuit
3341   // away completely.  We do this before we perform the null check,
3342   // because if the test is going to turn into zero code, we don't
3343   // want a residual null check left around.  (Causes a slowdown,
3344   // for example, in some objArray manipulations, such as a[i]=a[j].)
3345   if (tk->singleton()) {
3346     ciKlass* klass = NULL;
3347     if (from_inline) {
3348       klass = _gvn.type(obj)->inline_klass();
3349     } else {
3350       const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
3351       if (objtp != NULL) {
3352         klass = objtp->klass();
3353       }
3354     }
3355     if (klass != NULL) {
3356       switch (C->static_subtype_check(tk->klass(), klass)) {
3357       case Compile::SSC_always_true:
3358         // If we know the type check always succeeds then we don't use
3359         // the profiling data at this bytecode. Don't lose it, feed it
3360         // to the type system as a speculative type.
3361         if (!from_inline) {
3362           obj = record_profiled_receiver_for_speculation(obj);
3363           if (to_inline) {
3364             obj = null_check(obj);
3365             if (toop->inline_klass()->is_scalarizable()) {
3366               obj = InlineTypeNode::make_from_oop(this, obj, toop->inline_klass());
3367             }
3368           }
3369         }
3370         return obj;
3371       case Compile::SSC_always_false:
3372         if (from_inline || to_inline) {
3373           if (!from_inline) {
3374             null_check(obj);
3375           }
3376           // Inline type is never null. Always throw an exception.
3377           builtin_throw(Deoptimization::Reason_class_check, makecon(TypeKlassPtr::make(klass)));
3378           return top();
3379         } else {
3380           // It needs a null check because a null will *pass* the cast check.
3381           return null_assert(obj);
3382         }
3383       }
3384     }
3385   }
3386 
3387   ciProfileData* data = NULL;
3388   bool safe_for_replace = false;
3389   if (failure_control == NULL) {        // use MDO in regular case only
3390     assert(java_bc() == Bytecodes::_aastore ||
3391            java_bc() == Bytecodes::_checkcast,
3392            "interpreter profiles type checks only for these BCs");
3393     if (method()->method_data()->is_mature()) {
3394       data = method()->method_data()->bci_to_data(bci());
3395     }
3396     safe_for_replace = true;
3397   }
3398 
3399   // Make the merge point
3400   enum { _obj_path = 1, _null_path, PATH_LIMIT };
3401   RegionNode* region = new RegionNode(PATH_LIMIT);
3402   Node*       phi    = new PhiNode(region, toop);
3403   _gvn.set_type(region, Type::CONTROL);
3404   _gvn.set_type(phi, toop);
3405 
3406   C->set_has_split_ifs(true); // Has chance for split-if optimization
3407 
3408   // Use null-cast information if it is available
3409   bool speculative_not_null = false;
3410   bool never_see_null = ((failure_control == NULL)  // regular case only
3411                          && seems_never_null(obj, data, speculative_not_null));
3412 
3413   // Null check; get casted pointer; set the region's null path (slot 2)
3414   Node* null_ctl = top();
3415   Node* not_null_obj = NULL;
3416   if (from_inline) {
3417     not_null_obj = obj;
3418   } else if (to_inline) {
3419     not_null_obj = null_check(obj);
3420   } else {
3421     not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3422   }
3423 
3424   // If not_null_obj is dead, only null-path is taken
3425   if (stopped()) {              // Doing checkcast on a NULL?
3426     set_control(null_ctl);
3427     return null();
3428   }
3429   region->init_req(_null_path, null_ctl);
3430   phi   ->init_req(_null_path, null());  // Set null path value
3431   if (null_ctl == top()) {
3432     // Do this eagerly, so that pattern matches like is_diamond_phi
3433     // will work even during parsing.
3434     assert(_null_path == PATH_LIMIT-1, "delete last");
3435     region->del_req(_null_path);
3436     phi   ->del_req(_null_path);
3437   }
3438 
3439   Node* cast_obj = NULL;
3440   if (!from_inline && tk->klass_is_exact()) {
3441     // The following optimization tries to statically cast the speculative type of the object
3442     // (for example obtained during profiling) to the type of the superklass and then do a
3443     // dynamic check that the type of the object is what we expect. To work correctly
3444     // for checkcast and aastore the type of superklass should be exact.
3445     const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3446     // We may not have profiling here or it may not help us. If we have
3447     // a speculative type use it to perform an exact cast.
3448     ciKlass* spec_obj_type = obj_type->speculative_type();
3449     if (spec_obj_type != NULL || data != NULL) {
3450       cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
3451       if (cast_obj != NULL && cast_obj->is_InlineType()) {
3452         if (null_ctl != top()) {
3453           cast_obj = NULL; // A value that's sometimes null is not something we can optimize well
3454         } else {
3455           return cast_obj;
3456         }
3457       }
3458       if (cast_obj != NULL) {
3459         if (failure_control != NULL) // failure is now impossible
3460           (*failure_control) = top();
3461         // adjust the type of the phi to the exact klass:
3462         phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
3463       }
3464     }
3465   }
3466 
3467   if (cast_obj == NULL) {
3468     // Generate the subtype check
3469     Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass);
3470 
3471     // Plug in success path into the merge
3472     cast_obj = from_inline ? not_null_obj : _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
3473     // Failure path ends in uncommon trap (or may be dead - failure impossible)
3474     if (failure_control == NULL) {
3475       if (not_subtype_ctrl != top()) { // If failure is possible
3476         PreserveJVMState pjvms(this);
3477         set_control(not_subtype_ctrl);
3478         Node* obj_klass = NULL;
3479         if (from_inline) {
3480           obj_klass = makecon(TypeKlassPtr::make(_gvn.type(not_null_obj)->inline_klass()));
3481         } else {
3482           obj_klass = load_object_klass(not_null_obj);
3483         }
3484         builtin_throw(Deoptimization::Reason_class_check, obj_klass);
3485       }
3486     } else {
3487       (*failure_control) = not_subtype_ctrl;
3488     }
3489   }
3490 
3491   region->init_req(_obj_path, control());
3492   phi   ->init_req(_obj_path, cast_obj);
3493 
3494   // A merge of NULL or Casted-NotNull obj
3495   Node* res = _gvn.transform(phi);
3496 
3497   // Note I do NOT always 'replace_in_map(obj,result)' here.
3498   //  if( tk->klass()->can_be_primary_super()  )
3499     // This means that if I successfully store an Object into an array-of-String
3500     // I 'forget' that the Object is really now known to be a String.  I have to
3501     // do this because we don't have true union types for interfaces - if I store
3502     // a Baz into an array-of-Interface and then tell the optimizer it's an
3503     // Interface, I forget that it's also a Baz and cannot do Baz-like field
3504     // references to it.  FIX THIS WHEN UNION TYPES APPEAR!
3505   //  replace_in_map( obj, res );
3506 
3507   // Return final merged results
3508   set_control( _gvn.transform(region) );
3509   record_for_igvn(region);
3510 
3511   bool not_inline = !toop->can_be_inline_type();
3512   bool not_flattened = !UseFlatArray || not_inline || (toop->is_inlinetypeptr() && !toop->inline_klass()->flatten_array());
3513   if (EnableValhalla && not_flattened) {
3514     // Check if obj has been loaded from an array
3515     obj = obj->isa_DecodeN() ? obj->in(1) : obj;
3516     Node* array = NULL;
3517     if (obj->isa_Load()) {
3518       Node* address = obj->in(MemNode::Address);
3519       if (address->isa_AddP()) {
3520         array = address->as_AddP()->in(AddPNode::Base);
3521       }
3522     } else if (obj->is_Phi()) {
3523       Node* region = obj->in(0);
3524       // TODO make this more robust (see JDK-8231346)
3525       if (region->req() == 3 && region->in(2) != NULL && region->in(2)->in(0) != NULL) {
3526         IfNode* iff = region->in(2)->in(0)->isa_If();
3527         if (iff != NULL) {
3528           iff->is_non_flattened_array_check(&_gvn, &array);
3529         }
3530       }
3531     }
3532     if (array != NULL) {
3533       const TypeAryPtr* ary_t = _gvn.type(array)->isa_aryptr();
3534       if (ary_t != NULL) {
3535         if (!ary_t->is_not_null_free() && not_inline) {
3536           // Casting array element to a non-inline-type, mark array as not null-free.
3537           Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_null_free()));
3538           replace_in_map(array, cast);
3539         } else if (!ary_t->is_not_flat()) {
3540           // Casting array element to a non-flattened type, mark array as not flat.
3541           Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_flat()));
3542           replace_in_map(array, cast);
3543         }
3544       }
3545     }
3546   }
3547 
3548   if (!from_inline) {
3549     res = record_profiled_receiver_for_speculation(res);
3550     if (to_inline && toop->inline_klass()->is_scalarizable()) {
3551       assert(!gvn().type(res)->maybe_null(), "Inline types are null-free");
3552       res = InlineTypeNode::make_from_oop(this, res, toop->inline_klass());
3553     }
3554   }
3555   return res;
3556 }
3557 
3558 // Check if 'obj' is an inline type by checking if it has the always_locked markWord pattern set.
3559 Node* GraphKit::is_inline_type(Node* obj) {
3560   Node* mark_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
3561   Node* mark = make_load(NULL, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
3562   Node* mask = _gvn.MakeConX(markWord::always_locked_pattern);
3563   Node* andx = _gvn.transform(new AndXNode(mark, mask));
3564   Node* cmp = _gvn.transform(new CmpXNode(andx, mask));
3565   return _gvn.transform(new BoolNode(cmp, BoolTest::eq));
3566 }
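
// That is, the test above computes (pseudocode):
//   (obj->mark() & markWord::always_locked_pattern) == always_locked_pattern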
3567 
3568 // Check if 'ary' is a non-flattened array
3569 Node* GraphKit::is_non_flattened_array(Node* ary) {
3570   Node* kls = load_object_klass(ary);
3572   Node* cmp = gen_lh_array_test(kls, Klass::_lh_array_tag_vt_value);
3573   return _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3574 }
3575 
3576 // Check if 'ary' is a nullable array
3577 Node* GraphKit::is_nullable_array(Node* ary) {
3578   Node* kls = load_object_klass(ary);
3579   Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset()));
3580   Node* layout_val = _gvn.transform(LoadNode::make(_gvn, NULL, immutable_memory(), lhp, lhp->bottom_type()->is_ptr(), TypeInt::INT, T_INT, MemNode::unordered));
3581   Node* null_free = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_null_free_shift)));
3582   null_free = _gvn.transform(new AndINode(null_free, intcon(Klass::_lh_null_free_mask)));
3583   Node* cmp = _gvn.transform(new CmpINode(null_free, intcon(0)));
3584   return _gvn.transform(new BoolNode(cmp, BoolTest::eq));
3585 }
3586 
3587 // Deoptimize if 'ary' is a null-free inline type array and 'val' is null
3588 Node* GraphKit::gen_inline_array_null_guard(Node* ary, Node* val, int nargs, bool safe_for_replace) {
3589   const Type* val_t = _gvn.type(val);
3590   if (val->is_InlineType() || !TypePtr::NULL_PTR->higher_equal(val_t)) {
3591     return ary; // Never null
3592   }
3593   RegionNode* region = new RegionNode(3);
3594   Node* null_ctl = top();
3595   null_check_oop(val, &null_ctl);
3596   if (null_ctl != top()) {
3597     PreserveJVMState pjvms(this);
3598     set_control(null_ctl);
3599     {
3600       // Deoptimize if null-free array
3601       BuildCutout unless(this, is_nullable_array(ary), PROB_MAX);
3602       inc_sp(nargs);
3603       uncommon_trap(Deoptimization::Reason_null_check,
3604                     Deoptimization::Action_none);
3605     }
3606     region->init_req(1, control());
3607   }
3608   region->init_req(2, control());
3609   set_control(_gvn.transform(region));
3610   record_for_igvn(region);
3611   const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
3612   if (val_t == TypePtr::NULL_PTR && !ary_t->is_not_null_free()) {
3613     // Since we were just successfully storing null, the array can't be null free.
3614     ary_t = ary_t->cast_to_not_null_free();
3615     Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
3616     if (safe_for_replace) {
3617       replace_in_map(ary, cast);
3618     }
3619     ary = cast;
3620   }
3621   return ary;
3622 }
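
// For example (Java, Valhalla): for 'a[i] = v' where 'a' may turn out to be
// a null-free inline type array, the guard deoptimizes when 'v' is null
// rather than compiling an explicit throw here.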
3623 
3624 Node* GraphKit::load_lh_array_tag(Node* kls) {
3625   Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset()));
3626   Node* layout_val = _gvn.transform(LoadNode::make(_gvn, NULL, immutable_memory(), lhp, lhp->bottom_type()->is_ptr(), TypeInt::INT, T_INT, MemNode::unordered));
3627   return _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
3628 }
3629 
3630 Node* GraphKit::gen_lh_array_test(Node* kls, unsigned int lh_value) {
3631   Node* layout_val = load_lh_array_tag(kls);
3632   Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(lh_value)));
3633   return cmp;
3634 }
3635 
3636 //------------------------------next_monitor-----------------------------------
3637 // What number should be given to the next monitor?
3638 int GraphKit::next_monitor() {
3639   int current = jvms()->monitor_depth() * C->sync_stack_slots();
3640   int next = current + C->sync_stack_slots();
3641   // Keep the toplevel high water mark current:
3642   if (C->fixed_slots() < next)  C->set_fixed_slots(next);
3643   return current;
3644 }
3645 
3646 //------------------------------insert_mem_bar---------------------------------
3647 // Memory barrier to avoid floating things around
3648 // The membar serves as a pinch point between both control and all memory slices.
3649 Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
3650   MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
3651   mb->init_req(TypeFunc::Control, control());
3652   mb->init_req(TypeFunc::Memory,  reset_memory());
3653   Node* membar = _gvn.transform(mb);
3654   set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
3655   set_all_memory_call(membar);
3656   return membar;
3657 }
3658 
3659 //-------------------------insert_mem_bar_volatile----------------------------
3660 // Memory barrier to avoid floating things around
3661 // The membar serves as a pinch point between both control and memory(alias_idx).
3662 // If you want to make a pinch point on all memory slices, do not use this
3663 // function (even with AliasIdxBot); use insert_mem_bar() instead.
3664 Node* GraphKit::insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent) {
3665   // When Parse::do_put_xxx updates a volatile field, it appends a series
3666   // of MemBarVolatile nodes, one for *each* volatile field alias category.
3667   // The first membar is on the same memory slice as the field store opcode.
3668   // This forces the membar to follow the store.  (Bug 6500685 broke this.)
3669   // All the other membars (for other volatile slices, including AliasIdxBot,
3670   // which stands for all unknown volatile slices) are control-dependent
3671   // on the first membar.  This prevents later volatile loads or stores
3672   // from sliding up past the just-emitted store.
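  // Example: a Java volatile store 'obj.f = v' is parsed as the store on f's
  // slice, a MemBarVolatile on that same slice, and then membars for the
  // other volatile slices (including AliasIdxBot), each control-dependent
  // on the first one.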
3673 
3674   MemBarNode* mb = MemBarNode::make(C, opcode, alias_idx, precedent);
3675   mb->set_req(TypeFunc::Control,control());
3676   if (alias_idx == Compile::AliasIdxBot) {
3677     mb->set_req(TypeFunc::Memory, merged_memory()->base_memory());
3678   } else {
3679     assert(!(opcode == Op_Initialize && alias_idx != Compile::AliasIdxRaw), "fix caller");
3680     mb->set_req(TypeFunc::Memory, memory(alias_idx));
3681   }
3682   Node* membar = _gvn.transform(mb);
3683   set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
3684   if (alias_idx == Compile::AliasIdxBot) {
3685     merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)));
3686   } else {
3687     set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
3688   }
3689   return membar;
3690 }
3691 
3692 //------------------------------shared_lock------------------------------------
3693 // Emit locking code.
3694 FastLockNode* GraphKit::shared_lock(Node* obj) {
3695   // bci is either a monitorenter bc or InvocationEntryBci
3696   // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3697   assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3698 
3699   if( !GenerateSynchronizationCode )
3700     return NULL;                // Not locking things?
3701 
3702   if (stopped())                // Dead monitor?
3703     return NULL;
3704 
3705   assert(dead_locals_are_killed(), "should kill locals before sync. point");
3706 
3707   // Box the stack location
3708   Node* box = _gvn.transform(new BoxLockNode(next_monitor()));
3709   Node* mem = reset_memory();
3710 
3711   FastLockNode * flock = _gvn.transform(new FastLockNode(0, obj, box) )->as_FastLock();
3712   if (UseBiasedLocking && PrintPreciseBiasedLockingStatistics) {
3713     // Create the counters for this fast lock.
3714     flock->create_lock_counter(sync_jvms()); // sync_jvms used to get current bci
3715   }
3716 
3717   // Create the rtm counters for this fast lock if needed.
3718   flock->create_rtm_lock_counter(sync_jvms()); // sync_jvms used to get current bci
3719 
3720   // Add monitor to debug info for the slow path.  If we block inside the
3721   // slow path and de-opt, we need the monitor hanging around
3722   map()->push_monitor( flock );
3723 
3724   const TypeFunc *tf = LockNode::lock_type();
3725   LockNode *lock = new LockNode(C, tf);
3726 
3727   lock->init_req( TypeFunc::Control, control() );
3728   lock->init_req( TypeFunc::Memory , mem );
3729   lock->init_req( TypeFunc::I_O    , top() );   // does no i/o
3730   lock->init_req( TypeFunc::FramePtr, frameptr() );
3731   lock->init_req( TypeFunc::ReturnAdr, top() );
3732 
3733   lock->init_req(TypeFunc::Parms + 0, obj);
3734   lock->init_req(TypeFunc::Parms + 1, box);
3735   lock->init_req(TypeFunc::Parms + 2, flock);
3736   add_safepoint_edges(lock);
3737 
3738   lock = _gvn.transform( lock )->as_Lock();
3739 
3740   // lock has no side-effects, sets few values
3741   set_predefined_output_for_runtime_call(lock, mem, TypeRawPtr::BOTTOM);
3742 
3743   insert_mem_bar(Op_MemBarAcquireLock);
3744 
3745   // Add this to the worklist so that the lock can be eliminated
3746   record_for_igvn(lock);
3747 
3748 #ifndef PRODUCT
3749   if (PrintLockStatistics) {
3750     // Update the counter for this lock.  Don't bother using an atomic
3751     // operation since we don't require absolute accuracy.
3752     lock->create_lock_counter(map()->jvms());
3753     increment_counter(lock->counter()->addr());
3754   }
3755 #endif
3756 
3757   return flock;
3758 }
3759 
3760 
3761 //------------------------------shared_unlock----------------------------------
3762 // Emit unlocking code.
3763 void GraphKit::shared_unlock(Node* box, Node* obj) {
3764   // bci is either a monitorenter bc or InvocationEntryBci
3765   // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3766   assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3767 
3768   if( !GenerateSynchronizationCode )
3769     return;
3770   if (stopped()) {               // Dead monitor?
3771     map()->pop_monitor();        // Kill monitor from debug info
3772     return;
3773   }
3774   assert(!obj->is_InlineTypeBase(), "should not unlock on inline type");
3775 
3776   // Memory barrier to avoid floating things down past the locked region
3777   insert_mem_bar(Op_MemBarReleaseLock);
3778 
3779   const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
3780   UnlockNode *unlock = new UnlockNode(C, tf);
3781 #ifdef ASSERT
3782   unlock->set_dbg_jvms(sync_jvms());
3783 #endif
3784   uint raw_idx = Compile::AliasIdxRaw;
3785   unlock->init_req( TypeFunc::Control, control() );
3786   unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
3787   unlock->init_req( TypeFunc::I_O    , top() );   // does no i/o
3788   unlock->init_req( TypeFunc::FramePtr, frameptr() );
3789   unlock->init_req( TypeFunc::ReturnAdr, top() );
3790 
3791   unlock->init_req(TypeFunc::Parms + 0, obj);
3792   unlock->init_req(TypeFunc::Parms + 1, box);
3793   unlock = _gvn.transform(unlock)->as_Unlock();
3794 
3795   Node* mem = reset_memory();
3796 
3797   // unlock has no side-effects, sets few values
3798   set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
3799 
3800   // Kill monitor from debug info
3801   map()->pop_monitor();
3802 }
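// A minimal usage sketch of the lock/unlock pair (hypothetical caller; the
// real call sites are the monitorenter/monitorexit bytecodes in the parser
// and the prologue/epilogue of synchronized methods):
//
//   FastLockNode* flock = shared_lock(obj);     // on monitorenter
//   ...                                         // synchronized region
//   shared_unlock(flock->box_node(), obj);      // on monitorexit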
3803 
3804 //-------------------------------get_layout_helper-----------------------------
3805 // If the given klass is a constant or known to be an array,
3806 // fetch the constant layout helper value into constant_value
3807 // and return (Node*)NULL.  Otherwise, load the non-constant
3808 // layout helper value, and return the node which represents it.
3809 // This two-faced routine is useful because allocation sites
3810 // almost always feature constant types.
3811 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
3812   const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr();
3813   if (!StressReflectiveCode && inst_klass != NULL) {
3814     ciKlass* klass = inst_klass->klass();
3815     assert(klass != NULL, "klass should not be NULL");
3816     bool    xklass = inst_klass->klass_is_exact();
3817     bool can_be_flattened = false;
3818     if (UseFlatArray && klass->is_obj_array_klass()) {
3819       ciKlass* elem = klass->as_obj_array_klass()->element_klass();
3820       can_be_flattened = elem->can_be_inline_klass() && (!elem->is_inlinetype() || elem->flatten_array());
3821     }
3822     if (xklass || (klass->is_array_klass() && !can_be_flattened)) {
3823       jint lhelper = klass->layout_helper();
3824       if (lhelper != Klass::_lh_neutral_value) {
3825         constant_value = lhelper;
3826         return (Node*) NULL;
3827       }
3828     }
3829   }
3830   constant_value = Klass::_lh_neutral_value;  // put in a known value
3831   Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
3832   return make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
3833 }
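// A minimal sketch of the calling convention this routine expects; it
// mirrors the use in new_instance/new_array below:
//
//   jint  layout_con = Klass::_lh_neutral_value;
//   Node* layout_val = get_layout_helper(klass_node, layout_con);
//   if (layout_val == NULL) {
//     // layout_con now holds the constant layout helper value
//   } else {
//     // layout_val is the node loading the helper at runtime
//   }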
3834 
3835 // We just put in an allocate/initialize with a big raw-memory effect.
3836 // Hook selected additional alias categories on the initialization.
3837 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
3838                                 MergeMemNode* init_in_merge,
3839                                 Node* init_out_raw) {
3840   DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
3841   assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
3842 
3843   Node* prevmem = kit.memory(alias_idx);
3844   init_in_merge->set_memory_at(alias_idx, prevmem);
3845   kit.set_memory(init_out_raw, alias_idx);
3846 }
3847 
3848 //---------------------------set_output_for_allocation-------------------------
3849 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
3850                                           const TypeOopPtr* oop_type,
3851                                           bool deoptimize_on_exception) {
3852   int rawidx = Compile::AliasIdxRaw;
3853   alloc->set_req( TypeFunc::FramePtr, frameptr() );
3854   add_safepoint_edges(alloc);
3855   Node* allocx = _gvn.transform(alloc);
3856   set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) );
3857   // create memory projection for i_o
3858   set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
3859   make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);
3860 
3861   // create a memory projection as for the normal control path
3862   Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
3863   set_memory(malloc, rawidx);
3864 
3865   // a normal slow-call doesn't change i_o, but an allocation does;
3866   // we create a separate i_o projection for the normal control path
3867   set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
3868   Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );
3869 
3870   // put in an initialization barrier
3871   InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
3872                                                  rawoop)->as_Initialize();
3873   assert(alloc->initialization() == init,  "2-way macro link must work");
3874   assert(init ->allocation()     == alloc, "2-way macro link must work");
3875   {
3876     // Extract memory strands which may participate in the new object's
3877     // initialization, and source them from the new InitializeNode.
3878     // This will allow us to observe initializations when they occur,
3879     // and link them properly (as a group) to the InitializeNode.
3880     assert(init->in(InitializeNode::Memory) == malloc, "");
3881     MergeMemNode* minit_in = MergeMemNode::make(malloc);
3882     init->set_req(InitializeNode::Memory, minit_in);
3883     record_for_igvn(minit_in); // fold it up later, if possible
3884     _gvn.set_type(minit_in, Type::MEMORY);
3885     Node* minit_out = memory(rawidx);
3886     assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
3887     // Add an edge in the MergeMem for the header fields so an access
3888     // to one of those has the correct memory state
3889     set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
3890     set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
3891     if (oop_type->isa_aryptr()) {
3892       const TypeAryPtr* arytype = oop_type->is_aryptr();
3893       if (arytype->klass()->is_flat_array_klass()) {
3894         // Initially all flattened array accesses share a single slice
3895         // but that changes after parsing. Prepare the memory graph so
3896         // it can optimize flattened array accesses properly once they
3897         // don't share a single slice.
3898         assert(C->flattened_accesses_share_alias(), "should be set at parse time");
3899         C->set_flattened_accesses_share_alias(false);
3900         ciFlatArrayKlass* vak = arytype->klass()->as_flat_array_klass();
3901         ciInlineKlass* vk = vak->element_klass()->as_inline_klass();
3902         for (int i = 0, len = vk->nof_nonstatic_fields(); i < len; i++) {
3903           ciField* field = vk->nonstatic_field_at(i);
3904           if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
3905             continue;  // do not bother to track really large numbers of fields
3906           int off_in_vt = field->offset() - vk->first_field_offset();
3907           const TypePtr* adr_type = arytype->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
3908           int fieldidx = C->get_alias_index(adr_type, true);
3909           hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
3910         }
3911         C->set_flattened_accesses_share_alias(true);
3912         hook_memory_on_init(*this, C->get_alias_index(TypeAryPtr::INLINES), minit_in, minit_out);
3913       } else {
3914         const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
3915         int            elemidx  = C->get_alias_index(telemref);
3916         hook_memory_on_init(*this, elemidx, minit_in, minit_out);
3917       }
3918     } else if (oop_type->isa_instptr()) {
3919       set_memory(minit_out, C->get_alias_index(oop_type)); // mark word
3920       ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
3921       for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
3922         ciField* field = ik->nonstatic_field_at(i);
3923         if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
3924           continue;  // do not bother to track really large numbers of fields
3925         // Find (or create) the alias category for this field:
3926         int fieldidx = C->alias_type(field)->index();
3927         hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
3928       }
3929     }
3930   }
3931 
3932   // Cast raw oop to the real thing...
3933   Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type);
3934   javaoop = _gvn.transform(javaoop);
3935   C->set_recent_alloc(control(), javaoop);
3936   assert(just_allocated_object(control()) == javaoop, "just allocated");
3937 
3938 #ifdef ASSERT
3939   { // Verify that the AllocateNode::Ideal_allocation recognizers work:
3940     assert(AllocateNode::Ideal_allocation(rawoop, &_gvn) == alloc,
3941            "Ideal_allocation works");
3942     assert(AllocateNode::Ideal_allocation(javaoop, &_gvn) == alloc,
3943            "Ideal_allocation works");
3944     if (alloc->is_AllocateArray()) {
3945       assert(AllocateArrayNode::Ideal_array_allocation(rawoop, &_gvn) == alloc->as_AllocateArray(),
3946              "Ideal_allocation works");
3947       assert(AllocateArrayNode::Ideal_array_allocation(javaoop, &_gvn) == alloc->as_AllocateArray(),
3948              "Ideal_allocation works");
3949     } else {
3950       assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
3951     }
3952   }
3953 #endif //ASSERT
3954 
3955   return javaoop;
3956 }
3957 
3958 //---------------------------new_instance--------------------------------------
3959 // This routine takes a klass_node which may be constant (for a static type)
3960 // or may be non-constant (for reflective code).  It will work equally well
3961 // for either, and the graph will fold nicely if the optimizer later reduces
3962 // the type to a constant.
3963 // The optional arguments are for specialized use by intrinsics:
3964 //  - If 'extra_slow_test' is not null, it is an extra condition for the slow path.
3965 //  - If 'return_size_val' is not null, report the total object size to the caller.
3966 //  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3967 Node* GraphKit::new_instance(Node* klass_node,
3968                              Node* extra_slow_test,
3969                              Node* *return_size_val,
3970                              bool deoptimize_on_exception,
3971                              InlineTypeBaseNode* inline_type_node) {
3972   // Compute size in doublewords
3973   // The size is always an integral number of doublewords, represented
3974   // as a positive bytewise size stored in the klass's layout_helper.
3975   // The layout_helper also encodes (in a low bit) the need for a slow path.
3976   jint  layout_con = Klass::_lh_neutral_value;
3977   Node* layout_val = get_layout_helper(klass_node, layout_con);
3978   bool  layout_is_con = (layout_val == NULL);
3979 
3980   if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
3981   // Generate the initial go-slow test.  It's either ALWAYS (return a
3982   // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
3983   // case) a computed value derived from the layout_helper.
3984   Node* initial_slow_test = NULL;
3985   if (layout_is_con) {
3986     assert(!StressReflectiveCode, "stress mode does not use these paths");
3987     bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
3988     initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
3989   } else {   // reflective case
3990     // This reflective path is used by Unsafe.allocateInstance.
3991     // (It may be stress-tested by specifying StressReflectiveCode.)
3992     // Basically, we want to get into the VM if there's an illegal argument.
3993     Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
3994     initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
3995     if (extra_slow_test != intcon(0)) {
3996       initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
3997     }
3998     // (Macro-expander will further convert this to a Bool, if necessary.)
3999   }
4000 
4001   // Find the size in bytes.  This is easy; it's the layout_helper.
4002   // The size value must be valid even if the slow path is taken.
4003   Node* size = NULL;
4004   if (layout_is_con) {
4005     size = MakeConX(Klass::layout_helper_size_in_bytes(layout_con));
4006   } else {   // reflective case
4007     // This reflective path is used by clone and Unsafe.allocateInstance.
4008     size = ConvI2X(layout_val);
4009 
4010     // Clear the low bits to extract layout_helper_size_in_bytes:
4011     assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
4012     Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
4013     size = _gvn.transform( new AndXNode(size, mask) );
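    // Worked example (assuming the usual encoding, where the slow-path bit is
    // bit 0 and BytesPerLong == 8): a 24-byte instance that needs the slow
    // path has layout_helper == 0x19; 0x19 & ~right_n_bits(3) == 0x18 == 24.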
4014   }
4015   if (return_size_val != NULL) {
4016     (*return_size_val) = size;
4017   }
4018 
4019   // This is a precise notnull oop of the klass.
4020   // (Actually, it need not be precise if this is a reflective allocation.)
4021   // It's what we cast the result to.
4022   const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
4023   if (!tklass)  tklass = TypeKlassPtr::OBJECT;
4024   const TypeOopPtr* oop_type = tklass->as_instance_type();
4025 
4026   // Now generate allocation code
4027 
4028   // The entire memory state is needed for slow path of the allocation
4029   // since GC and deoptimization can happen.
4030   Node *mem = reset_memory();
4031   set_all_memory(mem); // Create new memory state
4032 
4033   AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
4034                                          control(), mem, i_o(),
4035                                          size, klass_node,
4036                                          initial_slow_test, inline_type_node);
4037 
4038   return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
4039 }
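// A minimal usage sketch for a constant klass (hypothetical; intrinsics may
// also pass the optional arguments, plain allocation sites do not):
//
//   Node* klass_node = makecon(TypeKlassPtr::make(ik));  // ik: loaded ciInstanceKlass
//   Node* obj        = new_instance(klass_node);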
4040 
4041 // With compressed oops, the 64-bit init value for non-flattened value
4042 // arrays is built from two 32-bit compressed oops
4043 static Node* raw_default_for_coops(Node* default_value, GraphKit& kit) {
4044   Node* lower = kit.gvn().transform(new CastP2XNode(kit.control(), default_value));
4045   Node* upper = kit.gvn().transform(new LShiftLNode(lower, kit.intcon(32)));
4046   return kit.gvn().transform(new OrLNode(lower, upper));
4047 }
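// For example (hypothetical bit patterns): if the compressed default oop is
// 0x000000AB, then
//   lower          == 0x00000000000000AB
//   upper          == 0x000000AB00000000   (lower << 32)
//   lower | upper  == 0x000000AB000000AB
// so a single 64-bit store can initialize two adjacent compressed elements.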
4048 
4049 //-------------------------------new_array-------------------------------------
4050 // helper for newarray and anewarray
4051 // The 'length' parameter is (obviously) the length of the array.
4052 // See comments on new_instance for the meaning of the other arguments.
4053 Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
4054                           Node* length,         // number of array elements
4055                           int   nargs,          // number of arguments to push back for uncommon trap
4056                           Node* *return_size_val,
4057                           bool deoptimize_on_exception) {
4058   jint  layout_con = Klass::_lh_neutral_value;
4059   Node* layout_val = get_layout_helper(klass_node, layout_con);
4060   bool  layout_is_con = (layout_val == NULL);
4061 
4062   if (!layout_is_con && !StressReflectiveCode &&
4063       !too_many_traps(Deoptimization::Reason_class_check)) {
4064     // This is a reflective array creation site.
4065     // Optimistically assume that it is a subtype of Object[],
4066     // so that we can fold up all the address arithmetic.
4067     layout_con = Klass::array_layout_helper(T_OBJECT);
4068     Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
4069     Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
4070     { BuildCutout unless(this, bol_lh, PROB_MAX);
4071       inc_sp(nargs);
4072       uncommon_trap(Deoptimization::Reason_class_check,
4073                     Deoptimization::Action_maybe_recompile);
4074     }
4075     layout_val = NULL;
4076     layout_is_con = true;
4077   }
4078 
4079   // Generate the initial go-slow test.  Make sure we do not overflow
4080   // if length is huge (near 2Gig) or negative!  We do not need
4081   // exact double-words here, just a close approximation of needed
4082   // double-words.  We can't add any offset or rounding bits, lest we
4083 // take a size of -1 bytes and make it positive.  Use an unsigned
4084   // compare, so negative sizes look hugely positive.
4085   int fast_size_limit = FastAllocateSizeLimit;
4086   if (layout_is_con) {
4087     assert(!StressReflectiveCode, "stress mode does not use these paths");
4088     // Increase the size limit if we have exact knowledge of array type.
4089     int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
4090     fast_size_limit <<= MAX2(LogBytesPerLong - log2_esize, 0);
4091   }
4092 
4093   Node* initial_slow_cmp  = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
4094   Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
4095 
4096   // --- Size Computation ---
4097   // array_size = round_to_heap(array_header + (length << elem_shift));
4098   // where round_to_heap(x) == align_to(x, MinObjAlignmentInBytes)
4099   // and align_to(x, y) == ((x + y-1) & ~(y-1))
4100   // The rounding mask is strength-reduced, if possible.
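  // Worked example (hypothetical 64-bit layout with header == 16 bytes and
  // 8-byte alignment): an int array (elem_shift == 2) of length 10 needs
  // 16 + (10 << 2) == 56 bytes, already aligned; length 9 needs
  // 16 + 36 == 52 bytes, rounded up to 56.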
4101   int round_mask = MinObjAlignmentInBytes - 1;
4102   Node* header_size = NULL;
4103   int   header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
4104   // (T_BYTE has the weakest alignment and size restrictions...)
4105   if (layout_is_con) {
4106     int       hsize  = Klass::layout_helper_header_size(layout_con);
4107     int       eshift = Klass::layout_helper_log2_element_size(layout_con);
4108     bool is_flat_array = Klass::layout_helper_is_flatArray(layout_con);
4109     if ((round_mask & ~right_n_bits(eshift)) == 0)
4110       round_mask = 0;  // strength-reduce it if it goes away completely
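    // (e.g., with 8-byte alignment (round_mask == 7), a long/double array has
    // eshift == 3, so 7 & ~right_n_bits(3) == 0 and the rounding disappears:
    // element-aligned sizes are already heap-aligned.)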
4111     assert(is_flat_array || (hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
4112     assert(header_size_min <= hsize, "generic minimum is smallest");
4113     header_size_min = hsize;
4114     header_size = intcon(hsize + round_mask);
4115   } else {
4116     Node* hss   = intcon(Klass::_lh_header_size_shift);
4117     Node* hsm   = intcon(Klass::_lh_header_size_mask);
4118     Node* hsize = _gvn.transform( new URShiftINode(layout_val, hss) );
4119     hsize       = _gvn.transform( new AndINode(hsize, hsm) );
4120     Node* mask  = intcon(round_mask);
4121     header_size = _gvn.transform( new AddINode(hsize, mask) );
4122   }
4123 
4124   Node* elem_shift = NULL;
4125   if (layout_is_con) {
4126     int eshift = Klass::layout_helper_log2_element_size(layout_con);
4127     if (eshift != 0)
4128       elem_shift = intcon(eshift);
4129   } else {
4130     // There is no need to mask or shift this value.
4131     // The semantics of LShiftINode include an implicit mask to 0x1F.
4132     assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
4133     elem_shift = layout_val;
4134   }
4135 
4136   // Transition to native address size for all offset calculations:
4137   Node* lengthx = ConvI2X(length);
4138   Node* headerx = ConvI2X(header_size);
4139 #ifdef _LP64
4140   { const TypeInt* tilen = _gvn.find_int_type(length);
4141     if (tilen != NULL && tilen->_lo < 0) {
4142       // Add a manual constraint to a positive range.  Cf. array_element_address.
4143       jint size_max = fast_size_limit;
4144       if (size_max > tilen->_hi)  size_max = tilen->_hi;
4145       const TypeInt* tlcon = TypeInt::make(0, size_max, Type::WidenMin);
4146 
4147       // Only do a narrow I2L conversion if the range check passed.
4148       IfNode* iff = new IfNode(control(), initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
4149       _gvn.transform(iff);
4150       RegionNode* region = new RegionNode(3);
4151       _gvn.set_type(region, Type::CONTROL);
4152       lengthx = new PhiNode(region, TypeLong::LONG);
4153       _gvn.set_type(lengthx, TypeLong::LONG);
4154 
4155       // Range check passed. Use ConvI2L node with narrow type.
4156       Node* passed = IfFalse(iff);
4157       region->init_req(1, passed);
4158       // Make I2L conversion control dependent to prevent it from
4159       // floating above the range check during loop optimizations.
4160       lengthx->init_req(1, C->constrained_convI2L(&_gvn, length, tlcon, passed));
4161 
4162       // Range check failed. Use ConvI2L with wide type because length may be invalid.
4163       region->init_req(2, IfTrue(iff));
4164       lengthx->init_req(2, ConvI2X(length));
4165 
4166       set_control(region);
4167       record_for_igvn(region);
4168       record_for_igvn(lengthx);
4169     }
4170   }
4171 #endif
4172 
4173   // Combine header size (plus rounding) and body size.  Then round down.
4174   // This computation cannot overflow, because it is used only in two
4175   // places, one where the length is sharply limited, and the other
4176   // after a successful allocation.
4177   Node* abody = lengthx;
4178   if (elem_shift != NULL)
4179     abody     = _gvn.transform( new LShiftXNode(lengthx, elem_shift) );
4180   Node* size  = _gvn.transform( new AddXNode(headerx, abody) );
4181   if (round_mask != 0) {
4182     Node* mask = MakeConX(~round_mask);
4183     size       = _gvn.transform( new AndXNode(size, mask) );
4184   }
4185   // else if round_mask == 0, the size computation is self-rounding
4186 
4187   if (return_size_val != NULL) {
4188     // This is the size
4189     (*return_size_val) = size;
4190   }
4191 
4192   // Now generate allocation code
4193 
4194   // The entire memory state is needed for slow path of the allocation
4195   // since GC and deoptimization can happen.
4196   Node *mem = reset_memory();
4197   set_all_memory(mem); // Create new memory state
4198 
4199   if (initial_slow_test->is_Bool()) {
4200     // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
4201     initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
4202   }
4203 
4204   const TypeKlassPtr* ary_klass = _gvn.type(klass_node)->isa_klassptr();
4205   const TypeOopPtr* ary_type = ary_klass->as_instance_type();
4206   const TypeAryPtr* ary_ptr = ary_type->isa_aryptr();
4207 
4208   // Inline type array variants:
4209   // - null-ok:              MyValue.ref[] (ciObjArrayKlass "[LMyValue$ref")
4210   // - null-free:            MyValue.val[] (ciObjArrayKlass "[QMyValue$val")
4211   // - null-free, flattened: MyValue.val[] (ciFlatArrayKlass "[QMyValue$val")
4212   // Check if array is a null-free, non-flattened inline type array
4213   // that needs to be initialized with the default inline type.
4214   Node* default_value = NULL;
4215   Node* raw_default_value = NULL;
4216   if (ary_ptr != NULL && ary_ptr->klass_is_exact()) {
4217     // Array type is known
4218     ciKlass* elem_klass = ary_ptr->klass()->as_array_klass()->element_klass();
4219     if (elem_klass != NULL && elem_klass->is_inlinetype()) {
4220       ciInlineKlass* vk = elem_klass->as_inline_klass();
4221       if (!vk->flatten_array()) {
4222         default_value = InlineTypeNode::default_oop(gvn(), vk);
4223         if (UseCompressedOops) {
4224           default_value = _gvn.transform(new EncodePNode(default_value, default_value->bottom_type()->make_narrowoop()));
4225           raw_default_value = raw_default_for_coops(default_value, *this);
4226         } else {
4227           raw_default_value = _gvn.transform(new CastP2XNode(control(), default_value));
4228         }
4229       }
4230     }
4231   } else if (ary_klass->klass()->can_be_inline_array_klass()) {
4232     // Array type is not known, add runtime checks
4233     assert(!ary_klass->klass_is_exact(), "unexpected exact type");
4234     Node* r = new RegionNode(4);
4235     default_value = new PhiNode(r, TypeInstPtr::BOTTOM);
4236 
4237     // Check if array is an object array
4238     Node* cmp = gen_lh_array_test(klass_node, Klass::_lh_array_tag_obj_value);
4239     Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
4240     IfNode* iff = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
4241 
4242     // Not an object array, initialize with all zero
4243     r->init_req(1, _gvn.transform(new IfFalseNode(iff)));
4244     default_value->init_req(1, null());
4245 
4246     // Object array, check if null-free
4247     set_control(_gvn.transform(new IfTrueNode(iff)));
4248     Node* lhp = basic_plus_adr(klass_node, in_bytes(Klass::layout_helper_offset()));
4249     Node* layout_val = _gvn.transform(LoadNode::make(_gvn, NULL, immutable_memory(), lhp, lhp->bottom_type()->is_ptr(), TypeInt::INT, T_INT, MemNode::unordered));
4250     Node* null_free = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_null_free_shift)));
4251     null_free = _gvn.transform(new AndINode(null_free, intcon(Klass::_lh_null_free_mask)));
4252     cmp = _gvn.transform(new CmpINode(null_free, intcon(0)));
4253     bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
4254     iff = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
4255 
4256     // Not null-free, initialize with all zero
4257     r->init_req(2, _gvn.transform(new IfFalseNode(iff)));
4258     default_value->init_req(2, null());
4259 
4260     // Null-free, non-flattened inline type array, initialize with the default value
4261     set_control(_gvn.transform(new IfTrueNode(iff)));
4262     Node* p = basic_plus_adr(klass_node, in_bytes(ArrayKlass::element_klass_offset()));
4263     Node* eklass = _gvn.transform(LoadKlassNode::make(_gvn, control(), immutable_memory(), p, TypeInstPtr::KLASS));
4264     Node* adr_fixed_block_addr = basic_plus_adr(eklass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset()));
4265     Node* adr_fixed_block = make_load(control(), adr_fixed_block_addr, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
4266     Node* default_value_offset_addr = basic_plus_adr(adr_fixed_block, in_bytes(InlineKlass::default_value_offset_offset()));
4267     Node* default_value_offset = make_load(control(), default_value_offset_addr, TypeInt::INT, T_INT, MemNode::unordered);
4268     Node* elem_mirror = load_mirror_from_klass(eklass);
4269     Node* default_value_addr = basic_plus_adr(elem_mirror, ConvI2X(default_value_offset));
4270     Node* val = access_load_at(elem_mirror, default_value_addr, _gvn.type(default_value_addr)->is_ptr(), TypeInstPtr::BOTTOM, T_OBJECT, IN_HEAP);
4271     r->init_req(3, control());
4272     default_value->init_req(3, val);
4273 
4274     set_control(_gvn.transform(r));
4275     default_value = _gvn.transform(default_value);
4276     if (UseCompressedOops) {
4277       default_value = _gvn.transform(new EncodePNode(default_value, default_value->bottom_type()->make_narrowoop()));
4278       raw_default_value = raw_default_for_coops(default_value, *this);
4279     } else {
4280       raw_default_value = _gvn.transform(new CastP2XNode(control(), default_value));
4281     }
4282   }
4283 
4284   // Create the AllocateArrayNode and its result projections
4285   AllocateArrayNode* alloc = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
4286                                                    control(), mem, i_o(),
4287                                                    size, klass_node,
4288                                                    initial_slow_test,
4289                                                    length, default_value,
4290                                                    raw_default_value);
4291 
4292   // Cast to correct type.  Note that the klass_node may be constant or not,
4293   // and in the latter case the actual array type will be inexact also.
4294   // (This happens via a non-constant argument to inline_native_newArray.)
4295   // In any case, the value of klass_node provides the desired array type.
4296   const TypeInt* length_type = _gvn.find_int_type(length);
4297   if (ary_type->isa_aryptr() && length_type != NULL) {
4298     // Try to get a better type than POS for the size
4299     ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
4300   }
4301 
4302   Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);
4303 
4304   // Cast length on remaining path to be as narrow as possible
4305   if (map()->find_edge(length) >= 0) {
4306     Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
4307     if (ccast != length) {
4308       _gvn.set_type_bottom(ccast);
4309       record_for_igvn(ccast);
4310       replace_in_map(length, ccast);
4311     }
4312   }
4313 
4314   return javaoop;
4315 }
4316 
4317 // The following "Ideal_foo" functions are placed here because they recognize
4318 // the graph shapes created by the functions immediately above.
4319 
4320 //---------------------------Ideal_allocation----------------------------------
4321 // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
4322 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
4323   if (ptr == NULL) {     // reduce dumb test in callers
4324     return NULL;
4325   }
4326 
4327   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4328   ptr = bs->step_over_gc_barrier(ptr);
4329 
4330   if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast
4331     ptr = ptr->in(1);
4332     if (ptr == NULL) return NULL;
4333   }
4334   // Return NULL for allocations with several casts:
4335   //   j.l.reflect.Array.newInstance(jobject, jint)
4336   //   Object.clone()
4337   // to keep the more precise type from the last cast.
4338   if (ptr->is_Proj()) {
4339     Node* allo = ptr->in(0);
4340     if (allo != NULL && allo->is_Allocate()) {
4341       return allo->as_Allocate();
4342     }
4343   }
4344   // Report failure to match.
4345   return NULL;
4346 }
4347 
4348 // Fancy version which also strips off an offset (and reports it to caller).
4349 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase,
4350                                              intptr_t& offset) {
4351   Node* base = AddPNode::Ideal_base_and_offset(ptr, phase, offset);
4352   if (base == NULL)  return NULL;
4353   return Ideal_allocation(base, phase);
4354 }
4355 
4356 // Trace Initialize <- Proj[Parm] <- Allocate
4357 AllocateNode* InitializeNode::allocation() {
4358   Node* rawoop = in(InitializeNode::RawAddress);
4359   if (rawoop->is_Proj()) {
4360     Node* alloc = rawoop->in(0);
4361     if (alloc->is_Allocate()) {
4362       return alloc->as_Allocate();
4363     }
4364   }
4365   return NULL;
4366 }
4367 
4368 // Trace Allocate -> Proj[Parm] -> Initialize
4369 InitializeNode* AllocateNode::initialization() {
4370   ProjNode* rawoop = proj_out_or_null(AllocateNode::RawAddress);
4371   if (rawoop == NULL)  return NULL;
4372   for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) {
4373     Node* init = rawoop->fast_out(i);
4374     if (init->is_Initialize()) {
4375       assert(init->as_Initialize()->allocation() == this, "2-way link");
4376       return init->as_Initialize();
4377     }
4378   }
4379   return NULL;
4380 }
4381 
4382 //----------------------------- loop predicates ---------------------------
4383 
4384 //--------------------------add_empty_predicate_impl--------------------------
4385 void GraphKit::add_empty_predicate_impl(Deoptimization::DeoptReason reason, int nargs) {
4386   // Too many traps seen?
4387   if (too_many_traps(reason)) {
4388 #ifdef ASSERT
4389     if (TraceLoopPredicate) {
4390       int tc = C->trap_count(reason);
4391       tty->print("too many traps=%s tcount=%d in ",
4392                     Deoptimization::trap_reason_name(reason), tc);
4393       method()->print(); // which method has too many predicate traps
4394       tty->cr();
4395     }
4396 #endif
4397     // We cannot afford to take more traps here,
4398     // so do not generate a predicate.
4399     return;
4400   }
4401 
4402   Node* cont    = _gvn.intcon(1);
4403   Node* opq     = _gvn.transform(new Opaque1Node(C, cont));
4404   Node* bol     = _gvn.transform(new Conv2BNode(opq));
4405   IfNode* iff   = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
4406   Node* iffalse = _gvn.transform(new IfFalseNode(iff));
4407   C->add_predicate_opaq(opq);
4408   {
4409     PreserveJVMState pjvms(this);
4410     set_control(iffalse);
4411     inc_sp(nargs);
4412     uncommon_trap(reason, Deoptimization::Action_maybe_recompile);
4413   }
4414   Node* iftrue = _gvn.transform(new IfTrueNode(iff));
4415   set_control(iftrue);
4416 }
4417 
4418 //---------------------------add_empty_predicates------------------------------
4419 void GraphKit::add_empty_predicates(int nargs) {
4420   // These loop predicates remain empty. All concrete loop predicates are inserted above the corresponding
4421   // empty loop predicate later by 'PhaseIdealLoop::create_new_if_for_predicate'. All concrete loop predicates of
4422   // a specific kind (normal, profile or limit check) share the same uncommon trap as the empty loop predicate.
4423   if (UseLoopPredicate) {
4424     add_empty_predicate_impl(Deoptimization::Reason_predicate, nargs);
4425   }
4426   if (UseProfiledLoopPredicate) {
4427     add_empty_predicate_impl(Deoptimization::Reason_profile_predicate, nargs);
4428   }
4429   // The loop's limit check predicate should be near the loop.
4430   add_empty_predicate_impl(Deoptimization::Reason_loop_limit_check, nargs);
4431 }
4432 
4433 void GraphKit::sync_kit(IdealKit& ideal) {
4434   set_all_memory(ideal.merged_memory());
4435   set_i_o(ideal.i_o());
4436   set_control(ideal.ctrl());
4437 }
4438 
4439 void GraphKit::final_sync(IdealKit& ideal) {
4440   // Final sync of IdealKit and GraphKit.
4441   sync_kit(ideal);
4442 }
4443 
4444 Node* GraphKit::load_String_length(Node* str, bool set_ctrl) {
4445   Node* len = load_array_length(load_String_value(str, set_ctrl));
4446   Node* coder = load_String_coder(str, set_ctrl);
4447   // Divide length by 2 if coder is UTF16
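  // (coder: LATIN1 == 0, UTF16 == 1, so 'len >> coder' yields the char count
  // in both cases; this assumes the standard String coder encoding.)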
4448   return _gvn.transform(new RShiftINode(len, coder));
4449 }
4450 
4451 Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
4452   int value_offset = java_lang_String::value_offset();
4453   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4454                                                      false, NULL, Type::Offset(0));
4455   const TypePtr* value_field_type = string_type->add_offset(value_offset);
4456   const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
4457                                                   TypeAry::make(TypeInt::BYTE, TypeInt::POS, false, true, true),
4458                                                   ciTypeArrayKlass::make(T_BYTE), true, Type::Offset(0));
4459   Node* p = basic_plus_adr(str, str, value_offset);
4460   Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
4461                               IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4462   return load;
4463 }
4464 
4465 Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) {
4466   if (!CompactStrings) {
4467     return intcon(java_lang_String::CODER_UTF16);
4468   }
4469   int coder_offset = java_lang_String::coder_offset();
4470   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4471                                                      false, NULL, Type::Offset(0));
4472   const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4473 
4474   Node* p = basic_plus_adr(str, str, coder_offset);
4475   Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
4476                               IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4477   return load;
4478 }
4479 
4480 void GraphKit::store_String_value(Node* str, Node* value) {
4481   int value_offset = java_lang_String::value_offset();
4482   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4483                                                      false, NULL, Type::Offset(0));
4484   const TypePtr* value_field_type = string_type->add_offset(value_offset);
4485 
4486   access_store_at(str,  basic_plus_adr(str, value_offset), value_field_type,
4487                   value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
4488 }
4489 
4490 void GraphKit::store_String_coder(Node* str, Node* value) {
4491   int coder_offset = java_lang_String::coder_offset();
4492   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4493                                                      false, NULL, Type::Offset(0));
4494   const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4495 
4496   access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
4497                   value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
4498 }
4499 
4500 // Capture src and dst memory state with a MergeMemNode
4501 Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
4502   if (src_type == dst_type) {
4503     // Types are equal, we don't need a MergeMemNode
4504     return memory(src_type);
4505   }
4506   MergeMemNode* merge = MergeMemNode::make(map()->memory());
4507   record_for_igvn(merge); // fold it up later, if possible
4508   int src_idx = C->get_alias_index(src_type);
4509   int dst_idx = C->get_alias_index(dst_type);
4510   merge->set_memory_at(src_idx, memory(src_idx));
4511   merge->set_memory_at(dst_idx, memory(dst_idx));
4512   return merge;
4513 }
4514 
4515 Node* GraphKit::compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count) {
4516   assert(Matcher::match_rule_supported(Op_StrCompressedCopy), "Intrinsic not supported");
4517   assert(src_type == TypeAryPtr::BYTES || src_type == TypeAryPtr::CHARS, "invalid source type");
4518   // If input and output memory types differ, capture both states to preserve
4519   // the dependency between preceding and subsequent loads/stores.
4520   // For example, the following program:
4521   //  StoreB
4522   //  compress_string
4523   //  LoadB
4524   // has this memory graph (use->def):
4525   //  LoadB -> compress_string -> CharMem
4526   //             ... -> StoreB -> ByteMem
4527   // The intrinsic hides the dependency between LoadB and StoreB, causing
4528   // the load to read from memory not containing the result of the StoreB.
4529   // The correct memory graph should look like this:
4530   //  LoadB -> compress_string -> MergeMem(CharMem, StoreB(ByteMem))
4531   Node* mem = capture_memory(src_type, TypeAryPtr::BYTES);
4532   StrCompressedCopyNode* str = new StrCompressedCopyNode(control(), mem, src, dst, count);
4533   Node* res_mem = _gvn.transform(new SCMemProjNode(str));
4534   set_memory(res_mem, TypeAryPtr::BYTES);
4535   return str;
4536 }
4537 
4538 void GraphKit::inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count) {
4539   assert(Matcher::match_rule_supported(Op_StrInflatedCopy), "Intrinsic not supported");
4540   assert(dst_type == TypeAryPtr::BYTES || dst_type == TypeAryPtr::CHARS, "invalid dest type");
4541   // Capture src and dst memory (see comment in 'compress_string').
4542   Node* mem = capture_memory(TypeAryPtr::BYTES, dst_type);
4543   StrInflatedCopyNode* str = new StrInflatedCopyNode(control(), mem, src, dst, count);
4544   set_memory(_gvn.transform(str), dst_type);
4545 }
4546 
4547 void GraphKit::inflate_string_slow(Node* src, Node* dst, Node* start, Node* count) {
4548   /**
4549    * int i_char = start;
4550    * for (int i_byte = 0; i_byte < count; i_byte++) {
4551    *   dst[i_char++] = (char)(src[i_byte] & 0xff);
4552    * }
4553    */
4554   add_empty_predicates();
4555   RegionNode* head = new RegionNode(3);
4556   head->init_req(1, control());
4557   gvn().set_type(head, Type::CONTROL);
4558   record_for_igvn(head);
4559 
4560   Node* i_byte = new PhiNode(head, TypeInt::INT);
4561   i_byte->init_req(1, intcon(0));
4562   gvn().set_type(i_byte, TypeInt::INT);
4563   record_for_igvn(i_byte);
4564 
4565   Node* i_char = new PhiNode(head, TypeInt::INT);
4566   i_char->init_req(1, start);
4567   gvn().set_type(i_char, TypeInt::INT);
4568   record_for_igvn(i_char);
4569 
4570   Node* mem = PhiNode::make(head, memory(TypeAryPtr::BYTES), Type::MEMORY, TypeAryPtr::BYTES);
4571   gvn().set_type(mem, Type::MEMORY);
4572   record_for_igvn(mem);
4573   set_control(head);
4574   set_memory(mem, TypeAryPtr::BYTES);
4575   Node* ch = load_array_element(control(), src, i_byte, TypeAryPtr::BYTES);
4576   Node* st = store_to_memory(control(), array_element_address(dst, i_char, T_BYTE),
4577                              AndI(ch, intcon(0xff)), T_CHAR, TypeAryPtr::BYTES, MemNode::unordered,
4578                              false, false, true /* mismatched */);
4579 
4580   IfNode* iff = create_and_map_if(head, Bool(CmpI(i_byte, count), BoolTest::lt), PROB_FAIR, COUNT_UNKNOWN);
4581   head->init_req(2, IfTrue(iff));
4582   mem->init_req(2, st);
4583   i_byte->init_req(2, AddI(i_byte, intcon(1)));
4584   i_char->init_req(2, AddI(i_char, intcon(2)));
4585 
4586   set_control(IfFalse(iff));
4587   set_memory(st, TypeAryPtr::BYTES);
4588 }
4589 
4590 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
4591   if (!field->is_constant()) {
4592     return NULL; // Field not marked as constant.
4593   }
4594   ciInstance* holder = NULL;
4595   if (!field->is_static()) {
4596     ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
4597     if (const_oop != NULL && const_oop->is_instance()) {
4598       holder = const_oop->as_instance();
4599     }
4600   }
4601   const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
4602                                                         /*is_unsigned_load=*/false);
4603   if (con_type != NULL) {
4604     Node* con = makecon(con_type);
4605     assert(!field->type()->is_inlinetype() || (field->is_static() && !con_type->is_zero_type()), "sanity");
4606     // Check type of constant which might be more precise
4607     if (con_type->is_inlinetypeptr() && con_type->inline_klass()->is_scalarizable()) {
4608       // Load inline type from constant oop
4609       con = InlineTypeNode::make_from_oop(this, con, con_type->inline_klass());
4610     }
4611     return con;
4612   }
4613   return NULL;
4614 }
4615 
4616 //---------------------------load_mirror_from_klass----------------------------
4617 // Given a klass oop, load its java mirror (a java.lang.Class oop).
4618 Node* GraphKit::load_mirror_from_klass(Node* klass) {
4619   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
4620   Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
4621   // mirror = ((OopHandle)mirror)->resolve();
4622   return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
4623 }