17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/systemDictionary.hpp"
27 #include "gc/shared/barrierSet.hpp"
28 #include "gc/shared/c2/barrierSetC2.hpp"
29 #include "memory/allocation.inline.hpp"
30 #include "memory/resourceArea.hpp"
31 #include "oops/objArrayKlass.hpp"
32 #include "opto/addnode.hpp"
33 #include "opto/castnode.hpp"
34 #include "opto/cfgnode.hpp"
35 #include "opto/connode.hpp"
36 #include "opto/convertnode.hpp"
37 #include "opto/loopnode.hpp"
38 #include "opto/machnode.hpp"
39 #include "opto/movenode.hpp"
40 #include "opto/narrowptrnode.hpp"
41 #include "opto/mulnode.hpp"
42 #include "opto/phaseX.hpp"
43 #include "opto/regmask.hpp"
44 #include "opto/runtime.hpp"
45 #include "opto/subnode.hpp"
46 #include "utilities/vmError.hpp"
47
48 // Portions of code courtesy of Clifford Click
49
50 // Optimization - Graph Style
51
52 //=============================================================================
53 //------------------------------Value------------------------------------------
54 // Compute the type of the RegionNode.
55 const Type* RegionNode::Value(PhaseGVN* phase) const {
56 for( uint i=1; i<req(); ++i ) { // For all paths in
354 nstack.push(n);
355 visited.set(n->_idx);
356 while (nstack.size() != 0) {
357 n = nstack.pop();
358 uint max = n->outcnt();
359 for (uint i = 0; i < max; i++) {
360 Node* m = n->raw_out(i);
361 if (m != NULL && m->is_CFG()) {
362 if (phase->eqv(m, this)) {
363 return false; // We reached the Region node - it is not dead.
364 }
365 if (!visited.test_set(m->_idx))
366 nstack.push(m);
367 }
368 }
369 }
370
371 return true; // The Region node is unreachable - it is dead.
372 }
373
// Collapse a useless diamond left behind by incremental inlining +
// PhaseStringOpts: if this 2-input Region's unique memory Phi merely merges
// a MergeMem with that MergeMem's own base memory, replace the Phi with the
// MergeMem so the Region can go away. Returns true if the Phi was replaced.
bool RegionNode::try_clean_mem_phi(PhaseGVN *phase) {
  // Incremental inlining + PhaseStringOpts sometimes produce:
  //
  //   cmpP with 1 top input
  //        |
  //       If
  //      /  \
  // IfFalse IfTrue  /- Some Node
  //      \  /      /    /
  //     Region    / /-MergeMem
  //          \---Phi
  //
  //
  // It's expected by PhaseStringOpts that the Region goes away and is
  // replaced by If's control input but because there's still a Phi,
  // the Region stays in the graph. The top input from the cmpP is
  // propagated forward and a subgraph that is useful goes away. The
  // code below replaces the Phi with the MergeMem so that the Region
  // is simplified.

  // Only handle the exact shape above: one Phi, memory-typed, over a
  // 2-way diamond (is_diamond_phi(true) checks the If structure).
  PhiNode* phi = has_unique_phi();
  if (phi && phi->type() == Type::MEMORY && req() == 3 && phi->is_diamond_phi(true)) {
    MergeMemNode* m = NULL;
    assert(phi->req() == 3, "same as region");
    for (uint i = 1; i < 3; ++i) {
      Node *mem = phi->in(i);
      if (mem && mem->is_MergeMem() && in(i)->outcnt() == 1) {
        // Nothing is control-dependent on path #i except the region itself.
        m = mem->as_MergeMem();
        uint j = 3 - i;  // index of the other diamond arm (1 <-> 2)
        Node* other = phi->in(j);
        if (other && other == m->base_memory()) {
          // m is a successor memory to other, and is not pinned inside the diamond, so push it out.
          // This will allow the diamond to collapse completely.
          // NOTE(review): caller must only invoke this during IGVN —
          // is_IterGVN() is dereferenced unchecked here.
          phase->is_IterGVN()->replace_node(phi, m);
          return true;
        }
      }
    }
  }
  return false;
}
416
417 //------------------------------Ideal------------------------------------------
418 // Return a node which is more "ideal" than the current node. Must preserve
419 // the CFG, but we can still strip out dead paths.
420 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
421 if( !can_reshape && !in(0) ) return NULL; // Already degraded to a Copy
422 assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
423
424 // Check for RegionNode with no Phi users and both inputs come from either
425 // arm of the same IF. If found, then the control-flow split is useless.
426 bool has_phis = false;
427 if (can_reshape) { // Need DU info to check for Phi users
428 has_phis = (has_phi() != NULL); // Cache result
429 if (has_phis && try_clean_mem_phi(phase)) {
430 has_phis = false;
431 }
432
433 if (!has_phis) { // No Phi users? Nothing merging?
434 for (uint i = 1; i < req()-1; i++) {
435 Node *if1 = in(i);
436 if( !if1 ) continue;
437 Node *iff = if1->in(0);
438 if( !iff || !iff->is_If() ) continue;
439 for( uint j=i+1; j<req(); j++ ) {
440 if( in(j) && in(j)->in(0) == iff &&
441 if1->Opcode() != in(j)->Opcode() ) {
442 // Add the IF Projections to the worklist. They (and the IF itself)
443 // will be eliminated if dead.
444 phase->is_IterGVN()->add_users_to_worklist(iff);
445 set_req(i, iff->in(0));// Skip around the useless IF diamond
446 set_req(j, NULL);
447 return this; // Record progress
448 }
449 }
450 }
878
879 //=============================================================================
880 // note that these functions assume that the _adr_type field is flattened
881 uint PhiNode::hash() const {
882 const Type* at = _adr_type;
883 return TypeNode::hash() + (at ? at->hash() : 0);
884 }
885 bool PhiNode::cmp( const Node &n ) const {
886 return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
887 }
888 static inline
889 const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
890 if (at == NULL || at == TypePtr::BOTTOM) return at;
891 return Compile::current()->alias_type(at)->adr_type();
892 }
893
894 //----------------------------make---------------------------------------------
895 // create a new phi with edges matching r and set (initially) to x
896 PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
897 uint preds = r->req(); // Number of predecessor paths
898 assert(t != Type::MEMORY || at == flatten_phi_adr_type(at), "flatten at");
899 PhiNode* p = new PhiNode(r, t, at);
900 for (uint j = 1; j < preds; j++) {
901 // Fill in all inputs, except those which the region does not yet have
902 if (r->in(j) != NULL)
903 p->init_req(j, x);
904 }
905 return p;
906 }
907 PhiNode* PhiNode::make(Node* r, Node* x) {
908 const Type* t = x->bottom_type();
909 const TypePtr* at = NULL;
910 if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
911 return make(r, x, t, at);
912 }
913 PhiNode* PhiNode::make_blank(Node* r, Node* x) {
914 const Type* t = x->bottom_type();
915 const TypePtr* at = NULL;
916 if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
917 return new PhiNode(r, t, at);
918 }
1088 }
1089 }
1090 } else if (l->in(LoopNode::LoopBackControl) != NULL &&
1091 in(LoopNode::EntryControl) != NULL &&
1092 phase->type(l->in(LoopNode::LoopBackControl)) == Type::TOP) {
1093 // During CCP, if we saturate the type of a counted loop's Phi
1094 // before the special code for counted loop above has a chance
1095 // to run (that is as long as the type of the backedge's control
1096 // is top), we might end up with non monotonic types
1097 return phase->type(in(LoopNode::EntryControl))->filter_speculative(_type);
1098 }
1099 }
1100
1101 // Until we have harmony between classes and interfaces in the type
1102 // lattice, we must tread carefully around phis which implicitly
1103 // convert the one to the other.
1104 const TypePtr* ttp = _type->make_ptr();
1105 const TypeInstPtr* ttip = (ttp != NULL) ? ttp->isa_instptr() : NULL;
1106 const TypeKlassPtr* ttkp = (ttp != NULL) ? ttp->isa_klassptr() : NULL;
1107 bool is_intf = false;
1108 if (ttip != NULL) {
1109 ciKlass* k = ttip->klass();
1110 if (k->is_loaded() && k->is_interface())
1111 is_intf = true;
1112 }
1113 if (ttkp != NULL) {
1114 ciKlass* k = ttkp->klass();
1115 if (k->is_loaded() && k->is_interface())
1116 is_intf = true;
1117 }
1118
1119 // Default case: merge all inputs
1120 const Type *t = Type::TOP; // Merged type starting value
1121 for (uint i = 1; i < req(); ++i) {// For all paths in
1122 // Reachable control path?
1123 if (r->in(i) && phase->type(r->in(i)) == Type::CONTROL) {
1124 const Type* ti = phase->type(in(i));
1125 // We assume that each input of an interface-valued Phi is a true
1126 // subtype of that interface. This might not be true of the meet
1127 // of all the input types. The lattice is not distributive in
1128 // such cases. Ward off asserts in type.cpp by refusing to do
1129 // meets between interfaces and proper classes.
1130 const TypePtr* tip = ti->make_ptr();
1131 const TypeInstPtr* tiip = (tip != NULL) ? tip->isa_instptr() : NULL;
1132 if (tiip) {
1133 bool ti_is_intf = false;
1134 ciKlass* k = tiip->klass();
1135 if (k->is_loaded() && k->is_interface())
1136 ti_is_intf = true;
1153 // (Occurrences of this case suggest improvements to Value methods.)
1154 //
1155 // It is not possible to see Type::BOTTOM values as phi inputs,
1156 // because the ciTypeFlow pre-pass produces verifier-quality types.
1157 const Type* ft = t->filter_speculative(_type); // Worst case type
1158
1159 #ifdef ASSERT
1160 // The following logic has been moved into TypeOopPtr::filter.
1161 const Type* jt = t->join_speculative(_type);
1162 if (jt->empty()) { // Emptied out???
1163
1164 // Check for evil case of 't' being a class and '_type' expecting an
1165 // interface. This can happen because the bytecodes do not contain
1166 // enough type info to distinguish a Java-level interface variable
1167 // from a Java-level object variable. If we meet 2 classes which
1168 // both implement interface I, but their meet is at 'j/l/O' which
1169 // doesn't implement I, we have no way to tell if the result should
1170 // be 'I' or 'j/l/O'. Thus we'll pick 'j/l/O'. If this then flows
1171 // into a Phi which "knows" it's an Interface type we'll have to
1172 // uplift the type.
1173 if (!t->empty() && ttip && ttip->is_loaded() && ttip->klass()->is_interface()) {
1174 assert(ft == _type, ""); // Uplift to interface
1175 } else if (!t->empty() && ttkp && ttkp->is_loaded() && ttkp->klass()->is_interface()) {
1176 assert(ft == _type, ""); // Uplift to interface
1177 } else {
1178 // We also have to handle 'evil cases' of interface- vs. class-arrays
1179 Type::get_arrays_base_elements(jt, _type, NULL, &ttip);
1180 if (!t->empty() && ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
1181 assert(ft == _type, ""); // Uplift to array of interface
1182 } else {
1183 // Otherwise it's something stupid like non-overlapping int ranges
1184 // found on dying counted loops.
1185 assert(ft == Type::TOP, ""); // Canonical empty value
1186 }
1187 }
1188 }
1189
1190 else {
1191
1192 // If we have an interface-typed Phi and we narrow to a class type, the join
1193 // should report back the class. However, if we have a J/L/Object
1194 // class-typed Phi and an interface flows in, it's possible that the meet &
1195 // join report an interface back out. This isn't possible but happens
1317
1318 //------------------------------Identity---------------------------------------
1319 // Check for Region being Identity.
1320 Node* PhiNode::Identity(PhaseGVN* phase) {
1321 // Check for no merging going on
1322 // (There used to be special-case code here when this->region->is_Loop.
1323 // It would check for a tributary phi on the backedge that the main phi
1324 // trivially, perhaps with a single cast. The unique_input method
1325 // does all this and more, by reducing such tributaries to 'this'.)
1326 Node* uin = unique_input(phase, false);
1327 if (uin != NULL) {
1328 return uin;
1329 }
1330
1331 int true_path = is_diamond_phi();
1332 if (true_path != 0) {
1333 Node* id = is_cmove_id(phase, true_path);
1334 if (id != NULL) return id;
1335 }
1336
1337 // Looking for phis with identical inputs. If we find one that has
1338 // type TypePtr::BOTTOM, replace the current phi with the bottom phi.
1339 if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() !=
1340 TypePtr::BOTTOM && !adr_type()->is_known_instance()) {
1341 uint phi_len = req();
1342 Node* phi_reg = region();
1343 for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
1344 Node* u = phi_reg->fast_out(i);
1345 if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY &&
1346 u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg &&
1347 u->req() == phi_len) {
1348 for (uint j = 1; j < phi_len; j++) {
1349 if (in(j) != u->in(j)) {
1350 u = NULL;
1351 break;
1352 }
1353 }
1354 if (u != NULL) {
1355 return u;
1356 }
1863 }
1864 return delay;
1865 }
1866
1867 //------------------------------Ideal------------------------------------------
1868 // Return a node which is more "ideal" than the current node. Must preserve
1869 // the CFG, but we can still strip out dead paths.
1870 Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1871 // The next should never happen after 6297035 fix.
1872 if( is_copy() ) // Already degraded to a Copy ?
1873 return NULL; // No change
1874
1875 Node *r = in(0); // RegionNode
1876 assert(r->in(0) == NULL || !r->in(0)->is_Root(), "not a specially hidden merge");
1877
1878 // Note: During parsing, phis are often transformed before their regions.
1879 // This means we have to use type_or_null to defend against untyped regions.
1880 if( phase->type_or_null(r) == Type::TOP ) // Dead code?
1881 return NULL; // No change
1882
1883 Node *top = phase->C->top();
1884 bool new_phi = (outcnt() == 0); // transforming new Phi
1885 // No change for igvn if new phi is not hooked
1886 if (new_phi && can_reshape)
1887 return NULL;
1888
1889 // The are 2 situations when only one valid phi's input is left
1890 // (in addition to Region input).
1891 // One: region is not loop - replace phi with this input.
1892 // Two: region is loop - replace phi with top since this data path is dead
1893 // and we need to break the dead data loop.
1894 Node* progress = NULL; // Record if any progress made
1895 for( uint j = 1; j < req(); ++j ){ // For all paths in
1896 // Check unreachable control paths
1897 Node* rc = r->in(j);
1898 Node* n = in(j); // Get the input
1899 if (rc == NULL || phase->type(rc) == Type::TOP) {
1900 if (n != top) { // Not already top?
1901 PhaseIterGVN *igvn = phase->is_IterGVN();
1902 if (can_reshape && igvn != NULL) {
2161 for (uint i = 1; i < req(); i++) {
2162 offset->init_req(i, in(i)->in(AddPNode::Offset));
2163 }
2164 phase->is_IterGVN()->register_new_node_with_optimizer(offset);
2165 }
2166 return new AddPNode(base, address, offset);
2167 }
2168 }
2169 }
2170
2171 // Split phis through memory merges, so that the memory merges will go away.
2172 // Piggy-back this transformation on the search for a unique input....
2173 // It will be as if the merged memory is the unique value of the phi.
2174 // (Do not attempt this optimization unless parsing is complete.
2175 // It would make the parser's memory-merge logic sick.)
2176 // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
2177 if (progress == NULL && can_reshape && type() == Type::MEMORY) {
2178 // see if this phi should be sliced
2179 uint merge_width = 0;
2180 bool saw_self = false;
2181 for( uint i=1; i<req(); ++i ) {// For all paths in
2182 Node *ii = in(i);
2183 // TOP inputs should not be counted as safe inputs because if the
2184 // Phi references itself through all other inputs then splitting the
2185 // Phi through memory merges would create dead loop at later stage.
2186 if (ii == top) {
2187 return NULL; // Delay optimization until graph is cleaned.
2188 }
2189 if (ii->is_MergeMem()) {
2190 MergeMemNode* n = ii->as_MergeMem();
2191 merge_width = MAX2(merge_width, n->req());
2192 saw_self = saw_self || phase->eqv(n->base_memory(), this);
2193 }
2194 }
2195
2196 // This restriction is temporarily necessary to ensure termination:
2197 if (!saw_self && adr_type() == TypePtr::BOTTOM) merge_width = 0;
2198
2199 if (merge_width > Compile::AliasIdxRaw) {
2200 // found at least one non-empty MergeMem
2201 const TypePtr* at = adr_type();
2202 if (at != TypePtr::BOTTOM) {
2203 // Patch the existing phi to select an input from the merge:
2204 // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2205 // Phi:AT1(...m1...)
2206 int alias_idx = phase->C->get_alias_index(at);
2207 for (uint i=1; i<req(); ++i) {
2208 Node *ii = in(i);
2209 if (ii->is_MergeMem()) {
2210 MergeMemNode* n = ii->as_MergeMem();
2211 // compress paths and change unreachable cycles to TOP
2212 // If not, we can update the input infinitely along a MergeMem cycle
2213 // Equivalent code is in MemNode::Ideal_common
2214 Node *m = phase->transform(n);
2215 if (outcnt() == 0) { // Above transform() may kill us!
2216 return top;
2217 }
2606 return in(0)->in(0);
2607 }
2608
2609
#ifndef PRODUCT
// Debug-only printing: append the bytecode index of the exception handler
// this projection selects to the standard projection dump.
void CatchProjNode::dump_spec(outputStream *st) const {
  ProjNode::dump_spec(st);
  st->print("@bci %d ",_handler_bci);
}
#endif
2616
2617 //=============================================================================
2618 //------------------------------Identity---------------------------------------
2619 // Check for CreateEx being Identity.
2620 Node* CreateExNode::Identity(PhaseGVN* phase) {
2621 if( phase->type(in(1)) == Type::TOP ) return in(1);
2622 if( phase->type(in(0)) == Type::TOP ) return in(0);
2623 // We only come from CatchProj, unless the CatchProj goes away.
2624 // If the CatchProj is optimized away, then we just carry the
2625 // exception oop through.
2626 CallNode *call = in(1)->in(0)->as_Call();
2627
2628 return ( in(0)->is_CatchProj() && in(0)->in(0)->in(1) == in(1) )
2629 ? this
2630 : call->in(TypeFunc::Parms);
2631 }
2632
2633 //=============================================================================
2634 //------------------------------Value------------------------------------------
2635 // Check for being unreachable.
2636 const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
2637 if (!in(0) || in(0)->is_top()) return Type::TOP;
2638 return bottom_type();
2639 }
2640
2641 //------------------------------Ideal------------------------------------------
2642 // Check for no longer being part of a loop
2643 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2644 if (can_reshape && !in(0)->is_Loop()) {
2645 // Dead code elimination can sometimes delete this projection so
|
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/systemDictionary.hpp"
27 #include "gc/shared/barrierSet.hpp"
28 #include "gc/shared/c2/barrierSetC2.hpp"
29 #include "memory/allocation.inline.hpp"
30 #include "memory/resourceArea.hpp"
31 #include "oops/objArrayKlass.hpp"
32 #include "opto/addnode.hpp"
33 #include "opto/castnode.hpp"
34 #include "opto/cfgnode.hpp"
35 #include "opto/connode.hpp"
36 #include "opto/convertnode.hpp"
37 #include "opto/inlinetypenode.hpp"
38 #include "opto/loopnode.hpp"
39 #include "opto/machnode.hpp"
40 #include "opto/movenode.hpp"
41 #include "opto/narrowptrnode.hpp"
42 #include "opto/mulnode.hpp"
43 #include "opto/phaseX.hpp"
44 #include "opto/regmask.hpp"
45 #include "opto/runtime.hpp"
46 #include "opto/subnode.hpp"
47 #include "utilities/vmError.hpp"
48
49 // Portions of code courtesy of Clifford Click
50
51 // Optimization - Graph Style
52
53 //=============================================================================
54 //------------------------------Value------------------------------------------
55 // Compute the type of the RegionNode.
56 const Type* RegionNode::Value(PhaseGVN* phase) const {
57 for( uint i=1; i<req(); ++i ) { // For all paths in
355 nstack.push(n);
356 visited.set(n->_idx);
357 while (nstack.size() != 0) {
358 n = nstack.pop();
359 uint max = n->outcnt();
360 for (uint i = 0; i < max; i++) {
361 Node* m = n->raw_out(i);
362 if (m != NULL && m->is_CFG()) {
363 if (phase->eqv(m, this)) {
364 return false; // We reached the Region node - it is not dead.
365 }
366 if (!visited.test_set(m->_idx))
367 nstack.push(m);
368 }
369 }
370 }
371
372 return true; // The Region node is unreachable - it is dead.
373 }
374
// Detect the useless diamond left behind by incremental inlining +
// PhaseStringOpts: if this memory Phi merely merges a MergeMem with that
// MergeMem's own base memory, return the MergeMem the Phi can be replaced
// with; return NULL if the pattern does not apply. The caller performs the
// actual replacement.
Node* PhiNode::try_clean_mem_phi(PhaseGVN *phase) {
  // Incremental inlining + PhaseStringOpts sometimes produce:
  //
  //   cmpP with 1 top input
  //        |
  //       If
  //      /  \
  // IfFalse IfTrue  /- Some Node
  //      \  /      /    /
  //     Region    / /-MergeMem
  //          \---Phi
  //
  //
  // It's expected by PhaseStringOpts that the Region goes away and is
  // replaced by If's control input but because there's still a Phi,
  // the Region stays in the graph. The top input from the cmpP is
  // propagated forward and a subgraph that is useful goes away. The
  // code below replaces the Phi with the MergeMem so that the Region
  // is simplified.

  // Only handle the exact shape above: a memory phi over a 2-way diamond
  // (is_diamond_phi(true) checks the If structure).
  if (type() == Type::MEMORY && is_diamond_phi(true)) {
    MergeMemNode* m = NULL;
    assert(req() == 3, "same as region");
    Node* r = in(0);  // the controlling Region
    for (uint i = 1; i < 3; ++i) {
      Node *mem = in(i);
      if (mem && mem->is_MergeMem() && r->in(i)->outcnt() == 1) {
        // Nothing is control-dependent on path #i except the region itself.
        m = mem->as_MergeMem();
        uint j = 3 - i;  // index of the other diamond arm (1 <-> 2)
        Node* other = in(j);
        if (other && other == m->base_memory()) {
          // m is a successor memory to other, and is not pinned inside the diamond, so push it out.
          // This will allow the diamond to collapse completely.
          return m;
        }
      }
    }
  }
  return NULL;
}
416
417 //------------------------------Ideal------------------------------------------
418 // Return a node which is more "ideal" than the current node. Must preserve
419 // the CFG, but we can still strip out dead paths.
420 Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
421 if( !can_reshape && !in(0) ) return NULL; // Already degraded to a Copy
422 assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge");
423
424 // Check for RegionNode with no Phi users and both inputs come from either
425 // arm of the same IF. If found, then the control-flow split is useless.
426 bool has_phis = false;
427 if (can_reshape) { // Need DU info to check for Phi users
428 has_phis = (has_phi() != NULL); // Cache result
429 if (has_phis) {
430 PhiNode* phi = has_unique_phi();
431 if (phi != NULL) {
432 Node* m = phi->try_clean_mem_phi(phase);
433 if (m != NULL) {
434 phase->is_IterGVN()->replace_node(phi, m);
435 has_phis = false;
436 }
437 }
438 }
439
440 if (!has_phis) { // No Phi users? Nothing merging?
441 for (uint i = 1; i < req()-1; i++) {
442 Node *if1 = in(i);
443 if( !if1 ) continue;
444 Node *iff = if1->in(0);
445 if( !iff || !iff->is_If() ) continue;
446 for( uint j=i+1; j<req(); j++ ) {
447 if( in(j) && in(j)->in(0) == iff &&
448 if1->Opcode() != in(j)->Opcode() ) {
449 // Add the IF Projections to the worklist. They (and the IF itself)
450 // will be eliminated if dead.
451 phase->is_IterGVN()->add_users_to_worklist(iff);
452 set_req(i, iff->in(0));// Skip around the useless IF diamond
453 set_req(j, NULL);
454 return this; // Record progress
455 }
456 }
457 }
885
886 //=============================================================================
887 // note that these functions assume that the _adr_type field is flattened
888 uint PhiNode::hash() const {
889 const Type* at = _adr_type;
890 return TypeNode::hash() + (at ? at->hash() : 0);
891 }
892 bool PhiNode::cmp( const Node &n ) const {
893 return TypeNode::cmp(n) && _adr_type == ((PhiNode&)n)._adr_type;
894 }
895 static inline
896 const TypePtr* flatten_phi_adr_type(const TypePtr* at) {
897 if (at == NULL || at == TypePtr::BOTTOM) return at;
898 return Compile::current()->alias_type(at)->adr_type();
899 }
900
901 //----------------------------make---------------------------------------------
902 // create a new phi with edges matching r and set (initially) to x
903 PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) {
904 uint preds = r->req(); // Number of predecessor paths
905 assert(t != Type::MEMORY || at == flatten_phi_adr_type(at) || (flatten_phi_adr_type(at) == TypeAryPtr::INLINES && Compile::current()->flattened_accesses_share_alias()), "flatten at");
906 PhiNode* p = new PhiNode(r, t, at);
907 for (uint j = 1; j < preds; j++) {
908 // Fill in all inputs, except those which the region does not yet have
909 if (r->in(j) != NULL)
910 p->init_req(j, x);
911 }
912 return p;
913 }
914 PhiNode* PhiNode::make(Node* r, Node* x) {
915 const Type* t = x->bottom_type();
916 const TypePtr* at = NULL;
917 if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
918 return make(r, x, t, at);
919 }
920 PhiNode* PhiNode::make_blank(Node* r, Node* x) {
921 const Type* t = x->bottom_type();
922 const TypePtr* at = NULL;
923 if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type());
924 return new PhiNode(r, t, at);
925 }
1095 }
1096 }
1097 } else if (l->in(LoopNode::LoopBackControl) != NULL &&
1098 in(LoopNode::EntryControl) != NULL &&
1099 phase->type(l->in(LoopNode::LoopBackControl)) == Type::TOP) {
1100 // During CCP, if we saturate the type of a counted loop's Phi
1101 // before the special code for counted loop above has a chance
1102 // to run (that is as long as the type of the backedge's control
1103 // is top), we might end up with non monotonic types
1104 return phase->type(in(LoopNode::EntryControl))->filter_speculative(_type);
1105 }
1106 }
1107
1108 // Until we have harmony between classes and interfaces in the type
1109 // lattice, we must tread carefully around phis which implicitly
1110 // convert the one to the other.
1111 const TypePtr* ttp = _type->make_ptr();
1112 const TypeInstPtr* ttip = (ttp != NULL) ? ttp->isa_instptr() : NULL;
1113 const TypeKlassPtr* ttkp = (ttp != NULL) ? ttp->isa_klassptr() : NULL;
1114 bool is_intf = false;
1115 if (ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
1116 is_intf = true;
1117 } else if (ttkp != NULL && ttkp->is_loaded() && ttkp->klass()->is_interface()) {
1118 is_intf = true;
1119 }
1120
1121 // Default case: merge all inputs
1122 const Type *t = Type::TOP; // Merged type starting value
1123 for (uint i = 1; i < req(); ++i) {// For all paths in
1124 // Reachable control path?
1125 if (r->in(i) && phase->type(r->in(i)) == Type::CONTROL) {
1126 const Type* ti = phase->type(in(i));
1127 // We assume that each input of an interface-valued Phi is a true
1128 // subtype of that interface. This might not be true of the meet
1129 // of all the input types. The lattice is not distributive in
1130 // such cases. Ward off asserts in type.cpp by refusing to do
1131 // meets between interfaces and proper classes.
1132 const TypePtr* tip = ti->make_ptr();
1133 const TypeInstPtr* tiip = (tip != NULL) ? tip->isa_instptr() : NULL;
1134 if (tiip) {
1135 bool ti_is_intf = false;
1136 ciKlass* k = tiip->klass();
1137 if (k->is_loaded() && k->is_interface())
1138 ti_is_intf = true;
1155 // (Occurrences of this case suggest improvements to Value methods.)
1156 //
1157 // It is not possible to see Type::BOTTOM values as phi inputs,
1158 // because the ciTypeFlow pre-pass produces verifier-quality types.
1159 const Type* ft = t->filter_speculative(_type); // Worst case type
1160
1161 #ifdef ASSERT
1162 // The following logic has been moved into TypeOopPtr::filter.
1163 const Type* jt = t->join_speculative(_type);
1164 if (jt->empty()) { // Emptied out???
1165
1166 // Check for evil case of 't' being a class and '_type' expecting an
1167 // interface. This can happen because the bytecodes do not contain
1168 // enough type info to distinguish a Java-level interface variable
1169 // from a Java-level object variable. If we meet 2 classes which
1170 // both implement interface I, but their meet is at 'j/l/O' which
1171 // doesn't implement I, we have no way to tell if the result should
1172 // be 'I' or 'j/l/O'. Thus we'll pick 'j/l/O'. If this then flows
1173 // into a Phi which "knows" it's an Interface type we'll have to
1174 // uplift the type.
1175 if (!t->empty() && ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
1176 assert(ft == _type, ""); // Uplift to interface
1177 } else if (!t->empty() && ttkp != NULL && ttkp->is_loaded() && ttkp->klass()->is_interface()) {
1178 assert(ft == _type, ""); // Uplift to interface
1179 } else {
1180 // We also have to handle 'evil cases' of interface- vs. class-arrays
1181 Type::get_arrays_base_elements(jt, _type, NULL, &ttip);
1182 if (!t->empty() && ttip != NULL && ttip->is_loaded() && ttip->klass()->is_interface()) {
1183 assert(ft == _type, ""); // Uplift to array of interface
1184 } else {
1185 // Otherwise it's something stupid like non-overlapping int ranges
1186 // found on dying counted loops.
1187 assert(ft == Type::TOP, ""); // Canonical empty value
1188 }
1189 }
1190 }
1191
1192 else {
1193
1194 // If we have an interface-typed Phi and we narrow to a class type, the join
1195 // should report back the class. However, if we have a J/L/Object
1196 // class-typed Phi and an interface flows in, it's possible that the meet &
1197 // join report an interface back out. This isn't possible but happens
1319
1320 //------------------------------Identity---------------------------------------
1321 // Check for Region being Identity.
1322 Node* PhiNode::Identity(PhaseGVN* phase) {
1323 // Check for no merging going on
1324 // (There used to be special-case code here when this->region->is_Loop.
1325 // It would check for a tributary phi on the backedge that the main phi
1326 // trivially, perhaps with a single cast. The unique_input method
1327 // does all this and more, by reducing such tributaries to 'this'.)
1328 Node* uin = unique_input(phase, false);
1329 if (uin != NULL) {
1330 return uin;
1331 }
1332
1333 int true_path = is_diamond_phi();
1334 if (true_path != 0) {
1335 Node* id = is_cmove_id(phase, true_path);
1336 if (id != NULL) return id;
1337 }
1338
1339 if (phase->is_IterGVN()) {
1340 Node* m = try_clean_mem_phi(phase);
1341 if (m != NULL) {
1342 return m;
1343 }
1344 }
1345
1346
1347 // Looking for phis with identical inputs. If we find one that has
1348 // type TypePtr::BOTTOM, replace the current phi with the bottom phi.
1349 if (phase->is_IterGVN() && type() == Type::MEMORY && adr_type() !=
1350 TypePtr::BOTTOM && !adr_type()->is_known_instance()) {
1351 uint phi_len = req();
1352 Node* phi_reg = region();
1353 for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) {
1354 Node* u = phi_reg->fast_out(i);
1355 if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY &&
1356 u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg &&
1357 u->req() == phi_len) {
1358 for (uint j = 1; j < phi_len; j++) {
1359 if (in(j) != u->in(j)) {
1360 u = NULL;
1361 break;
1362 }
1363 }
1364 if (u != NULL) {
1365 return u;
1366 }
1873 }
1874 return delay;
1875 }
1876
1877 //------------------------------Ideal------------------------------------------
1878 // Return a node which is more "ideal" than the current node. Must preserve
1879 // the CFG, but we can still strip out dead paths.
1880 Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1881 // The next should never happen after 6297035 fix.
1882 if( is_copy() ) // Already degraded to a Copy ?
1883 return NULL; // No change
1884
1885 Node *r = in(0); // RegionNode
1886 assert(r->in(0) == NULL || !r->in(0)->is_Root(), "not a specially hidden merge");
1887
1888 // Note: During parsing, phis are often transformed before their regions.
1889 // This means we have to use type_or_null to defend against untyped regions.
1890 if( phase->type_or_null(r) == Type::TOP ) // Dead code?
1891 return NULL; // No change
1892
1893 // If all inputs are inline types of the same type, push the inline type node down
1894 // through the phi because inline type nodes should be merged through their input values.
1895 if (req() > 2 && in(1) != NULL && in(1)->is_InlineTypeBase() && (can_reshape || in(1)->is_InlineType())) {
1896 int opcode = in(1)->Opcode();
1897 uint i = 2;
1898 // Check if inputs are values of the same type
1899 for (; i < req() && in(i) && in(i)->is_InlineTypeBase() && in(i)->cmp(*in(1)); i++) {
1900 assert(in(i)->Opcode() == opcode, "mixing pointers and values?");
1901 }
1902 if (i == req()) {
1903 InlineTypeBaseNode* vt = in(1)->as_InlineTypeBase()->clone_with_phis(phase, in(0));
1904 for (uint i = 2; i < req(); ++i) {
1905 vt->merge_with(phase, in(i)->as_InlineTypeBase(), i, i == (req()-1));
1906 }
1907 return vt;
1908 }
1909 }
1910
1911 Node *top = phase->C->top();
1912 bool new_phi = (outcnt() == 0); // transforming new Phi
1913 // No change for igvn if new phi is not hooked
1914 if (new_phi && can_reshape)
1915 return NULL;
1916
  // There are 2 situations when only one valid phi's input is left
1918 // (in addition to Region input).
1919 // One: region is not loop - replace phi with this input.
1920 // Two: region is loop - replace phi with top since this data path is dead
1921 // and we need to break the dead data loop.
1922 Node* progress = NULL; // Record if any progress made
1923 for( uint j = 1; j < req(); ++j ){ // For all paths in
1924 // Check unreachable control paths
1925 Node* rc = r->in(j);
1926 Node* n = in(j); // Get the input
1927 if (rc == NULL || phase->type(rc) == Type::TOP) {
1928 if (n != top) { // Not already top?
1929 PhaseIterGVN *igvn = phase->is_IterGVN();
1930 if (can_reshape && igvn != NULL) {
2189 for (uint i = 1; i < req(); i++) {
2190 offset->init_req(i, in(i)->in(AddPNode::Offset));
2191 }
2192 phase->is_IterGVN()->register_new_node_with_optimizer(offset);
2193 }
2194 return new AddPNode(base, address, offset);
2195 }
2196 }
2197 }
2198
2199 // Split phis through memory merges, so that the memory merges will go away.
2200 // Piggy-back this transformation on the search for a unique input....
2201 // It will be as if the merged memory is the unique value of the phi.
2202 // (Do not attempt this optimization unless parsing is complete.
2203 // It would make the parser's memory-merge logic sick.)
2204 // (MergeMemNode is not dead_loop_safe - need to check for dead loop.)
2205 if (progress == NULL && can_reshape && type() == Type::MEMORY) {
2206 // see if this phi should be sliced
2207 uint merge_width = 0;
2208 bool saw_self = false;
2209 // TODO revisit this with JDK-8247216
2210 bool mergemem_only = true;
2211 for( uint i=1; i<req(); ++i ) {// For all paths in
2212 Node *ii = in(i);
2213 // TOP inputs should not be counted as safe inputs because if the
2214 // Phi references itself through all other inputs then splitting the
2215 // Phi through memory merges would create dead loop at later stage.
2216 if (ii == top) {
2217 return NULL; // Delay optimization until graph is cleaned.
2218 }
2219 if (ii->is_MergeMem()) {
2220 MergeMemNode* n = ii->as_MergeMem();
2221 merge_width = MAX2(merge_width, n->req());
2222 saw_self = saw_self || phase->eqv(n->base_memory(), this);
2223 } else {
2224 mergemem_only = false;
2225 }
2226 }
2227
2228 // This restriction is temporarily necessary to ensure termination:
2229 if (!mergemem_only && !saw_self && adr_type() == TypePtr::BOTTOM) merge_width = 0;
2230
2231 if (merge_width > Compile::AliasIdxRaw) {
2232 // found at least one non-empty MergeMem
2233 const TypePtr* at = adr_type();
2234 if (at != TypePtr::BOTTOM) {
2235 // Patch the existing phi to select an input from the merge:
2236 // Phi:AT1(...MergeMem(m0, m1, m2)...) into
2237 // Phi:AT1(...m1...)
2238 int alias_idx = phase->C->get_alias_index(at);
2239 for (uint i=1; i<req(); ++i) {
2240 Node *ii = in(i);
2241 if (ii->is_MergeMem()) {
2242 MergeMemNode* n = ii->as_MergeMem();
2243 // compress paths and change unreachable cycles to TOP
2244 // If not, we can update the input infinitely along a MergeMem cycle
2245 // Equivalent code is in MemNode::Ideal_common
2246 Node *m = phase->transform(n);
2247 if (outcnt() == 0) { // Above transform() may kill us!
2248 return top;
2249 }
2638 return in(0)->in(0);
2639 }
2640
2641
2642 #ifndef PRODUCT
// Debug-only printing: emit the base projection info followed by the
// bytecode index of the exception handler this CatchProj dispatches to.
void CatchProjNode::dump_spec(outputStream *st) const {
  ProjNode::dump_spec(st);
  st->print("@bci %d ",_handler_bci);
}
2647 #endif
2648
2649 //=============================================================================
2650 //------------------------------Identity---------------------------------------
2651 // Check for CreateEx being Identity.
2652 Node* CreateExNode::Identity(PhaseGVN* phase) {
2653 if( phase->type(in(1)) == Type::TOP ) return in(1);
2654 if( phase->type(in(0)) == Type::TOP ) return in(0);
2655 // We only come from CatchProj, unless the CatchProj goes away.
2656 // If the CatchProj is optimized away, then we just carry the
2657 // exception oop through.
2658
2659 // CheckCastPPNode::Ideal() for inline types reuses the exception
2660 // paths of a call to perform an allocation: we can see a Phi here.
2661 if (in(1)->is_Phi()) {
2662 return this;
2663 }
2664 CallNode *call = in(1)->in(0)->as_Call();
2665
2666 return ( in(0)->is_CatchProj() && in(0)->in(0)->in(1) == in(1) )
2667 ? this
2668 : call->in(TypeFunc::Parms);
2669 }
2670
2671 //=============================================================================
2672 //------------------------------Value------------------------------------------
2673 // Check for being unreachable.
2674 const Type* NeverBranchNode::Value(PhaseGVN* phase) const {
2675 if (!in(0) || in(0)->is_top()) return Type::TOP;
2676 return bottom_type();
2677 }
2678
2679 //------------------------------Ideal------------------------------------------
2680 // Check for no longer being part of a loop
2681 Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2682 if (can_reshape && !in(0)->is_Loop()) {
2683 // Dead code elimination can sometimes delete this projection so
|