< prev index next > src/hotspot/share/opto/compile.cpp
Print this page
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/escape.hpp"
#include "opto/idealGraphPrinter.hpp"
+ #include "opto/inlinetypenode.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/macro.hpp"
#include "opto/matcher.hpp"
#include "opto/mathexactnode.hpp"
Node* opaq = opaque4_node(i);
if (!useful.member(opaq)) {
remove_opaque4_node(opaq);
}
}
+ // Remove useless inline type nodes
+ for (int i = _inline_type_nodes->length() - 1; i >= 0; i--) {
+ Node* vt = _inline_type_nodes->at(i);
+ if (!useful.member(vt)) {
+ _inline_type_nodes->remove(vt);
+ }
+ }
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
bs->eliminate_useless_gc_barriers(useful, this);
// clean up the late inline lists
remove_useless_late_inlines(&_string_late_inlines, useful);
remove_useless_late_inlines(&_boxing_late_inlines, useful);
initial_gvn()->transform_no_reclaim(top());
// Set up tf(), start(), and find a CallGenerator.
CallGenerator* cg = NULL;
if (is_osr_compilation()) {
! const TypeTuple *domain = StartOSRNode::osr_domain();
! const TypeTuple *range = TypeTuple::make_range(method()->signature());
- init_tf(TypeFunc::make(domain, range));
- StartNode* s = new StartOSRNode(root(), domain);
initial_gvn()->set_type_bottom(s);
init_start(s);
cg = CallGenerator::for_osr(method(), entry_bci());
} else {
// Normal case.
init_tf(TypeFunc::make(method()));
! StartNode* s = new StartNode(root(), tf()->domain());
initial_gvn()->set_type_bottom(s);
init_start(s);
if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
// With java.lang.ref.reference.get() we must go through the
// intrinsic - even when get() is the root
initial_gvn()->transform_no_reclaim(top());
// Set up tf(), start(), and find a CallGenerator.
CallGenerator* cg = NULL;
if (is_osr_compilation()) {
! init_tf(TypeFunc::make(method(), /* is_osr_compilation = */ true));
! StartNode* s = new StartOSRNode(root(), tf()->domain_sig());
initial_gvn()->set_type_bottom(s);
init_start(s);
cg = CallGenerator::for_osr(method(), entry_bci());
} else {
// Normal case.
init_tf(TypeFunc::make(method()));
! StartNode* s = new StartNode(root(), tf()->domain_cc());
initial_gvn()->set_type_bottom(s);
init_start(s);
if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
// With java.lang.ref.reference.get() we must go through the
// intrinsic - even when get() is the root
}
// Now that we know the size of all the monitors we can add a fixed slot
// for the original deopt pc.
int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
+ if (needs_stack_repair()) {
+ // One extra slot for the special stack increment value
+ next_slot += 2;
+ }
set_fixed_slots(next_slot);
// Compute when to use implicit null checks. Used by matching trap based
// nodes and NullCheck optimization.
set_allowed_deopt_reasons();
Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
set_decompile_count(0);
set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
_loop_opts_cnt = LoopOptsCount;
+ _has_flattened_accesses = false;
+ _flattened_accesses_share_alias = true;
+
set_do_inlining(Inline);
set_max_inline_size(MaxInlineSize);
set_freq_inline_size(FreqInlineSize);
set_do_scheduling(OptoScheduling);
set_do_count_invocations(false);
_macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
_predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
_expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
_range_check_casts = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
_opaque4_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
+ _inline_type_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
register_library_intrinsics();
#ifdef ASSERT
_type_verify_symmetry = true;
_phase_optimize_finished = false;
#endif
bool is_known_inst = tj->isa_oopptr() != NULL &&
tj->is_oopptr()->is_known_instance();
// Process weird unsafe references.
if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
! assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops");
assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
tj = TypeOopPtr::BOTTOM;
ptr = tj->ptr();
offset = tj->offset();
}
bool is_known_inst = tj->isa_oopptr() != NULL &&
tj->is_oopptr()->is_known_instance();
// Process weird unsafe references.
if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
! bool default_value_load = EnableValhalla && tj->is_instptr()->klass() == ciEnv::current()->Class_klass();
+ assert(InlineUnsafeOps || default_value_load, "indeterminate pointers come only from unsafe ops");
assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
tj = TypeOopPtr::BOTTOM;
ptr = tj->ptr();
offset = tj->offset();
}
const TypeAryPtr *ta = tj->isa_aryptr();
if (ta && ta->is_stable()) {
// Erase stability property for alias analysis.
tj = ta = ta->cast_to_stable(false);
}
if( ta && is_known_inst ) {
if ( offset != Type::OffsetBot &&
offset > arrayOopDesc::length_offset_in_bytes() ) {
offset = Type::OffsetBot; // Flatten constant access into array body only
! tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id());
}
} else if( ta && _AliasLevel >= 2 ) {
// For arrays indexed by constant indices, we flatten the alias
// space to include all of the array body. Only the header, klass
// and array length can be accessed un-aliased.
if( offset != Type::OffsetBot ) {
if( ta->const_oop() ) { // MethodData* or Method*
offset = Type::OffsetBot; // Flatten constant access into array body
! tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
} else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
// range is OK as-is.
tj = ta = TypeAryPtr::RANGE;
} else if( offset == oopDesc::klass_offset_in_bytes() ) {
tj = TypeInstPtr::KLASS; // all klass loads look alike
const TypeAryPtr *ta = tj->isa_aryptr();
if (ta && ta->is_stable()) {
// Erase stability property for alias analysis.
tj = ta = ta->cast_to_stable(false);
}
+ if (ta && ta->is_not_flat()) {
+ // Erase not flat property for alias analysis.
+ tj = ta = ta->cast_to_not_flat(false);
+ }
+ if (ta && ta->is_not_null_free()) {
+ // Erase not null free property for alias analysis.
+ tj = ta = ta->cast_to_not_null_free(false);
+ }
+
if( ta && is_known_inst ) {
if ( offset != Type::OffsetBot &&
offset > arrayOopDesc::length_offset_in_bytes() ) {
offset = Type::OffsetBot; // Flatten constant access into array body only
! tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, Type::Offset(offset), ta->field_offset(), ta->instance_id());
}
} else if( ta && _AliasLevel >= 2 ) {
// For arrays indexed by constant indices, we flatten the alias
// space to include all of the array body. Only the header, klass
// and array length can be accessed un-aliased.
+ // For flattened inline type array, each field has its own slice so
+ // we must include the field offset.
if( offset != Type::OffsetBot ) {
if( ta->const_oop() ) { // MethodData* or Method*
offset = Type::OffsetBot; // Flatten constant access into array body
! tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
} else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
// range is OK as-is.
tj = ta = TypeAryPtr::RANGE;
} else if( offset == oopDesc::klass_offset_in_bytes() ) {
tj = TypeInstPtr::KLASS; // all klass loads look alike
tj = TypeInstPtr::MARK;
ta = TypeAryPtr::RANGE; // generic ignored junk
ptr = TypePtr::BotPTR;
} else { // Random constant offset into array body
offset = Type::OffsetBot; // Flatten constant access into array body
! tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
}
}
// Arrays of fixed size alias with arrays of unknown size.
if (ta->size() != TypeInt::POS) {
const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
! tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset);
}
// Arrays of known objects become arrays of unknown objects.
if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
! tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
}
if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
! tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
}
// Arrays of bytes and of booleans both use 'bastore' and 'baload' so
// cannot be distinguished by bytecode alone.
if (ta->elem() == TypeInt::BOOL) {
const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
! tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
}
// During the 2nd round of IterGVN, NotNull castings are removed.
// Make sure the Bottom and NotNull variants alias the same.
// Also, make sure exact and non-exact variants alias the same.
if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
! tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
}
}
// Oop pointers need some flattening
const TypeInstPtr *to = tj->isa_instptr();
tj = TypeInstPtr::MARK;
ta = TypeAryPtr::RANGE; // generic ignored junk
ptr = TypePtr::BotPTR;
} else { // Random constant offset into array body
offset = Type::OffsetBot; // Flatten constant access into array body
! tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
}
}
// Arrays of fixed size alias with arrays of unknown size.
if (ta->size() != TypeInt::POS) {
const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
! tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,Type::Offset(offset), ta->field_offset());
}
// Arrays of known objects become arrays of unknown objects.
if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
! tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset());
}
if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
! tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), ta->field_offset());
+ }
+ // Initially all flattened array accesses share a single slice
+ if (ta->is_flat() && ta->elem() != TypeInlineType::BOTTOM && _flattened_accesses_share_alias) {
+ const TypeAry *tary = TypeAry::make(TypeInlineType::BOTTOM, ta->size());
+ tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,Type::Offset(offset), Type::Offset(Type::OffsetBot));
}
// Arrays of bytes and of booleans both use 'bastore' and 'baload' so
// cannot be distinguished by bytecode alone.
if (ta->elem() == TypeInt::BOOL) {
const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
! tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,Type::Offset(offset), ta->field_offset());
}
// During the 2nd round of IterGVN, NotNull castings are removed.
// Make sure the Bottom and NotNull variants alias the same.
// Also, make sure exact and non-exact variants alias the same.
if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
! tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,Type::Offset(offset), ta->field_offset());
}
}
// Oop pointers need some flattening
const TypeInstPtr *to = tj->isa_instptr();
if (to->klass() != ciEnv::current()->Class_klass() ||
offset < k->size_helper() * wordSize) {
// No constant oop pointers (such as Strings); they alias with
// unknown strings.
assert(!is_known_inst, "not scalarizable allocation");
! tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
}
} else if( is_known_inst ) {
tj = to; // Keep NotNull and klass_is_exact for instance type
} else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
// During the 2nd round of IterGVN, NotNull castings are removed.
// Make sure the Bottom and NotNull variants alias the same.
// Also, make sure exact and non-exact variants alias the same.
! tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
}
if (to->speculative() != NULL) {
! tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
}
// Canonicalize the holder of this field
if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
// First handle header references such as a LoadKlassNode, even if the
// object's klass is unloaded at compile time (4965979).
if (!is_known_inst) { // Do it only for non-instance types
! tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
}
} else if (offset < 0 || offset >= k->size_helper() * wordSize) {
// Static fields are in the space above the normal instance
// fields in the java.lang.Class instance.
if (to->klass() != ciEnv::current()->Class_klass()) {
if (to->klass() != ciEnv::current()->Class_klass() ||
offset < k->size_helper() * wordSize) {
// No constant oop pointers (such as Strings); they alias with
// unknown strings.
assert(!is_known_inst, "not scalarizable allocation");
! tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,Type::Offset(offset));
}
} else if( is_known_inst ) {
tj = to; // Keep NotNull and klass_is_exact for instance type
} else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
// During the 2nd round of IterGVN, NotNull castings are removed.
// Make sure the Bottom and NotNull variants alias the same.
// Also, make sure exact and non-exact variants alias the same.
! tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,Type::Offset(offset));
}
if (to->speculative() != NULL) {
! tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),Type::Offset(to->offset()), to->klass()->flatten_array(), to->instance_id());
}
// Canonicalize the holder of this field
if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
// First handle header references such as a LoadKlassNode, even if the
// object's klass is unloaded at compile time (4965979).
if (!is_known_inst) { // Do it only for non-instance types
! tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, Type::Offset(offset));
}
} else if (offset < 0 || offset >= k->size_helper() * wordSize) {
// Static fields are in the space above the normal instance
// fields in the java.lang.Class instance.
if (to->klass() != ciEnv::current()->Class_klass()) {
}
} else {
ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
if (!k->equals(canonical_holder) || tj->offset() != offset) {
if( is_known_inst ) {
! tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
} else {
! tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
}
}
}
}
}
} else {
ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
if (!k->equals(canonical_holder) || tj->offset() != offset) {
if( is_known_inst ) {
! tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, Type::Offset(offset), canonical_holder->flatten_array(), to->instance_id());
} else {
! tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, Type::Offset(offset));
}
}
}
}
// use NotNull as the PTR.
if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
TypeKlassPtr::OBJECT->klass(),
! offset);
}
ciKlass* klass = tk->klass();
! if( klass->is_obj_array_klass() ) {
ciKlass* k = TypeAryPtr::OOPS->klass();
if( !k || !k->is_loaded() ) // Only fails for some -Xcomp runs
k = TypeInstPtr::BOTTOM->klass();
! tj = tk = TypeKlassPtr::make( TypePtr::NotNull, k, offset );
}
// Check for precise loads from the primary supertype array and force them
// to the supertype cache alias index. Check for generic array loads from
// the primary supertype array and also force them to the supertype cache
// use NotNull as the PTR.
if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
TypeKlassPtr::OBJECT->klass(),
! Type::Offset(offset));
}
ciKlass* klass = tk->klass();
! if (klass != NULL && klass->is_obj_array_klass()) {
ciKlass* k = TypeAryPtr::OOPS->klass();
if( !k || !k->is_loaded() ) // Only fails for some -Xcomp runs
k = TypeInstPtr::BOTTOM->klass();
! tj = tk = TypeKlassPtr::make(TypePtr::NotNull, k, Type::Offset(offset));
}
// Check for precise loads from the primary supertype array and force them
// to the supertype cache alias index. Check for generic array loads from
// the primary supertype array and also force them to the supertype cache
if (offset == Type::OffsetBot ||
(offset >= primary_supers_offset &&
offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
offset = in_bytes(Klass::secondary_super_cache_offset());
! tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset );
}
}
// Flatten all Raw pointers together.
if (tj->base() == Type::RawPtr)
if (offset == Type::OffsetBot ||
(offset >= primary_supers_offset &&
offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
offset = in_bytes(Klass::secondary_super_cache_offset());
! tj = tk = TypeKlassPtr::make(TypePtr::NotNull, tk->klass(), Type::Offset(offset));
}
}
// Flatten all Raw pointers together.
if (tj->base() == Type::RawPtr)
for (int i = 0; i < new_ats; i++) _alias_types[old_ats+i] = &ats[i];
}
//--------------------------------find_alias_type------------------------------
! Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
if (_AliasLevel == 0)
return alias_type(AliasIdxBot);
! AliasCacheEntry* ace = probe_alias_cache(adr_type);
! if (ace->_adr_type == adr_type) {
! return alias_type(ace->_index);
}
// Handle special cases.
if (adr_type == NULL) return alias_type(AliasIdxTop);
if (adr_type == TypePtr::BOTTOM) return alias_type(AliasIdxBot);
for (int i = 0; i < new_ats; i++) _alias_types[old_ats+i] = &ats[i];
}
//--------------------------------find_alias_type------------------------------
! Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field, bool uncached) {
if (_AliasLevel == 0)
return alias_type(AliasIdxBot);
! AliasCacheEntry* ace = NULL;
! if (!uncached) {
! ace = probe_alias_cache(adr_type);
+ if (ace->_adr_type == adr_type) {
+ return alias_type(ace->_index);
+ }
}
// Handle special cases.
if (adr_type == NULL) return alias_type(AliasIdxTop);
if (adr_type == TypePtr::BOTTOM) return alias_type(AliasIdxBot);
if (flat->isa_instptr()) {
if (flat->offset() == java_lang_Class::klass_offset()
&& flat->is_instptr()->klass() == env()->Class_klass())
alias_type(idx)->set_rewritable(false);
}
if (flat->isa_aryptr()) {
#ifdef ASSERT
const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
// (T_BYTE has the weakest alignment and size restrictions...)
assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
#endif
if (flat->offset() == TypePtr::OffsetBot) {
! alias_type(idx)->set_element(flat->is_aryptr()->elem());
}
}
if (flat->isa_klassptr()) {
if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
alias_type(idx)->set_rewritable(false);
if (flat->isa_instptr()) {
if (flat->offset() == java_lang_Class::klass_offset()
&& flat->is_instptr()->klass() == env()->Class_klass())
alias_type(idx)->set_rewritable(false);
}
+ ciField* field = NULL;
if (flat->isa_aryptr()) {
#ifdef ASSERT
const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
// (T_BYTE has the weakest alignment and size restrictions...)
assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
#endif
+ const Type* elemtype = flat->is_aryptr()->elem();
if (flat->offset() == TypePtr::OffsetBot) {
! alias_type(idx)->set_element(elemtype);
+ }
+ int field_offset = flat->is_aryptr()->field_offset().get();
+ if (elemtype->isa_inlinetype() &&
+ elemtype->inline_klass() != NULL &&
+ field_offset != Type::OffsetBot) {
+ ciInlineKlass* vk = elemtype->inline_klass();
+ field_offset += vk->first_field_offset();
+ field = vk->get_field_by_offset(field_offset, false);
}
}
if (flat->isa_klassptr()) {
if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
alias_type(idx)->set_rewritable(false);
alias_type(idx)->set_rewritable(false);
if (flat->offset() == in_bytes(Klass::access_flags_offset()))
alias_type(idx)->set_rewritable(false);
if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
alias_type(idx)->set_rewritable(false);
if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
alias_type(idx)->set_rewritable(false);
}
// %%% (We would like to finalize JavaThread::threadObj_offset(),
// but the base pointer type is not distinctive enough to identify
// references into JavaThread.)
// Check for final fields.
const TypeInstPtr* tinst = flat->isa_instptr();
if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
- ciField* field;
if (tinst->const_oop() != NULL &&
tinst->klass() == ciEnv::current()->Class_klass() &&
tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) {
// static field
ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
field = k->get_field_by_offset(tinst->offset(), true);
} else {
! ciInstanceKlass *k = tinst->klass()->as_instance_klass();
field = k->get_field_by_offset(tinst->offset(), false);
}
! assert(field == NULL ||
! original_field == NULL ||
! (field->holder() == original_field->holder() &&
! field->offset() == original_field->offset() &&
! field->is_static() == original_field->is_static()), "wrong field?");
! // Set field() and is_rewritable() attributes.
! if (field != NULL) alias_type(idx)->set_field(field);
}
}
// Fill the cache for next time.
! ace->_adr_type = adr_type;
! ace->_index = idx;
! assert(alias_type(adr_type) == alias_type(idx), "type must be installed");
! // Might as well try to fill the cache for the flattened version, too.
! AliasCacheEntry* face = probe_alias_cache(flat);
! if (face->_adr_type == NULL) {
! face->_adr_type = flat;
! face->_index = idx;
! assert(alias_type(flat) == alias_type(idx), "flat type must work too");
}
return alias_type(idx);
}
alias_type(idx)->set_rewritable(false);
if (flat->offset() == in_bytes(Klass::access_flags_offset()))
alias_type(idx)->set_rewritable(false);
if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
alias_type(idx)->set_rewritable(false);
+ if (flat->offset() == in_bytes(Klass::layout_helper_offset()))
+ alias_type(idx)->set_rewritable(false);
if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
alias_type(idx)->set_rewritable(false);
}
// %%% (We would like to finalize JavaThread::threadObj_offset(),
// but the base pointer type is not distinctive enough to identify
// references into JavaThread.)
// Check for final fields.
const TypeInstPtr* tinst = flat->isa_instptr();
if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
if (tinst->const_oop() != NULL &&
tinst->klass() == ciEnv::current()->Class_klass() &&
tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) {
// static field
ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
field = k->get_field_by_offset(tinst->offset(), true);
+ } else if (tinst->klass()->is_inlinetype()) {
+ // Inline type field
+ ciInlineKlass* vk = tinst->inline_klass();
+ field = vk->get_field_by_offset(tinst->offset(), false);
} else {
! ciInstanceKlass* k = tinst->klass()->as_instance_klass();
field = k->get_field_by_offset(tinst->offset(), false);
}
! }
! assert(field == NULL ||
! original_field == NULL ||
! (field->holder() == original_field->holder() &&
! field->offset() == original_field->offset() &&
! field->is_static() == original_field->is_static()), "wrong field?");
! // Set field() and is_rewritable() attributes.
+ if (field != NULL) {
+ alias_type(idx)->set_field(field);
+ if (flat->isa_aryptr()) {
+ // Fields of flat arrays are rewritable although they are declared final
+ assert(flat->is_aryptr()->is_flat(), "must be a flat array");
+ alias_type(idx)->set_rewritable(true);
+ }
}
}
// Fill the cache for next time.
! if (!uncached) {
! ace->_adr_type = adr_type;
! ace->_index = idx;
+ assert(alias_type(adr_type) == alias_type(idx), "type must be installed");
! // Might as well try to fill the cache for the flattened version, too.
! AliasCacheEntry* face = probe_alias_cache(flat);
! if (face->_adr_type == NULL) {
! face->_adr_type = flat;
! face->_index = idx;
! assert(alias_type(flat) == alias_type(idx), "flat type must work too");
+ }
}
return alias_type(idx);
}
igvn.replace_node(opaq, opaq->in(2));
}
assert(opaque4_count() == 0, "should be empty");
}
+ // Register an inline type node so it can be tracked and later scalarized in
+ // safepoints or removed (see Compile::process_inline_types()). A NULL list
+ // means tracking has not been set up for this compilation, so the node is
+ // silently ignored.
+ void Compile::add_inline_type(Node* n) {
+ assert(n->is_InlineTypeBase(), "unexpected node");
+ if (_inline_type_nodes != NULL) {
+ _inline_type_nodes->push(n);
+ }
+ }
+
+ // Unregister an inline type node (e.g. once it has been replaced during
+ // optimization). Safe to call for nodes that were never registered: the
+ // contains() check makes removal a no-op in that case.
+ void Compile::remove_inline_type(Node* n) {
+ assert(n->is_InlineTypeBase(), "unexpected node");
+ if (_inline_type_nodes != NULL && _inline_type_nodes->contains(n)) {
+ _inline_type_nodes->remove(n);
+ }
+ }
+
+ // Does the return value keep otherwise useless inline type allocations alive?
+ // Walks up from ret_val through InlineTypePtr oop inputs and Phi inputs,
+ // looking for CheckCastPP projections of Allocate nodes. Returns true only if
+ // at least one allocation was found AND no node on the chain has another use
+ // (outcnt() > 1 means something other than the return consumes it, so the
+ // allocation is not "otherwise useless").
+ static bool return_val_keeps_allocations_alive(Node* ret_val) {
+ ResourceMark rm;
+ Unique_Node_List wq;
+ wq.push(ret_val);
+ bool some_allocations = false;
+ for (uint i = 0; i < wq.size(); i++) {
+ Node* n = wq.at(i);
+ // InlineType nodes are not expected here; only InlineTypePtr chains are walked.
+ assert(!n->is_InlineType(), "chain of inline type nodes");
+ if (n->outcnt() > 1) {
+ // Some other use for the allocation
+ return false;
+ } else if (n->is_InlineTypePtr()) {
+ // Follow the oop input of the InlineTypePtr node.
+ wq.push(n->in(1));
+ } else if (n->is_Phi()) {
+ // Follow all data inputs of the Phi (input 0 is control).
+ for (uint j = 1; j < n->req(); j++) {
+ wq.push(n->in(j));
+ }
+ } else if (n->is_CheckCastPP() &&
+ n->in(1)->is_Proj() &&
+ n->in(1)->in(0)->is_Allocate()) {
+ // Found a buffer allocation whose only use is the return value.
+ some_allocations = true;
+ }
+ }
+ return some_allocations;
+ }
+
+ // Process all tracked inline type nodes:
+ // 1. Scalarize them in safepoint debug info.
+ // 2. After escape analysis (post_ea == true), replace InlineTypePtr nodes by
+ //    their oop input.
+ // 3. If the method returns an inline type as fields, cut the oop return value
+ //    when it only keeps otherwise useless buffer allocations alive.
+ void Compile::process_inline_types(PhaseIterGVN &igvn, bool post_ea) {
+ // Make inline types scalar in safepoints
+ // Iterate backwards since make_scalar_in_safepoints() may remove entries.
+ for (int i = _inline_type_nodes->length()-1; i >= 0; i--) {
+ InlineTypeBaseNode* vt = _inline_type_nodes->at(i)->as_InlineTypeBase();
+ vt->make_scalar_in_safepoints(&igvn);
+ }
+ // Remove InlineTypePtr nodes only after EA to give scalar replacement a chance
+ // to remove buffer allocations. InlineType nodes are kept until loop opts and
+ // removed via InlineTypeNode::remove_redundant_allocations.
+ if (post_ea) {
+ while (_inline_type_nodes->length() > 0) {
+ InlineTypeBaseNode* vt = _inline_type_nodes->pop()->as_InlineTypeBase();
+ if (vt->is_InlineTypePtr()) {
+ igvn.replace_node(vt, vt->get_oop());
+ }
+ }
+ }
+ // Make sure that the return value does not keep an unused allocation alive
+ if (tf()->returns_inline_type_as_fields()) {
+ // Find the (single) Return node among the root's inputs.
+ Node* ret = NULL;
+ for (uint i = 1; i < root()->req(); i++){
+ Node* in = root()->in(i);
+ if (in->Opcode() == Op_Return) {
+ assert(ret == NULL, "only one return");
+ ret = in;
+ }
+ }
+ if (ret != NULL) {
+ Node* ret_val = ret->in(TypeFunc::Parms);
+ if (igvn.type(ret_val)->isa_oopptr() &&
+ return_val_keeps_allocations_alive(ret_val)) {
+ // Replace the oop return value by the tagged klass pointer so the
+ // now-unreferenced allocation chain becomes dead and can be removed.
+ igvn.replace_input_of(ret, TypeFunc::Parms, InlineTypeNode::tagged_klass(igvn.type(ret_val)->inline_klass(), igvn));
+ assert(ret_val->outcnt() == 0, "should be dead now");
+ igvn.remove_dead_node(ret_val);
+ }
+ }
+ }
+ // Clean up after the graph changes above.
+ igvn.optimize();
+ }
+
+ void Compile::adjust_flattened_array_access_aliases(PhaseIterGVN& igvn) {
+ if (!_has_flattened_accesses) {
+ return;
+ }
+ // Initially, all flattened array accesses share the same slice to
+ // keep dependencies with Object[] array accesses (that could be
+ // to a flattened array) correct. We're done with parsing so we
+ // now know all flattened array accesses in this compile
+ // unit. Let's move flattened array accesses to their own slice,
+ // one per element field. This should help memory access
+ // optimizations.
+ ResourceMark rm;
+ Unique_Node_List wq;
+ wq.push(root());
+
+ Node_List mergememnodes;
+ Node_List memnodes;
+
+ // Alias index currently shared by all flattened memory accesses
+ int index = get_alias_index(TypeAryPtr::INLINES);
+
+ // Find MergeMem nodes and flattened array accesses
+ for (uint i = 0; i < wq.size(); i++) {
+ Node* n = wq.at(i);
+ if (n->is_Mem()) {
+ const TypePtr* adr_type = NULL;
+ if (n->Opcode() == Op_StoreCM) {
+ adr_type = get_adr_type(get_alias_index(n->in(MemNode::OopStore)->adr_type()));
+ } else {
+ adr_type = get_adr_type(get_alias_index(n->adr_type()));
+ }
+ if (adr_type == TypeAryPtr::INLINES) {
+ memnodes.push(n);
+ }
+ } else if (n->is_MergeMem()) {
+ MergeMemNode* mm = n->as_MergeMem();
+ if (mm->memory_at(index) != mm->base_memory()) {
+ mergememnodes.push(n);
+ }
+ }
+ for (uint j = 0; j < n->req(); j++) {
+ Node* m = n->in(j);
+ if (m != NULL) {
+ wq.push(m);
+ }
+ }
+ }
+
+ if (memnodes.size() > 0) {
+ _flattened_accesses_share_alias = false;
+
+ // We are going to change the slice for the flattened array
+ // accesses so we need to clear the cache entries that refer to
+ // them.
+ for (uint i = 0; i < AliasCacheSize; i++) {
+ AliasCacheEntry* ace = &_alias_cache[i];
+ if (ace->_adr_type != NULL &&
+ ace->_adr_type->isa_aryptr() &&
+ ace->_adr_type->is_aryptr()->is_flat()) {
+ ace->_adr_type = NULL;
+ ace->_index = (i != 0) ? 0 : AliasIdxTop; // Make sure the NULL adr_type resolves to AliasIdxTop
+ }
+ }
+
+ // Find what aliases we are going to add
+ int start_alias = num_alias_types()-1;
+ int stop_alias = 0;
+
+ for (uint i = 0; i < memnodes.size(); i++) {
+ Node* m = memnodes.at(i);
+ const TypePtr* adr_type = NULL;
+ if (m->Opcode() == Op_StoreCM) {
+ adr_type = m->in(MemNode::OopStore)->adr_type();
+ Node* clone = new StoreCMNode(m->in(MemNode::Control), m->in(MemNode::Memory), m->in(MemNode::Address),
+ m->adr_type(), m->in(MemNode::ValueIn), m->in(MemNode::OopStore),
+ get_alias_index(adr_type));
+ igvn.register_new_node_with_optimizer(clone);
+ igvn.replace_node(m, clone);
+ } else {
+ adr_type = m->adr_type();
+ #ifdef ASSERT
+ m->as_Mem()->set_adr_type(adr_type);
+ #endif
+ }
+ int idx = get_alias_index(adr_type);
+ start_alias = MIN2(start_alias, idx);
+ stop_alias = MAX2(stop_alias, idx);
+ }
+
+ assert(stop_alias >= start_alias, "should have expanded aliases");
+
+ Node_Stack stack(0);
+ #ifdef ASSERT
+ VectorSet seen(Thread::current()->resource_area());
+ #endif
+ // Now let's fix the memory graph so each flattened array access
+ // is moved to the right slice. Start from the MergeMem nodes.
+ uint last = unique();
+ for (uint i = 0; i < mergememnodes.size(); i++) {
+ MergeMemNode* current = mergememnodes.at(i)->as_MergeMem();
+ Node* n = current->memory_at(index);
+ MergeMemNode* mm = NULL;
+ do {
+ // Follow memory edges through memory accesses, phis and
+ // narrow membars and push nodes on the stack. Once we hit
+ // bottom memory, we pop element off the stack one at a
+ // time, in reverse order, and move them to the right slice
+ // by changing their memory edges.
+ if ((n->is_Phi() && n->adr_type() != TypePtr::BOTTOM) || n->is_Mem() || n->adr_type() == TypeAryPtr::INLINES) {
+ assert(!seen.test_set(n->_idx), "");
+ // Uses (a load for instance) will need to be moved to the
+ // right slice as well and will get a new memory state
+ // that we don't know yet. The use could also be the
+ // backedge of a loop. We put a place holder node between
+ // the memory node and its uses. We replace that place
+ // holder with the correct memory state once we know it,
+ // i.e. when nodes are popped off the stack. Using the
+ // place holder make the logic work in the presence of
+ // loops.
+ if (n->outcnt() > 1) {
+ Node* place_holder = NULL;
+ assert(!n->has_out_with(Op_Node), "");
+ for (DUIterator k = n->outs(); n->has_out(k); k++) {
+ Node* u = n->out(k);
+ if (u != current && u->_idx < last) {
+ bool success = false;
+ for (uint l = 0; l < u->req(); l++) {
+ if (!stack.is_empty() && u == stack.node() && l == stack.index()) {
+ continue;
+ }
+ Node* in = u->in(l);
+ if (in == n) {
+ if (place_holder == NULL) {
+ place_holder = new Node(1);
+ place_holder->init_req(0, n);
+ }
+ igvn.replace_input_of(u, l, place_holder);
+ success = true;
+ }
+ }
+ if (success) {
+ --k;
+ }
+ }
+ }
+ }
+ if (n->is_Phi()) {
+ stack.push(n, 1);
+ n = n->in(1);
+ } else if (n->is_Mem()) {
+ stack.push(n, n->req());
+ n = n->in(MemNode::Memory);
+ } else {
+ assert(n->is_Proj() && n->in(0)->Opcode() == Op_MemBarCPUOrder, "");
+ stack.push(n, n->req());
+ n = n->in(0)->in(TypeFunc::Memory);
+ }
+ } else {
+ assert(n->adr_type() == TypePtr::BOTTOM || (n->Opcode() == Op_Node && n->_idx >= last) || (n->is_Proj() && n->in(0)->is_Initialize()), "");
+ // Build a new MergeMem node to carry the new memory state
+ // as we build it. IGVN should fold extraneous MergeMem
+ // nodes.
+ mm = MergeMemNode::make(n);
+ igvn.register_new_node_with_optimizer(mm);
+ while (stack.size() > 0) {
+ Node* m = stack.node();
+ uint idx = stack.index();
+ if (m->is_Mem()) {
+ // Move memory node to its new slice
+ const TypePtr* adr_type = m->adr_type();
+ int alias = get_alias_index(adr_type);
+ Node* prev = mm->memory_at(alias);
+ igvn.replace_input_of(m, MemNode::Memory, prev);
+ mm->set_memory_at(alias, m);
+ } else if (m->is_Phi()) {
+ // We need as many new phis as there are new aliases
+ igvn.replace_input_of(m, idx, mm);
+ if (idx == m->req()-1) {
+ Node* r = m->in(0);
+ for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
+ const Type* adr_type = get_adr_type(j);
+ if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->is_flat()) {
+ continue;
+ }
+ Node* phi = new PhiNode(r, Type::MEMORY, get_adr_type(j));
+ igvn.register_new_node_with_optimizer(phi);
+ for (uint k = 1; k < m->req(); k++) {
+ phi->init_req(k, m->in(k)->as_MergeMem()->memory_at(j));
+ }
+ mm->set_memory_at(j, phi);
+ }
+ Node* base_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
+ igvn.register_new_node_with_optimizer(base_phi);
+ for (uint k = 1; k < m->req(); k++) {
+ base_phi->init_req(k, m->in(k)->as_MergeMem()->base_memory());
+ }
+ mm->set_base_memory(base_phi);
+ }
+ } else {
+ // This is a MemBarCPUOrder node from
+ // Parse::array_load()/Parse::array_store(), in the
+ // branch that handles flattened arrays hidden under
+ // an Object[] array. We also need one new membar per
+ // new alias to keep the unknown access that the
+ // membars protect properly ordered with accesses to
+ // known flattened array.
+ assert(m->is_Proj(), "projection expected");
+ Node* ctrl = m->in(0)->in(TypeFunc::Control);
+ igvn.replace_input_of(m->in(0), TypeFunc::Control, top());
+ for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
+ const Type* adr_type = get_adr_type(j);
+ if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->is_flat()) {
+ continue;
+ }
+ MemBarNode* mb = new MemBarCPUOrderNode(this, j, NULL);
+ igvn.register_new_node_with_optimizer(mb);
+ Node* mem = mm->memory_at(j);
+ mb->init_req(TypeFunc::Control, ctrl);
+ mb->init_req(TypeFunc::Memory, mem);
+ ctrl = new ProjNode(mb, TypeFunc::Control);
+ igvn.register_new_node_with_optimizer(ctrl);
+ mem = new ProjNode(mb, TypeFunc::Memory);
+ igvn.register_new_node_with_optimizer(mem);
+ mm->set_memory_at(j, mem);
+ }
+ igvn.replace_node(m->in(0)->as_Multi()->proj_out(TypeFunc::Control), ctrl);
+ }
+ if (idx < m->req()-1) {
+ idx += 1;
+ stack.set_index(idx);
+ n = m->in(idx);
+ break;
+ }
+ // Take care of place holder nodes
+ if (m->has_out_with(Op_Node)) {
+ Node* place_holder = m->find_out_with(Op_Node);
+ if (place_holder != NULL) {
+ Node* mm_clone = mm->clone();
+ igvn.register_new_node_with_optimizer(mm_clone);
+ Node* hook = new Node(1);
+ hook->init_req(0, mm);
+ igvn.replace_node(place_holder, mm_clone);
+ hook->destruct();
+ }
+ assert(!m->has_out_with(Op_Node), "place holder should be gone now");
+ }
+ stack.pop();
+ }
+ }
+ } while(stack.size() > 0);
+ // Fix the memory state at the MergeMem we started from
+ igvn.rehash_node_delayed(current);
+ for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
+ const Type* adr_type = get_adr_type(j);
+ if (!adr_type->isa_aryptr() || !adr_type->is_aryptr()->is_flat()) {
+ continue;
+ }
+ current->set_memory_at(j, mm);
+ }
+ current->set_memory_at(index, current->base_memory());
+ }
+ igvn.optimize();
+ }
+ print_method(PHASE_SPLIT_INLINES_ARRAY, 2);
+ }
+
+
// StringOpts and late inlining of string methods
void Compile::inline_string_calls(bool parse_time) {
{
// remove useless nodes to make the usage analysis simpler
ResourceMark rm;
set_for_igvn(&new_worklist);
igvn = PhaseIterGVN(initial_gvn());
igvn.optimize();
}
+ if (_inline_type_nodes->length() > 0) {
+ // Do this once all inlining is over to avoid getting inconsistent debug info
+ process_inline_types(igvn);
+ }
+
+ adjust_flattened_array_access_aliases(igvn);
+
// Perform escape analysis
if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
if (has_loops()) {
// Cleanup graph (remove dead nodes).
TracePhase tp("idealLoop", &timers[_t_idealLoop]);
if (failing()) return;
}
}
+ if (_inline_type_nodes->length() > 0) {
+ // Process inline types again now that EA might have simplified the graph
+ process_inline_types(igvn, /* post_ea= */ true);
+ }
+
// Loop transforms on the ideal graph. Range Check Elimination,
// peeling, unrolling, etc.
// Set loop opts counter
if((_loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
mem = prev->in(MemNode::Memory);
}
}
}
+
//------------------------------final_graph_reshaping_impl----------------------
// Implement items 1-5 from final_graph_reshaping below.
void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
if ( n->outcnt() == 0 ) return; // dead node
Node* cmp = new CmpLNode(andl, n->in(2));
n->subsume_by(cmp, this);
}
break;
}
+ #ifdef ASSERT
+ case Op_InlineTypePtr:
+ case Op_InlineType: {
+ n->dump(-1);
+ assert(false, "inline type node was not removed");
+ break;
+ }
+ #endif
default:
assert(!n->is_Call(), "");
assert(!n->is_Mem(), "");
assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
break;
if (holder->is_being_initialized()) {
if (accessing_method->holder() == holder) {
// Access inside a class. The barrier can be elided when access happens in <clinit>,
// <init>, or a static method. In all those cases, there was an initialization
// barrier on the holder klass passed.
! if (accessing_method->is_static_initializer() ||
! accessing_method->is_object_initializer() ||
accessing_method->is_static()) {
return false;
}
} else if (accessing_method->holder()->is_subclass_of(holder)) {
// Access from a subclass. The barrier can be elided only when access happens in <clinit>.
// In case of <init> or a static method, a barrier on the subclass is not enough:
// child class can become fully initialized while its parent class is still being initialized.
! if (accessing_method->is_static_initializer()) {
return false;
}
}
ciMethod* root = method(); // the root method of compilation
if (root != accessing_method) {
if (holder->is_being_initialized()) {
if (accessing_method->holder() == holder) {
// Access inside a class. The barrier can be elided when access happens in <clinit>,
// <init>, or a static method. In all those cases, there was an initialization
// barrier on the holder klass passed.
! if (accessing_method->is_class_initializer() ||
! accessing_method->is_object_constructor() ||
accessing_method->is_static()) {
return false;
}
} else if (accessing_method->holder()->is_subclass_of(holder)) {
// Access from a subclass. The barrier can be elided only when access happens in <clinit>.
// In case of <init> or a static method, a barrier on the subclass is not enough:
// child class can become fully initialized while its parent class is still being initialized.
! if (accessing_method->is_class_initializer()) {
return false;
}
}
ciMethod* root = method(); // the root method of compilation
if (root != accessing_method) {
// (0) superklass is java.lang.Object (can occur in reflective code)
// (1) subklass is already limited to a subtype of superklass => always ok
// (2) subklass does not overlap with superklass => always fail
// (3) superklass has NO subtypes and we can check with a simple compare.
int Compile::static_subtype_check(ciKlass* superk, ciKlass* subk) {
! if (StressReflectiveCode) {
return SSC_full_test; // Let caller generate the general case.
}
if (superk == env()->Object_klass()) {
return SSC_always_true; // (0) this test cannot fail
}
ciType* superelem = superk;
! if (superelem->is_array_klass())
superelem = superelem->as_array_klass()->base_element_type();
if (!subk->is_interface()) { // cannot trust static interface types yet
if (subk->is_subtype_of(superk)) {
return SSC_always_true; // (1) false path dead; no dynamic test needed
}
// (0) superklass is java.lang.Object (can occur in reflective code)
// (1) subklass is already limited to a subtype of superklass => always ok
// (2) subklass does not overlap with superklass => always fail
// (3) superklass has NO subtypes and we can check with a simple compare.
int Compile::static_subtype_check(ciKlass* superk, ciKlass* subk) {
! if (StressReflectiveCode || superk == NULL || subk == NULL) {
return SSC_full_test; // Let caller generate the general case.
}
if (superk == env()->Object_klass()) {
return SSC_always_true; // (0) this test cannot fail
}
ciType* superelem = superk;
! if (superelem->is_array_klass()) {
+ ciArrayKlass* ak = superelem->as_array_klass();
superelem = superelem->as_array_klass()->base_element_type();
+ }
if (!subk->is_interface()) { // cannot trust static interface types yet
if (subk->is_subtype_of(superk)) {
return SSC_always_true; // (1) false path dead; no dynamic test needed
}
igvn.check_no_speculative_types();
#endif
}
}
+ // Choose an implementation for an acmp (reference comparison) of 'a' and 'b'
+ // based on their static types.
+ // Returns a (not yet transformed) replacement Cmp node when the comparison can
+ // be lowered to a cheaper form, or NULL to indicate that the full new-acmp
+ // sequence must be emitted by the caller.
+ Node* Compile::optimize_acmp(PhaseGVN* phase, Node* a, Node* b) {
+ const TypeInstPtr* ta = phase->type(a)->isa_instptr();
+ const TypeInstPtr* tb = phase->type(b)->isa_instptr();
+ if (!EnableValhalla || ta == NULL || tb == NULL ||
+ ta->is_zero_type() || tb->is_zero_type() ||
+ !ta->can_be_inline_type() || !tb->can_be_inline_type()) {
+ // Use old acmp if one operand is null or not an inline type
+ return new CmpPNode(a, b);
+ } else if (ta->is_inlinetypeptr() || tb->is_inlinetypeptr()) {
+ // We know that one operand is an inline type. Therefore,
+ // new acmp will only return true if both operands are NULL.
+ // Check if both operands are null by or'ing the oops.
+ // CastP2X turns each oop into a raw word so the bitwise OR is zero
+ // iff both oops are NULL; compare the OR against constant zero.
+ a = phase->transform(new CastP2XNode(NULL, a));
+ b = phase->transform(new CastP2XNode(NULL, b));
+ a = phase->transform(new OrXNode(a, b));
+ return new CmpXNode(a, phase->MakeConX(0));
+ }
+ // Use new acmp: neither operand is known to be (or known not to be) an
+ // inline type, so no shortcut applies here.
+ return NULL;
+ }
+
// Auxiliary method to support randomized stressing/fuzzing.
//
// This method can be called the arbitrary number of times, with current count
// as the argument. The logic allows selecting a single candidate from the
// running list of candidates as follows:
< prev index next >