src/hotspot/share/opto/macro.cpp
/*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
* questions.
*
*/
#include "precompiled.hpp"
+ #include "ci/ciFlatArrayKlass.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "libadt/vectset.hpp"
#include "memory/universe.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
+ #include "opto/inlinetypenode.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/loopnode.hpp"
#include "opto/macro.hpp"
#include "opto/memnode.hpp"
}
}
return nreplacements;
}
- void PhaseMacroExpand::migrate_outs(Node *old, Node *target) {
- assert(old != NULL, "sanity");
- for (DUIterator_Fast imax, i = old->fast_outs(imax); i < imax; i++) {
- Node* use = old->fast_out(i);
- _igvn.rehash_node_delayed(use);
- imax -= replace_input(use, old, target);
- // back up iterator
- --i;
- }
- assert(old->outcnt() == 0, "all uses must be deleted");
- }
-
- void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcall) {
- // Copy debug information and adjust JVMState information
- uint old_dbg_start = oldcall->tf()->domain()->cnt();
- uint new_dbg_start = newcall->tf()->domain()->cnt();
- int jvms_adj = new_dbg_start - old_dbg_start;
- assert (new_dbg_start == newcall->req(), "argument count mismatch");
-
- // SafePointScalarObject node could be referenced several times in debug info.
- // Use Dict to record cloned nodes.
- Dict* sosn_map = new Dict(cmpkey,hashkey);
- for (uint i = old_dbg_start; i < oldcall->req(); i++) {
- Node* old_in = oldcall->in(i);
- // Clone old SafePointScalarObjectNodes, adjusting their field contents.
- if (old_in != NULL && old_in->is_SafePointScalarObject()) {
- SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
- uint old_unique = C->unique();
- Node* new_in = old_sosn->clone(sosn_map);
- if (old_unique != C->unique()) { // New node?
- new_in->set_req(0, C->root()); // reset control edge
- new_in = transform_later(new_in); // Register new node.
- }
- old_in = new_in;
- }
- newcall->add_req(old_in);
- }
-
- // JVMS may be shared so clone it before we modify it
- newcall->set_jvms(oldcall->jvms() != NULL ? oldcall->jvms()->clone_deep(C) : NULL);
- for (JVMState *jvms = newcall->jvms(); jvms != NULL; jvms = jvms->caller()) {
- jvms->set_map(newcall);
- jvms->set_locoff(jvms->locoff()+jvms_adj);
- jvms->set_stkoff(jvms->stkoff()+jvms_adj);
- jvms->set_monoff(jvms->monoff()+jvms_adj);
- jvms->set_scloff(jvms->scloff()+jvms_adj);
- jvms->set_endoff(jvms->endoff()+jvms_adj);
- }
- }
-
Node* PhaseMacroExpand::opt_bits_test(Node* ctrl, Node* region, int edge, Node* word, int mask, int bits, bool return_fast_path) {
Node* cmp;
if (mask != 0) {
Node* and_node = transform_later(new AndXNode(word, MakeConX(mask)));
cmp = transform_later(new CmpXNode(and_node, MakeConX(bits)));
// Slow path call has no side-effects, uses few values
copy_predefined_input_for_runtime_call(slow_path, oldcall, call );
if (parm0 != NULL) call->init_req(TypeFunc::Parms+0, parm0);
if (parm1 != NULL) call->init_req(TypeFunc::Parms+1, parm1);
if (parm2 != NULL) call->init_req(TypeFunc::Parms+2, parm2);
- copy_call_debug_info(oldcall, call);
+ call->copy_call_debug_info(&_igvn, oldcall);
call->set_cnt(PROB_UNLIKELY_MAG(4)); // Same effect as RC_UNCOMMON.
_igvn.replace_node(oldcall, call);
transform_later(call);
return call;
} else if (mem->is_Store()) {
const TypePtr* atype = mem->as_Store()->adr_type();
int adr_idx = phase->C->get_alias_index(atype);
if (adr_idx == alias_idx) {
assert(atype->isa_oopptr(), "address type must be oopptr");
- int adr_offset = atype->offset();
+ int adr_offset = atype->flattened_offset();
uint adr_iid = atype->is_oopptr()->instance_id();
// Array elements references have the same alias_idx
// but different offset and different instance_id.
if (adr_offset == offset && adr_iid == alloc->_idx)
return mem;
DEBUG_ONLY(mem->dump();)
assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
return NULL;
}
mem = mem->in(MemNode::Memory);
- } else if (mem->Opcode() == Op_StrInflatedCopy) {
+ } else if (mem->Opcode() == Op_StrInflatedCopy) {
Node* adr = mem->in(3); // Destination array
const TypePtr* atype = adr->bottom_type()->is_ptr();
int adr_idx = phase->C->get_alias_index(atype);
if (adr_idx == alias_idx) {
DEBUG_ONLY(mem->dump();)
Node* dest_pos = ac->in(ArrayCopyNode::DestPos);
const TypeInt* src_pos_t = _igvn.type(src_pos)->is_int();
const TypeInt* dest_pos_t = _igvn.type(dest_pos)->is_int();
Node* adr = NULL;
- const TypePtr* adr_type = NULL;
+ Node* base = ac->in(ArrayCopyNode::Src);
+ const TypePtr* adr_type = _igvn.type(base)->is_ptr();
+ assert(adr_type->isa_aryptr(), "only arrays here");
if (src_pos_t->is_con() && dest_pos_t->is_con()) {
intptr_t off = ((src_pos_t->get_con() - dest_pos_t->get_con()) << shift) + offset;
- Node* base = ac->in(ArrayCopyNode::Src);
- adr = _igvn.transform(new AddPNode(base, base, MakeConX(off)));
+ adr = _igvn.transform(new AddPNode(base, base, MakeConX(off)));
+ adr_type = _igvn.type(adr)->is_ptr();
assert(adr_type == _igvn.type(base)->is_aryptr()->add_field_offset_and_offset(off), "incorrect address type");
if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
// Don't emit a new load from src if src == dst but try to get the value from memory instead
return value_from_mem(ac->in(TypeFunc::Memory), ctl, ft, ftype, adr_type->isa_oopptr(), alloc);
}
} else {
+ if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
+ // Non-constant offset in the array: we can't statically
+ // determine the value
+ return NULL;
+ }
Node* diff = _igvn.transform(new SubINode(ac->in(ArrayCopyNode::SrcPos), ac->in(ArrayCopyNode::DestPos)));
#ifdef _LP64
diff = _igvn.transform(new ConvI2LNode(diff));
#endif
diff = _igvn.transform(new LShiftXNode(diff, intcon(shift)));
Node* off = _igvn.transform(new AddXNode(MakeConX(offset), diff));
- Node* base = ac->in(ArrayCopyNode::Src);
- adr = _igvn.transform(new AddPNode(base, base, off));
- adr_type = _igvn.type(base)->is_ptr()->add_offset(Type::OffsetBot);
- if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) {
- // Non constant offset in the array: we can't statically
- // determine the value
- return NULL;
+ adr = _igvn.transform(new AddPNode(base, base, off));
+ // In the case of a flattened inline type array, each field has its
+ // own slice so we need to extract the field being accessed from
+ // the address computation
+ adr_type = adr_type->is_aryptr()->add_field_offset_and_offset(offset)->add_offset(Type::OffsetBot);
adr = _igvn.transform(new CastPPNode(adr, adr_type));
}
MergeMemNode* mergemen = MergeMemNode::make(mem);
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
res = ArrayCopyNode::load(bs, &_igvn, ctl, mergemen, adr, adr_type, type, bt);
}
}
if (res != NULL) {
if (ftype->isa_narrowoop()) {
// PhaseMacroExpand::scalar_replacement adds DecodeN nodes
+ assert(res->isa_DecodeN(), "should be narrow oop");
res = _igvn.transform(new EncodePNode(res, ftype));
}
return res;
}
return NULL;
// Note: this function is recursive, its depth is limited by the "level" argument
// Returns the computed Phi, or NULL if it cannot compute it.
Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, AllocateNode *alloc, Node_Stack *value_phis, int level) {
assert(mem->is_Phi(), "sanity");
int alias_idx = C->get_alias_index(adr_t);
- int offset = adr_t->offset();
+ int offset = adr_t->flattened_offset();
int instance_id = adr_t->instance_id();
// Check if an appropriate value phi already exists.
Node* region = mem->in(0);
for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) {
values.at_put(j, in);
} else {
Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn);
if (val == start_mem || val == alloc_mem) {
// hit a sentinel, return appropriate 0 value
- values.at_put(j, _igvn.zerocon(ft));
+ Node* default_value = alloc->in(AllocateNode::DefaultValue);
+ if (default_value != NULL) {
+ values.at_put(j, default_value);
+ } else {
+ assert(alloc->in(AllocateNode::RawDefaultValue) == NULL, "default value may not be null");
+ values.at_put(j, _igvn.zerocon(ft));
+ }
continue;
}
if (val->is_Initialize()) {
val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
}
Node* n = val->in(MemNode::ValueIn);
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
n = bs->step_over_gc_barrier(n);
values.at_put(j, n);
} else if(val->is_Proj() && val->in(0) == alloc) {
- values.at_put(j, _igvn.zerocon(ft));
+ Node* default_value = alloc->in(AllocateNode::DefaultValue);
+ if (default_value != NULL) {
+ values.at_put(j, default_value);
+ } else {
+ assert(alloc->in(AllocateNode::RawDefaultValue) == NULL, "default value may not be null");
+ values.at_put(j, _igvn.zerocon(ft));
+ }
} else if (val->is_Phi()) {
val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1);
if (val == NULL) {
return NULL;
}
assert(adr_t->is_known_instance_field(), "instance required");
int instance_id = adr_t->instance_id();
assert((uint)instance_id == alloc->_idx, "wrong allocation");
int alias_idx = C->get_alias_index(adr_t);
- int offset = adr_t->offset();
+ int offset = adr_t->flattened_offset();
Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
- Node *alloc_ctrl = alloc->in(TypeFunc::Control);
Node *alloc_mem = alloc->in(TypeFunc::Memory);
VectorSet visited;
bool done = sfpt_mem == alloc_mem;
Node *mem = sfpt_mem;
if (mem == start_mem || mem == alloc_mem) {
done = true; // hit a sentinel, return appropriate 0 value
} else if (mem->is_Initialize()) {
mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn);
if (mem == NULL) {
- done = true; // Something go wrong.
+ done = true; // Something went wrong.
} else if (mem->is_Store()) {
const TypePtr* atype = mem->as_Store()->adr_type();
assert(C->get_alias_index(atype) == Compile::AliasIdxRaw, "store is correct memory slice");
done = true;
}
} else if (mem->is_Store()) {
const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr();
assert(atype != NULL, "address type must be oopptr");
assert(C->get_alias_index(atype) == alias_idx &&
- atype->is_known_instance_field() && atype->offset() == offset &&
+ atype->is_known_instance_field() && atype->flattened_offset() == offset &&
atype->instance_id() == instance_id, "store is correct memory slice");
done = true;
} else if (mem->is_Phi()) {
// try to find a phi's unique input
Node *unique_input = NULL;
}
}
if (mem != NULL) {
if (mem == start_mem || mem == alloc_mem) {
// hit a sentinel, return appropriate 0 value
+ Node* default_value = alloc->in(AllocateNode::DefaultValue);
+ if (default_value != NULL) {
+ return default_value;
+ }
+ assert(alloc->in(AllocateNode::RawDefaultValue) == NULL, "default value may not be null");
return _igvn.zerocon(ft);
} else if (mem->is_Store()) {
Node* n = mem->in(MemNode::ValueIn);
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
n = bs->step_over_gc_barrier(n);
m = sfpt_mem;
}
return make_arraycopy_load(mem->as_ArrayCopy(), offset, ctl, m, ft, ftype, alloc);
}
}
- // Something go wrong.
+ // Something went wrong.
return NULL;
}
+ // Search for the last values stored into the inline type's fields.
+ Node* PhaseMacroExpand::inline_type_from_mem(Node* mem, Node* ctl, ciInlineKlass* vk, const TypeAryPtr* adr_type, int offset, AllocateNode* alloc) {
+ // Subtract the offset of the first field to account for the missing oop header
+ offset -= vk->first_field_offset();
+ // Create a new InlineTypeNode and retrieve the field values from memory
+ InlineTypeNode* vt = InlineTypeNode::make_uninitialized(_igvn, vk)->as_InlineType();
+ for (int i = 0; i < vk->nof_declared_nonstatic_fields(); ++i) {
+ ciType* field_type = vt->field_type(i);
+ int field_offset = offset + vt->field_offset(i);
+ // Each inline type field has its own memory slice
+ adr_type = adr_type->with_field_offset(field_offset);
+ Node* value = NULL;
+ if (vt->field_is_flattened(i)) {
+ value = inline_type_from_mem(mem, ctl, field_type->as_inline_klass(), adr_type, field_offset, alloc);
+ } else {
+ const Type* ft = Type::get_const_type(field_type);
+ BasicType bt = field_type->basic_type();
+ if (UseCompressedOops && !is_java_primitive(bt)) {
+ ft = ft->make_narrowoop();
+ bt = T_NARROWOOP;
+ }
+ value = value_from_mem(mem, ctl, bt, ft, adr_type, alloc);
+ if (value != NULL && ft->isa_narrowoop()) {
+ assert(UseCompressedOops, "unexpected narrow oop");
+ value = transform_later(new DecodeNNode(value, value->get_ptr_type()));
+ }
+ }
+ if (value != NULL) {
+ vt->set_field_value(i, value);
+ } else {
+ // We might have reached the TrackedInitializationLimit
+ return NULL;
+ }
+ }
+ return transform_later(vt);
+ }
+
// Check the possibility of scalar replacement.
bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) {
// Scan the uses of the allocation to check for anything that would
// prevent us from eliminating it.
NOT_PRODUCT( const char* fail_eliminate = NULL; )
SHENANDOAHGC_ONLY(&& (!UseShenandoahGC || !ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(n))) ) {
DEBUG_ONLY(disq_node = n;)
if (n->is_Load() || n->is_LoadStore()) {
NOT_PRODUCT(fail_eliminate = "Field load";)
} else {
- NOT_PRODUCT(fail_eliminate = "Not store field referrence";)
+ NOT_PRODUCT(fail_eliminate = "Not store field reference";)
}
can_eliminate = false;
}
}
} else if (use->is_ArrayCopy() &&
NOT_PRODUCT(fail_eliminate = "NULL or TOP memory";)
can_eliminate = false;
} else {
safepoints.append_if_missing(sfpt);
}
+ } else if (use->is_InlineType() && use->isa_InlineType()->get_oop() == res) {
+ // ok to eliminate
+ } else if (use->Opcode() == Op_StoreX && use->in(MemNode::Address) == res) {
+ // store to mark word
} else if (use->Opcode() != Op_CastP2X) { // CastP2X is used by card mark
if (use->is_Phi()) {
if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) {
NOT_PRODUCT(fail_eliminate = "Object is return value";)
} else {
}
DEBUG_ONLY(disq_node = use;)
} else {
if (use->Opcode() == Op_Return) {
NOT_PRODUCT(fail_eliminate = "Object is return value";)
- }else {
+ } else {
NOT_PRODUCT(fail_eliminate = "Object is referenced by node";)
}
DEBUG_ONLY(disq_node = use;)
}
can_eliminate = false;
+ } else {
+ assert(use->Opcode() == Op_CastP2X, "should be");
+ assert(!use->has_out_with(Op_OrL), "should have been removed because oop is never null");
}
}
}
#ifndef PRODUCT
// find the array's elements which will be needed for safepoint debug information
nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1);
assert(klass->is_array_klass() && nfields >= 0, "must be an array klass.");
elem_type = klass->as_array_klass()->element_type();
basic_elem_type = elem_type->basic_type();
+ if (elem_type->is_inlinetype() && !klass->is_flat_array_klass()) {
+ assert(basic_elem_type == T_INLINE_TYPE, "unexpected element basic type");
+ basic_elem_type = T_OBJECT;
+ }
array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
element_size = type2aelembytes(basic_elem_type);
+ if (klass->is_flat_array_klass()) {
+ // Flattened inline type array
+ element_size = klass->as_flat_array_klass()->element_byte_size();
+ }
}
}
//
// Process the safepoint uses
//
+ Unique_Node_List value_worklist;
while (safepoints.length() > 0) {
SafePointNode* sfpt = safepoints.pop();
Node* mem = sfpt->memory();
Node* ctl = sfpt->control();
assert(sfpt->jvms() != NULL, "missed JVMS");
if (iklass != NULL) {
field = iklass->nonstatic_field_at(j);
offset = field->offset();
elem_type = field->type();
basic_elem_type = field->layout_type();
+ assert(!field->is_flattened(), "flattened inline type fields should not have safepoint uses");
} else {
offset = array_base + j * (intptr_t)element_size;
}
const Type *field_type;
}
} else {
field_type = Type::get_const_basic_type(basic_elem_type);
}
- const TypeOopPtr *field_addr_type = res_type->add_offset(offset)->isa_oopptr();
-
- Node *field_val = value_from_mem(mem, ctl, basic_elem_type, field_type, field_addr_type, alloc);
+ Node* field_val = NULL;
+ const TypeOopPtr* field_addr_type = res_type->add_offset(offset)->isa_oopptr();
+ if (klass->is_flat_array_klass()) {
+ ciInlineKlass* vk = elem_type->as_inline_klass();
+ assert(vk->flatten_array(), "must be flattened");
+ field_val = inline_type_from_mem(mem, ctl, vk, field_addr_type->isa_aryptr(), 0, alloc);
+ } else {
+ field_val = value_from_mem(mem, ctl, basic_elem_type, field_type, field_addr_type, alloc);
+ }
if (field_val == NULL) {
// We weren't able to find a value for this field,
// give up on eliminating this allocation.
// Remove any extra entries we added to the safepoint.
res->dump();
}
#endif
return false;
}
- if (UseCompressedOops && field_type->isa_narrowoop()) {
+ if (field_val->is_InlineType()) {
+ // Keep track of inline types to scalarize them later
+ value_worklist.push(field_val);
+ } else if (UseCompressedOops && field_type->isa_narrowoop()) {
// Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation
// to be able scalar replace the allocation.
if (field_val->is_EncodeP()) {
field_val = field_val->in(1);
} else {
int end = jvms->debug_end();
sfpt->replace_edges_in_range(res, sobj, start, end);
_igvn._worklist.push(sfpt);
safepoints_done.append_if_missing(sfpt); // keep it for rollback
}
+ // Scalarize inline types that were added to the safepoint
+ for (uint i = 0; i < value_worklist.size(); ++i) {
+ Node* vt = value_worklist.at(i);
+ vt->as_InlineType()->make_scalar_in_safepoints(&_igvn);
+ }
return true;
}
static void disconnect_projections(MultiNode* n, PhaseIterGVN& igvn) {
Node* ctl_proj = n->proj_out_or_null(TypeFunc::Control);
igvn.replace_node(mem_proj, n->in(TypeFunc::Memory));
}
}
// Process users of eliminated allocation.
- void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
+ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc, bool inline_alloc) {
Node* res = alloc->result_cast();
if (res != NULL) {
for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) {
Node *use = res->last_out(j);
uint oc1 = res->outcnt();
if (use->is_AddP()) {
for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) {
Node *n = use->last_out(k);
uint oc2 = use->outcnt();
if (n->is_Store()) {
- #ifdef ASSERT
- // Verify that there is no dependent MemBarVolatile nodes,
- // they should be removed during IGVN, see MemBarNode::Ideal().
- for (DUIterator_Fast pmax, p = n->fast_outs(pmax);
- p < pmax; p++) {
- Node* mb = n->fast_out(p);
- assert(mb->is_Initialize() || !mb->is_MemBar() ||
- mb->req() <= MemBarNode::Precedent ||
- mb->in(MemBarNode::Precedent) != n,
- "MemBarVolatile should be eliminated for non-escaping object");
+ for (DUIterator_Fast pmax, p = n->fast_outs(pmax); p < pmax; p++) {
+ MemBarNode* mb = n->fast_out(p)->isa_MemBar();
+ if (mb != NULL && mb->req() <= MemBarNode::Precedent && mb->in(MemBarNode::Precedent) == n) {
+ // MemBarVolatiles should have been removed by MemBarNode::Ideal() for non-inline allocations
+ assert(inline_alloc, "MemBarVolatile should be eliminated for non-escaping object");
+ mb->remove(&_igvn);
+ }
}
- #endif
_igvn.replace_node(n, n->in(MemNode::Memory));
} else {
eliminate_gc_barrier(n);
}
k -= (oc2 - use->outcnt());
}
} else {
assert(ac->is_arraycopy_validated() ||
ac->is_copyof_validated() ||
ac->is_copyofrange_validated(), "unsupported");
- CallProjections callprojs;
- ac->extract_projections(&callprojs, true);
+ CallProjections* callprojs = ac->extract_projections(true);
- _igvn.replace_node(callprojs.fallthrough_ioproj, ac->in(TypeFunc::I_O));
- _igvn.replace_node(callprojs.fallthrough_memproj, ac->in(TypeFunc::Memory));
- _igvn.replace_node(callprojs.fallthrough_catchproj, ac->in(TypeFunc::Control));
+ _igvn.replace_node(callprojs->fallthrough_ioproj, ac->in(TypeFunc::I_O));
+ _igvn.replace_node(callprojs->fallthrough_memproj, ac->in(TypeFunc::Memory));
+ _igvn.replace_node(callprojs->fallthrough_catchproj, ac->in(TypeFunc::Control));
// Set control to top. IGVN will remove the remaining projections
ac->set_req(0, top());
ac->replace_edge(res, top());
if (src->outcnt() == 0 && !src->is_top()) {
_igvn.remove_dead_node(src);
}
}
_igvn._worklist.push(ac);
+ } else if (use->is_InlineType()) {
+ assert(use->isa_InlineType()->get_oop() == res, "unexpected inline type use");
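+ // The buffer allocation is eliminated: clear the inline type's oop
+ // input (its scalar field values are kept).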
+ _igvn.rehash_node_delayed(use);
+ use->isa_InlineType()->set_oop(_igvn.zerocon(T_INLINE_TYPE));
+ } else if (use->is_Store()) {
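+ // A store to the eliminated object is dead: let its users consume
+ // the incoming memory state instead.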
+ _igvn.replace_node(use, use->in(MemNode::Memory));
} else {
eliminate_gc_barrier(use);
}
j -= (oc1 - res->outcnt());
}
// Eliminate Initialize node.
InitializeNode *init = use->as_Initialize();
assert(init->outcnt() <= 2, "only a control and memory projection expected");
Node *ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
if (ctrl_proj != NULL) {
+ // Inline type buffer allocations are followed by a membar
+ Node* membar_after = ctrl_proj->unique_ctrl_out();
+ if (inline_alloc && membar_after->Opcode() == Op_MemBarCPUOrder) {
+ membar_after->as_MemBar()->remove(&_igvn);
+ }
_igvn.replace_node(ctrl_proj, init->in(TypeFunc::Control));
#ifdef ASSERT
Node* tmp = init->in(TypeFunc::Control);
assert(tmp == _fallthroughcatchproj, "allocation control projection");
#endif
assert(mem == _memproj_fallthrough, "allocation memory projection");
}
#endif
_igvn.replace_node(mem_proj, mem);
}
+ } else if (use->Opcode() == Op_MemBarStoreStore) {
+ // Inline type buffer allocations are followed by a membar
+ assert(inline_alloc, "Unexpected MemBarStoreStore");
+ use->as_MemBar()->remove(&_igvn);
} else {
assert(false, "only Initialize or AddP expected");
}
j -= (oc1 - _resproj->outcnt());
}
bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
// Don't do scalar replacement if the frame can be popped by JVMTI:
// if reallocation fails during deoptimization we'll pop all
// interpreter frames for this compiled frame and that won't play
// nice with JVMTI popframe.
- if (!EliminateAllocations || JvmtiExport::can_pop_frame() || !alloc->_is_non_escaping) {
+ if (!EliminateAllocations || JvmtiExport::can_pop_frame()) {
return false;
}
Node* klass = alloc->in(AllocateNode::KlassNode);
const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
- Node* res = alloc->result_cast();
+
+ // Attempt to eliminate inline type buffer allocations
+ // regardless of usage and escape/replaceable status.
+ bool inline_alloc = tklass->klass()->is_inlinetype();
+ if (!alloc->_is_non_escaping && !inline_alloc) {
+ return false;
+ }
// Eliminate boxing allocations which are not used
- // regardless scalar replacable status.
- bool boxing_alloc = C->eliminate_boxing() &&
- tklass->klass()->is_instance_klass() &&
+ // regardless of scalar replaceable status.
+ Node* res = alloc->result_cast();
+ bool boxing_alloc = (res == NULL) && C->eliminate_boxing() &&
+ tklass->klass()->is_instance_klass() &&
tklass->klass()->as_instance_klass()->is_box_klass();
- if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != NULL))) {
+ if (!alloc->_is_scalar_replaceable && !boxing_alloc && !inline_alloc) {
return false;
}
extract_call_projections(alloc);
if (!can_eliminate_allocation(alloc, safepoints)) {
return false;
}
if (!alloc->_is_scalar_replaceable) {
- assert(res == NULL, "sanity");
+ assert(res == NULL || inline_alloc, "sanity");
// We can only eliminate allocation if all debug info references
// are already replaced with SafePointScalarObject because
// we can't search for a fields value without instance_id.
if (safepoints.length() > 0) {
+ assert(!inline_alloc, "Inline type allocations should not have safepoint uses");
return false;
}
}
if (!scalar_replacement(alloc, safepoints)) {
p = p->caller();
}
log->tail("eliminate_allocation");
}
- process_users_of_allocation(alloc);
+ process_users_of_allocation(alloc, inline_alloc);
#ifndef PRODUCT
if (PrintEliminateAllocations) {
if (alloc->is_AllocateArray())
tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
assert(boxing->result_cast() == NULL, "unexpected boxing node result");
extract_call_projections(boxing);
- const TypeTuple* r = boxing->tf()->range();
+ const TypeTuple* r = boxing->tf()->range_sig();
assert(r->cnt() > TypeFunc::Parms, "sanity");
const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr();
assert(t != NULL, "sanity");
CompileLog* log = C->log();
slow_region = new RegionNode(3);
// Now make the initial failure test. Usually a too-big test but
// might be a TRUE for finalizers or a fancy class check for
// newInstance0.
- IfNode *toobig_iff = new IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
+ IfNode* toobig_iff = new IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
transform_later(toobig_iff);
// Plug the failing-too-big test into the slow-path region
- Node *toobig_true = new IfTrueNode( toobig_iff );
+ Node* toobig_true = new IfTrueNode(toobig_iff);
transform_later(toobig_true);
slow_region ->init_req( too_big_or_final_path, toobig_true );
- toobig_false = new IfFalseNode( toobig_iff );
+ toobig_false = new IfFalseNode(toobig_iff);
transform_later(toobig_false);
} else {
// No initial test, just fall into next case
assert(allocation_has_use || !expand_fast_path, "Should already have been handled");
toobig_false = ctrl;
result_phi_i_o->init_req(slow_result_path, i_o);
// Name successful fast-path variables
Node* fast_oop_ctrl;
Node* fast_oop_rawmem;
+
if (allocation_has_use) {
Node* needgc_ctrl = NULL;
result_phi_rawoop = new PhiNode(result_region, TypeRawPtr::BOTTOM);
intx prefetch_lines = length != NULL ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr));
call->init_req(TypeFunc::Parms+0, klass_node);
if (length != NULL) {
call->init_req(TypeFunc::Parms+1, length);
+ } else {
+ // Let the runtime know if this is a larval allocation
+ call->init_req(TypeFunc::Parms+1, _igvn.intcon(alloc->_larval));
}
// Copy debug information and adjust JVMState information, then replace
// allocate node with the call
- copy_call_debug_info((CallNode *) alloc, call);
+ call->copy_call_debug_info(&_igvn, alloc);
if (expand_fast_path) {
call->set_cnt(PROB_UNLIKELY_MAG(4)); // Same effect as RC_UNCOMMON.
} else {
// Hook i_o projection to avoid its elimination during allocation
// replacement (when only a slow call is generated).
// An allocate node has separate memory projections for the uses on
// the control and i_o paths. Replace the control memory projection with
// result_phi_rawmem (unless we are only generating a slow call when
// both memory projections are combined)
if (expand_fast_path && _memproj_fallthrough != NULL) {
- migrate_outs(_memproj_fallthrough, result_phi_rawmem);
+ _igvn.replace_in_uses(_memproj_fallthrough, result_phi_rawmem);
}
// Now change uses of _memproj_catchall to use _memproj_fallthrough and delete
// _memproj_catchall so we end up with a call that has only 1 memory projection.
- if (_memproj_catchall != NULL ) {
+ if (_memproj_catchall != NULL) {
if (_memproj_fallthrough == NULL) {
_memproj_fallthrough = new ProjNode(call, TypeFunc::Memory);
transform_later(_memproj_fallthrough);
}
- migrate_outs(_memproj_catchall, _memproj_fallthrough);
+ _igvn.replace_in_uses(_memproj_catchall, _memproj_fallthrough);
_igvn.remove_dead_node(_memproj_catchall);
}
// An allocate node has separate i_o projections for the uses on the control
// and i_o paths. Always replace the control i_o projection with result i_o
// otherwise incoming i_o become dead when only a slow call is generated
// (it is different from memory projections where both projections are
// combined in such case).
if (_ioproj_fallthrough != NULL) {
- migrate_outs(_ioproj_fallthrough, result_phi_i_o);
+ _igvn.replace_in_uses(_ioproj_fallthrough, result_phi_i_o);
}
// Now change uses of _ioproj_catchall to use _ioproj_fallthrough and delete
// _ioproj_catchall so we end up with a call that has only 1 i_o projection.
- if (_ioproj_catchall != NULL ) {
+ if (_ioproj_catchall != NULL) {
if (_ioproj_fallthrough == NULL) {
_ioproj_fallthrough = new ProjNode(call, TypeFunc::I_O);
transform_later(_ioproj_fallthrough);
}
- migrate_outs(_ioproj_catchall, _ioproj_fallthrough);
+ _igvn.replace_in_uses(_ioproj_catchall, _ioproj_fallthrough);
_igvn.remove_dead_node(_ioproj_catchall);
}
// if we generated only a slow call, we are done
if (!expand_fast_path) {
}
assert(_resproj->outcnt() == 0, "all uses must be deleted");
_igvn.remove_dead_node(_resproj);
}
if (_fallthroughcatchproj != NULL) {
- migrate_outs(_fallthroughcatchproj, ctrl);
+ _igvn.replace_in_uses(_fallthroughcatchproj, ctrl);
_igvn.remove_dead_node(_fallthroughcatchproj);
}
if (_catchallcatchproj != NULL) {
_igvn.rehash_node_delayed(_catchallcatchproj);
_catchallcatchproj->set_req(0, top());
Node* catchnode = _fallthroughproj->unique_ctrl_out();
_igvn.remove_dead_node(catchnode);
_igvn.remove_dead_node(_fallthroughproj);
}
if (_memproj_fallthrough != NULL) {
- migrate_outs(_memproj_fallthrough, mem);
+ _igvn.replace_in_uses(_memproj_fallthrough, mem);
_igvn.remove_dead_node(_memproj_fallthrough);
}
if (_ioproj_fallthrough != NULL) {
- migrate_outs(_ioproj_fallthrough, i_o);
+ _igvn.replace_in_uses(_ioproj_fallthrough, i_o);
_igvn.remove_dead_node(_ioproj_fallthrough);
}
if (_memproj_catchall != NULL) {
_igvn.rehash_node_delayed(_memproj_catchall);
_memproj_catchall->set_req(0, top());
}
}
// Helper for PhaseMacroExpand::expand_allocate_common.
// Initializes the newly-allocated storage.
- Node*
- PhaseMacroExpand::initialize_object(AllocateNode* alloc,
- Node* control, Node* rawmem, Node* object,
- Node* klass_node, Node* length,
- Node* size_in_bytes) {
+ Node* PhaseMacroExpand::initialize_object(AllocateNode* alloc,
+ Node* control, Node* rawmem, Node* object,
+ Node* klass_node, Node* length,
+ Node* size_in_bytes) {
InitializeNode* init = alloc->initialization();
// Store the klass & mark bits
- Node* mark_node = alloc->make_ideal_mark(&_igvn, object, control, rawmem);
+ Node* mark_node = alloc->make_ideal_mark(&_igvn, control, rawmem);
if (!mark_node->is_Con()) {
transform_later(mark_node);
}
rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, TypeX_X->basic_type());
// there can be two Allocates to one Initialize. The answer in all these
// edge cases is safety first. It is always safe to clear immediately
// within an Allocate, and then (maybe or maybe not) clear some more later.
if (!(UseTLAB && ZeroTLAB)) {
rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
+ alloc->in(AllocateNode::DefaultValue),
+ alloc->in(AllocateNode::RawDefaultValue),
header_size, size_in_bytes,
&_igvn);
}
} else {
if (!init->is_complete()) {
if (!alock->is_eliminated()) {
return false;
}
#ifdef ASSERT
+ const Type* obj_type = _igvn.type(alock->obj_node());
+ assert(!obj_type->isa_inlinetype() && !obj_type->is_inlinetypeptr(), "Eliminating lock on inline type");
if (!alock->is_coarsened()) {
// Check that new "eliminated" BoxLock node is created.
BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
assert(oldbox->is_eliminated(), "should be done already");
}
// Optimize test; set region slot 2
slow_path = opt_bits_test(ctrl, region, 2, flock, 0, 0);
mem_phi->init_req(2, mem);
}
+ const TypeOopPtr* objptr = _igvn.type(obj)->make_oopptr();
+ if (objptr->can_be_inline_type()) {
+ // Deoptimize and re-execute the monitorenter if the object is an inline type
+ assert(EnableValhalla, "should only be used if inline types are enabled");
+ Node* mark = make_load(slow_path, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type());
+ Node* value_mask = _igvn.MakeConX(markWord::always_locked_pattern);
+ Node* is_value = _igvn.transform(new AndXNode(mark, value_mask));
+ Node* cmp = _igvn.transform(new CmpXNode(is_value, value_mask));
+ Node* bol = _igvn.transform(new BoolNode(cmp, BoolTest::eq));
+ Node* unc_ctrl = generate_slow_guard(&slow_path, bol, NULL);
+
+ int trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_class_check, Deoptimization::Action_none);
+ address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();
+ const TypePtr* no_memory_effects = NULL;
+ JVMState* jvms = lock->jvms();
+ CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap",
+ jvms->bci(), no_memory_effects);
+
+ unc->init_req(TypeFunc::Control, unc_ctrl);
+ unc->init_req(TypeFunc::I_O, lock->i_o());
+ unc->init_req(TypeFunc::Memory, mem); // may gc ptrs
+ unc->init_req(TypeFunc::FramePtr, lock->in(TypeFunc::FramePtr));
+ unc->init_req(TypeFunc::ReturnAdr, lock->in(TypeFunc::ReturnAdr));
+ unc->init_req(TypeFunc::Parms+0, _igvn.intcon(trap_request));
+ unc->set_cnt(PROB_UNLIKELY_MAG(4));
+ unc->copy_call_debug_info(&_igvn, lock);
+
+ assert(unc->peek_monitor_box() == box, "wrong monitor");
+ assert(unc->peek_monitor_obj() == obj, "wrong monitor");
+
+ // pop monitor and push obj back on stack: we trap before the monitorenter
+ unc->pop_monitor();
+ unc->grow_stack(unc->jvms(), 1);
+ unc->set_stack(unc->jvms(), unc->jvms()->stk_size()-1, obj);
+
+ _igvn.register_new_node_with_optimizer(unc);
+
+ Node* ctrl = _igvn.transform(new ProjNode(unc, TypeFunc::Control));
+ Node* halt = _igvn.transform(new HaltNode(ctrl, lock->in(TypeFunc::FramePtr), "monitor enter on value-type"));
+ C->root()->add_req(halt);
+ }
+
// Make slow path call
CallNode *call = make_slow_call((CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(),
OptoRuntime::complete_monitor_locking_Java(), NULL, slow_path,
obj, box, NULL);
mem_phi->init_req(2, mem);
transform_later(mem_phi);
_igvn.replace_node(_memproj_fallthrough, mem_phi);
}
+ // An inline type might be returned from the call but we don't know its
+ // type. Either we get a buffered inline type (and nothing needs to be done)
+ // or one of the values being returned is the klass of the inline type
+ // and we need to allocate an inline type instance of that type and
+ // initialize it with other values being returned. In that case, we
+ // first try a fast path allocation and initialize the value with the
+ // inline klass's pack handler or we fall back to a runtime call.
+ void PhaseMacroExpand::expand_mh_intrinsic_return(CallStaticJavaNode* call) {
+ assert(call->method()->is_method_handle_intrinsic(), "must be a method handle intrinsic call");
+ Node* ret = call->proj_out_or_null(TypeFunc::Parms);
+ if (ret == NULL) {
+ return;
+ }
+ const TypeFunc* tf = call->_tf;
+ const TypeTuple* domain = OptoRuntime::store_inline_type_fields_Type()->domain_cc();
+ const TypeFunc* new_tf = TypeFunc::make(tf->domain_sig(), tf->domain_cc(), tf->range_sig(), domain);
+ call->_tf = new_tf;
+ // Make sure the change of type is applied before projections are processed by igvn
+ _igvn.set_type(call, call->Value(&_igvn));
+ _igvn.set_type(ret, ret->Value(&_igvn));
+
+ // Before any new projection is added:
+ CallProjections* projs = call->extract_projections(true, true);
+
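+ // Create placeholder nodes that temporarily stand in for the call's
+ // projections; they are replaced by the real projections once all new
+ // nodes are wired up (see the replace_node calls at the end).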
+ Node* ctl = new Node(1);
+ Node* mem = new Node(1);
+ Node* io = new Node(1);
+ Node* ex_ctl = new Node(1);
+ Node* ex_mem = new Node(1);
+ Node* ex_io = new Node(1);
+ Node* res = new Node(1);
+
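+ // The callee either returns a buffered inline type (a regular oop) or
+ // returns the field values in registers, in which case the klass
+ // pointer is returned with bit 0 set. Test that tag bit here.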
+ Node* cast = transform_later(new CastP2XNode(ctl, res));
+ Node* mask = MakeConX(0x1);
+ Node* masked = transform_later(new AndXNode(cast, mask));
+ Node* cmp = transform_later(new CmpXNode(masked, mask));
+ Node* bol = transform_later(new BoolNode(cmp, BoolTest::eq));
+ IfNode* allocation_iff = new IfNode(ctl, bol, PROB_MAX, COUNT_UNKNOWN);
+ transform_later(allocation_iff);
+ Node* allocation_ctl = transform_later(new IfTrueNode(allocation_iff));
+ Node* no_allocation_ctl = transform_later(new IfFalseNode(allocation_iff));
+
+ Node* no_allocation_res = transform_later(new CheckCastPPNode(no_allocation_ctl, res, TypeInstPtr::BOTTOM));
+
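+ // Clear the tag bit to recover the raw klass pointer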
+ Node* mask2 = MakeConX(-2);
+ Node* masked2 = transform_later(new AndXNode(cast, mask2));
+ Node* rawklassptr = transform_later(new CastX2PNode(masked2));
+ Node* klass_node = transform_later(new CheckCastPPNode(allocation_ctl, rawklassptr, TypeKlassPtr::OBJECT_OR_NULL));
+
+ Node* slowpath_bol = NULL;
+ Node* top_adr = NULL;
+ Node* old_top = NULL;
+ Node* new_top = NULL;
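+ // Fast path: bump-pointer allocation in the TLAB. The instance size is
+ // read from the klass' layout helper; take the slow path if the TLAB
+ // doesn't have enough space left.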
+ if (UseTLAB) {
+ Node* end_adr = NULL;
+ set_eden_pointers(top_adr, end_adr);
+ Node* end = make_load(ctl, mem, end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);
+ old_top = new LoadPNode(ctl, mem, top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered);
+ transform_later(old_top);
+ Node* layout_val = make_load(NULL, mem, klass_node, in_bytes(Klass::layout_helper_offset()), TypeInt::INT, T_INT);
+ Node* size_in_bytes = ConvI2X(layout_val);
+ new_top = new AddPNode(top(), old_top, size_in_bytes);
+ transform_later(new_top);
+ Node* slowpath_cmp = new CmpPNode(new_top, end);
+ transform_later(slowpath_cmp);
+ slowpath_bol = new BoolNode(slowpath_cmp, BoolTest::ge);
+ transform_later(slowpath_bol);
+ } else {
+ slowpath_bol = intcon(1);
+ top_adr = top();
+ old_top = top();
+ new_top = top();
+ }
+ IfNode* slowpath_iff = new IfNode(allocation_ctl, slowpath_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN);
+ transform_later(slowpath_iff);
+
+ Node* slowpath_true = new IfTrueNode(slowpath_iff);
+ transform_later(slowpath_true);
+
+ CallStaticJavaNode* slow_call = new CallStaticJavaNode(OptoRuntime::store_inline_type_fields_Type(),
+ StubRoutines::store_inline_type_fields_to_buf(),
+ "store_inline_type_fields",
+ call->jvms()->bci(),
+ TypePtr::BOTTOM);
+ slow_call->init_req(TypeFunc::Control, slowpath_true);
+ slow_call->init_req(TypeFunc::Memory, mem);
+ slow_call->init_req(TypeFunc::I_O, io);
+ slow_call->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
+ slow_call->init_req(TypeFunc::ReturnAdr, call->in(TypeFunc::ReturnAdr));
+ slow_call->init_req(TypeFunc::Parms, res);
+
+ Node* slow_ctl = transform_later(new ProjNode(slow_call, TypeFunc::Control));
+ Node* slow_mem = transform_later(new ProjNode(slow_call, TypeFunc::Memory));
+ Node* slow_io = transform_later(new ProjNode(slow_call, TypeFunc::I_O));
+ Node* slow_res = transform_later(new ProjNode(slow_call, TypeFunc::Parms));
+ Node* slow_catc = transform_later(new CatchNode(slow_ctl, slow_io, 2));
+ Node* slow_norm = transform_later(new CatchProjNode(slow_catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci));
+ Node* slow_excp = transform_later(new CatchProjNode(slow_catc, CatchProjNode::catch_all_index, CatchProjNode::no_handler_bci));
+
+ Node* ex_r = new RegionNode(3);
+ Node* ex_mem_phi = new PhiNode(ex_r, Type::MEMORY, TypePtr::BOTTOM);
+ Node* ex_io_phi = new PhiNode(ex_r, Type::ABIO);
+ ex_r->init_req(1, slow_excp);
+ ex_mem_phi->init_req(1, slow_mem);
+ ex_io_phi->init_req(1, slow_io);
+ ex_r->init_req(2, ex_ctl);
+ ex_mem_phi->init_req(2, ex_mem);
+ ex_io_phi->init_req(2, ex_io);
+
+ transform_later(ex_r);
+ transform_later(ex_mem_phi);
+ transform_later(ex_io_phi);
+
+ Node* slowpath_false = new IfFalseNode(slowpath_iff);
+ transform_later(slowpath_false);
+ Node* rawmem = new StorePNode(slowpath_false, mem, top_adr, TypeRawPtr::BOTTOM, new_top, MemNode::unordered);
+ transform_later(rawmem);
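+ // Initialize the object header: inline types use the always-locked
+ // mark word pattern, followed by the klass (and the klass gap if
+ // compressed class pointers are enabled).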
+ Node* mark_node = makecon(TypeRawPtr::make((address)markWord::always_locked_prototype().value()));
+ rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
+ rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA);
+ if (UseCompressedClassPointers) {
+ rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::klass_gap_offset_in_bytes(), intcon(0), T_INT);
+ }
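+ // Load the inline klass' pack handler, a generated handler that stores
+ // the returned field values into the newly allocated buffer.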
+ Node* fixed_block = make_load(slowpath_false, rawmem, klass_node, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
+ Node* pack_handler = make_load(slowpath_false, rawmem, fixed_block, in_bytes(InlineKlass::pack_handler_offset()), TypeRawPtr::BOTTOM, T_ADDRESS);
+
+ CallLeafNoFPNode* handler_call = new CallLeafNoFPNode(OptoRuntime::pack_inline_type_Type(),
+ NULL,
+ "pack handler",
+ TypeRawPtr::BOTTOM);
+ handler_call->init_req(TypeFunc::Control, slowpath_false);
+ handler_call->init_req(TypeFunc::Memory, rawmem);
+ handler_call->init_req(TypeFunc::I_O, top());
+ handler_call->init_req(TypeFunc::FramePtr, call->in(TypeFunc::FramePtr));
+ handler_call->init_req(TypeFunc::ReturnAdr, top());
+ handler_call->init_req(TypeFunc::Parms, pack_handler);
+ handler_call->init_req(TypeFunc::Parms+1, old_top);
+
+ // We don't know how many values are returned. This assumes the
+ // worst case, that all available registers are used.
+ for (uint i = TypeFunc::Parms+1; i < domain->cnt(); i++) {
+ if (domain->field_at(i) == Type::HALF) {
+ slow_call->init_req(i, top());
+ handler_call->init_req(i+1, top());
+ continue;
+ }
+ Node* proj = transform_later(new ProjNode(call, i));
+ slow_call->init_req(i, proj);
+ handler_call->init_req(i+1, proj);
+ }
+
+ // We can safepoint at that new call
+ slow_call->copy_call_debug_info(&_igvn, call);
+ transform_later(slow_call);
+ transform_later(handler_call);
+
+ Node* handler_ctl = transform_later(new ProjNode(handler_call, TypeFunc::Control));
+ rawmem = transform_later(new ProjNode(handler_call, TypeFunc::Memory));
+ Node* slowpath_false_res = transform_later(new ProjNode(handler_call, TypeFunc::Parms));
+
+ MergeMemNode* slowpath_false_mem = MergeMemNode::make(mem);
+ slowpath_false_mem->set_memory_at(Compile::AliasIdxRaw, rawmem);
+ transform_later(slowpath_false_mem);
+
+ Node* r = new RegionNode(4);
+ Node* mem_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
+ Node* io_phi = new PhiNode(r, Type::ABIO);
+ Node* res_phi = new PhiNode(r, TypeInstPtr::BOTTOM);
+
+ r->init_req(1, no_allocation_ctl);
+ mem_phi->init_req(1, mem);
+ io_phi->init_req(1, io);
+ res_phi->init_req(1, no_allocation_res);
+ r->init_req(2, slow_norm);
+ mem_phi->init_req(2, slow_mem);
+ io_phi->init_req(2, slow_io);
+ res_phi->init_req(2, slow_res);
+ r->init_req(3, handler_ctl);
+ mem_phi->init_req(3, slowpath_false_mem);
+ io_phi->init_req(3, io);
+ res_phi->init_req(3, slowpath_false_res);
+
+ transform_later(r);
+ transform_later(mem_phi);
+ transform_later(io_phi);
+ transform_later(res_phi);
+
+ assert(projs->nb_resproj == 1, "unexpected number of results");
+ _igvn.replace_in_uses(projs->fallthrough_catchproj, r);
+ _igvn.replace_in_uses(projs->fallthrough_memproj, mem_phi);
+ _igvn.replace_in_uses(projs->fallthrough_ioproj, io_phi);
+ _igvn.replace_in_uses(projs->resproj[0], res_phi);
+ _igvn.replace_in_uses(projs->catchall_catchproj, ex_r);
+ _igvn.replace_in_uses(projs->catchall_memproj, ex_mem_phi);
+ _igvn.replace_in_uses(projs->catchall_ioproj, ex_io_phi);
+
+ _igvn.replace_node(ctl, projs->fallthrough_catchproj);
+ _igvn.replace_node(mem, projs->fallthrough_memproj);
+ _igvn.replace_node(io, projs->fallthrough_ioproj);
+ _igvn.replace_node(res, projs->resproj[0]);
+ _igvn.replace_node(ex_ctl, projs->catchall_catchproj);
+ _igvn.replace_node(ex_mem, projs->catchall_memproj);
+ _igvn.replace_node(ex_io, projs->catchall_ioproj);
+ }
+
void PhaseMacroExpand::expand_subtypecheck_node(SubTypeCheckNode *check) {
assert(check->in(SubTypeCheckNode::Control) == NULL, "should be pinned");
Node* bol = check->unique_out();
Node* obj_or_subklass = check->in(SubTypeCheckNode::ObjOrSubKlass);
Node* superklass = check->in(SubTypeCheckNode::SuperKlass);
Node* subklass = NULL;
if (_igvn.type(obj_or_subklass)->isa_klassptr()) {
subklass = obj_or_subklass;
} else {
Node* k_adr = basic_plus_adr(obj_or_subklass, oopDesc::klass_offset_in_bytes());
- subklass = _igvn.transform(LoadKlassNode::make(_igvn, NULL, C->immutable_memory(), k_adr, TypeInstPtr::KLASS));
+ subklass = _igvn.transform(LoadKlassNode::make(_igvn, NULL, C->immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));
}
Node* not_subtype_ctrl = Phase::gen_subtype_check(subklass, superklass, &ctrl, NULL, _igvn);
_igvn.replace_input_of(iff, 0, C->top());
switch (n->class_id()) {
case Node::Class_Allocate:
case Node::Class_AllocateArray:
success = eliminate_allocate_node(n->as_Allocate());
break;
- case Node::Class_CallStaticJava:
- success = eliminate_boxing_node(n->as_CallStaticJava());
+ case Node::Class_CallStaticJava: {
+ CallStaticJavaNode* call = n->as_CallStaticJava();
+ if (!call->method()->is_method_handle_intrinsic()) {
+ success = eliminate_boxing_node(n->as_CallStaticJava());
+ }
break;
+ }
case Node::Class_Lock:
case Node::Class_Unlock:
assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
_has_locks = true;
break;
// Remove it from macro list and put on IGVN worklist to optimize.
C->remove_macro_node(n);
_igvn._worklist.push(n);
success = true;
} else if (n->Opcode() == Op_CallStaticJava) {
- // Remove it from macro list and put on IGVN worklist to optimize.
- C->remove_macro_node(n);
- _igvn._worklist.push(n);
- success = true;
+ CallStaticJavaNode* call = n->as_CallStaticJava();
+ if (!call->method()->is_method_handle_intrinsic()) {
+ // Remove it from macro list and put on IGVN worklist to optimize.
+ C->remove_macro_node(n);
+ _igvn._worklist.push(n);
+ success = true;
+ }
} else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
_igvn.replace_node(n, n->in(1));
success = true;
#if INCLUDE_RTM_OPT
} else if ((n->Opcode() == Op_Opaque3) && ((Opaque3Node*)n)->rtm_opt()) {
break;
case Node::Class_SubTypeCheck:
expand_subtypecheck_node(n->as_SubTypeCheck());
assert(C->macro_count() == (old_macro_count - 1), "expansion must have deleted one node from macro list");
break;
+ case Node::Class_CallStaticJava:
+ expand_mh_intrinsic_return(n->as_CallStaticJava());
+ C->remove_macro_node(n);
+ assert(C->macro_count() == (old_macro_count - 1), "expansion must have deleted one node from macro list");
+ break;
default:
assert(false, "unknown node type in macro list");
}
assert(C->macro_count() < macro_count, "must have deleted a node from macro list");
if (C->failing()) return true;