src/hotspot/share/opto/compile.hpp
class AddPNode;
class Block;
class Bundle;
class CallGenerator;
+ class CallNode;
class CloneMap;
class ConnectionGraph;
class IdealGraphPrinter;
class InlineTree;
class Int_Array;
class TypePtr;
class TypeOopPtr;
class TypeFunc;
class TypeVect;
class Unique_Node_List;
+ class InlineTypeBaseNode;
class nmethod;
class WarmCallInfo;
class Node_Stack;
struct Final_Reshape_Counts;
// JSR 292
bool _has_method_handle_invokes; // True if this method has MethodHandle invokes.
RTMState _rtm_state; // State of Restricted Transactional Memory usage
int _loop_opts_cnt; // loop opts round
bool _clinit_barrier_on_entry; // True if clinit barrier is needed on nmethod entry
+ bool _has_flattened_accesses; // Any known flattened array accesses?
+ bool _flattened_accesses_share_alias; // Initially, all flattened array accesses share a single memory slice
// Compilation environment.
Arena _comp_arena; // Arena with lifetime equivalent to Compile
void* _barrier_set_state; // Potential GC barrier state for Compile
ciEnv* _env; // CI interface
GrowableArray<Node*>* _macro_nodes; // List of nodes which need to be expanded before matching.
GrowableArray<Node*>* _predicate_opaqs; // List of Opaque1 nodes for the loop predicates.
GrowableArray<Node*>* _expensive_nodes; // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
GrowableArray<Node*>* _range_check_casts; // List of CastII nodes with a range check dependency
GrowableArray<Node*>* _opaque4_nodes; // List of Opaque4 nodes that have a default value
+ GrowableArray<Node*>* _inline_type_nodes; // List of InlineType nodes
ConnectionGraph* _congraph;
#ifndef PRODUCT
IdealGraphPrinter* _printer;
static IdealGraphPrinter* _debug_file_printer;
static IdealGraphPrinter* _debug_network_printer;
bool profile_rtm() const { return _rtm_state == ProfileRTM; }
uint max_node_limit() const { return (uint)_max_node_limit; }
void set_max_node_limit(uint n) { _max_node_limit = n; }
bool clinit_barrier_on_entry() { return _clinit_barrier_on_entry; }
void set_clinit_barrier_on_entry(bool z) { _clinit_barrier_on_entry = z; }
+ void set_flattened_accesses() { _has_flattened_accesses = true; }
+ bool flattened_accesses_share_alias() const { return _flattened_accesses_share_alias; }
+ void set_flattened_accesses_share_alias(bool z) { _flattened_accesses_share_alias = z; }
+
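A minimal usage sketch, not part of the patch: how a parse-time flattened array access is expected to drive these flags. The helper below is hypothetical; only the Compile accessors are from this change.

    // Hypothetical caller, e.g. from flattened array load/store parsing:
    static void record_flat_array_access(Compile* C) {
      C->set_flattened_accesses();  // at least one flattened access exists
      if (C->flattened_accesses_share_alias()) {
        // Early in the compile, all flattened accesses sit on one memory
        // slice; adjust_flattened_array_access_aliases() can later split
        // them and clear the flag via set_flattened_accesses_share_alias(false).
      }
    }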
+ // Support for scalarized inline type calling convention
+ bool has_scalarized_args() const { return _method != NULL && _method->has_scalarized_args(); }
+ bool needs_stack_repair() const { return _method != NULL && _method->get_Method()->c2_needs_stack_repair(); }
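Hedged sketch of how code emission might consult these predicates; the function below is illustrative only, not HotSpot code.

    static void note_entry_requirements(Compile* C) {
      if (C->has_scalarized_args()) {
        // Inline-type arguments are passed as their individual fields,
        // so the scalarized entry point sees a wider argument list.
      }
      if (C->needs_stack_repair()) {
        // The method must be able to repair the caller's frame size when
        // entered through the mismatching (non-scalarized) entry point.
      }
    }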
// check the CompilerOracle for special behaviours for this compile
bool method_has_option(const char * option) {
return method() != NULL && method()->has_option(option);
}
Node* opaque4_node(int idx) const { return _opaque4_nodes->at(idx); }
int opaque4_count() const { return _opaque4_nodes->length(); }
void remove_opaque4_nodes(PhaseIterGVN &igvn);
+ // Keep track of inline type nodes for later processing
+ void add_inline_type(Node* n);
+ void remove_inline_type(Node* n);
+ void process_inline_types(PhaseIterGVN &igvn, bool post_ea = false);
+
+ void adjust_flattened_array_access_aliases(PhaseIterGVN& igvn);
+
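Sketch of the intended bookkeeping discipline, assuming InlineType nodes are registered when created and dropped when folded away. Only the Compile methods declared above are from this patch; the wrappers are hypothetical.

    void register_inline_type(Compile* C, Node* vt) {
      C->add_inline_type(vt);     // track for later scalarization
    }
    void on_inline_type_folded(Compile* C, Node* vt) {
      C->remove_inline_type(vt);  // no longer needs processing
    }
    void after_escape_analysis(Compile* C, PhaseIterGVN& igvn) {
      C->process_inline_types(igvn, /*post_ea=*/ true);
    }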
void sort_macro_nodes();
// remove the opaque nodes that protect the predicates so that the unused checks and
// uncommon traps will be eliminated from the graph.
void cleanup_loop_predicates(PhaseIterGVN &igvn);
_last_tf_m = m;
_last_tf = tf;
}
AliasType* alias_type(int idx) { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
! AliasType* alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
bool have_alias_type(const TypePtr* adr_type);
AliasType* alias_type(ciField* field);
! int get_alias_index(const TypePtr* at) { return alias_type(at)->index(); }
const TypePtr* get_adr_type(uint aidx) { return alias_type(aidx)->adr_type(); }
int get_general_index(uint aidx) { return alias_type(aidx)->general_index(); }
// Building nodes
void rethrow_exceptions(JVMState* jvms);
_last_tf_m = m;
_last_tf = tf;
}
AliasType* alias_type(int idx) { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
! AliasType* alias_type(const TypePtr* adr_type, ciField* field = NULL, bool uncached = false) { return find_alias_type(adr_type, false, field, uncached); }
bool have_alias_type(const TypePtr* adr_type);
AliasType* alias_type(ciField* field);
! int get_alias_index(const TypePtr* at, bool uncached = false) { return alias_type(at, NULL, uncached)->index(); }
const TypePtr* get_adr_type(uint aidx) { return alias_type(aidx)->adr_type(); }
int get_general_index(uint aidx) { return alias_type(aidx)->general_index(); }
// Building nodes
void rethrow_exceptions(JVMState* jvms);
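Illustrative use of the new uncached lookup path. Assumption: a pass that splits flattened array slices must bypass the alias cache so a fresh AliasType can be created instead of reusing the shared one; the helper below is hypothetical.

    int fresh_flat_alias_idx(Compile* C, const TypePtr* adr_type) {
      // uncached=true presumably skips the alias cache, so repeated
      // lookups of the same address type can yield distinct slices.
      return C->get_alias_index(adr_type, /*uncached=*/ true);
    }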
// Management of the AliasType table.
void grow_alias_types();
AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
const TypePtr *flatten_alias_type(const TypePtr* adr_type) const;
! AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field);
void verify_top(Node*) const PRODUCT_RETURN;
// Intrinsic setup.
void register_library_intrinsics(); // initializer
// Management of the AliasType table.
void grow_alias_types();
AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
const TypePtr *flatten_alias_type(const TypePtr* adr_type) const;
! AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field, bool uncached = false);
void verify_top(Node*) const PRODUCT_RETURN;
// Intrinsic setup.
void register_library_intrinsics(); // initializer
Node* ctrl = NULL);
// Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl);
+ Node* optimize_acmp(PhaseGVN* phase, Node* a, Node* b);
+
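Hedged sketch of a call site for optimize_acmp, e.g. from pointer-compare idealization. Everything except the Compile::optimize_acmp declaration above is hypothetical.

    Node* try_optimize_acmp(PhaseGVN* phase, Node* cmp, Node* a, Node* b) {
      // acmp on inline types is not plain reference equality; if one
      // operand cannot be an inline type, the check may simplify.
      Node* opt = Compile::current()->optimize_acmp(phase, a, b);
      return (opt != NULL) ? opt : cmp;
    }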
// Auxiliary method for randomized fuzzing/stressing
static bool randomized_select(int count);
// supporting clone_map
CloneMap& clone_map();