/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_COMPILE_HPP
#define SHARE_OPTO_COMPILE_HPP

#include "asm/codeBuffer.hpp"
#include "ci/compilerInterface.hpp"
#include "code/debugInfoRec.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/compileBroker.hpp"
#include "libadt/dict.hpp"
#include "libadt/vectset.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/phasetype.hpp"
#include "opto/phase.hpp"
#include "opto/regmask.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/ticks.hpp"

class AddPNode;
class Block;
class Bundle;
class C2Compiler;
class CallGenerator;
class CloneMap;
class ConnectionGraph;
class IdealGraphPrinter;
class InlineTree;
class Int_Array;
class Matcher;
class MachConstantNode;
class MachConstantBaseNode;
class MachNode;
class MachOper;
class MachSafePointNode;
class Node;
class Node_Array;
class Node_Notes;
class NodeCloneInfo;
class OptoReg;
class PhaseCFG;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseCCP;
class PhaseCCP_DCE;
class RootNode;
class relocInfo;
class Scope;
class StartNode;
class SafePointNode;
class JVMState;
class Type;
class TypeData;
class TypeInt;
class TypePtr;
class TypeOopPtr;
class TypeFunc;
class Unique_Node_List;
class nmethod;
class WarmCallInfo;
class Node_Stack;
struct Final_Reshape_Counts;

enum LoopOptsMode {
  LoopOptsDefault,
  LoopOptsNone,
  LoopOptsShenandoahExpand,
  LoopOptsShenandoahPostExpand,
  LoopOptsSkipSplitIf,
  LoopOptsVerify
};

typedef unsigned int node_idx_t;
class NodeCloneInfo {
 private:
  uint64_t _idx_clone_orig;
 public:

  void set_idx(node_idx_t idx) {
    _idx_clone_orig = (_idx_clone_orig & CONST64(0xFFFFFFFF00000000)) | idx;
  }
  node_idx_t idx() const { return (node_idx_t)(_idx_clone_orig & 0xFFFFFFFF); }

  void set_gen(int generation) {
    uint64_t g = (uint64_t)generation << 32;
    _idx_clone_orig = (_idx_clone_orig & 0xFFFFFFFF) | g;
  }
  int gen() const { return (int)(_idx_clone_orig >> 32); }

  void set(uint64_t x) { _idx_clone_orig = x; }
  void set(node_idx_t x, int g) { set_idx(x); set_gen(g); }
  uint64_t get() const { return _idx_clone_orig; }

  NodeCloneInfo(uint64_t idx_clone_orig) : _idx_clone_orig(idx_clone_orig) {}
  NodeCloneInfo(node_idx_t x, int g) : _idx_clone_orig(0) { set(x, g); }

  void dump() const;
};
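
// NodeCloneInfo packs a node index and a clone generation into one 64-bit
// word: the original index lives in the low 32 bits, the generation in the
// high 32 bits.  Illustrative sketch only (not part of the build):
//
//   NodeCloneInfo ci(/*idx*/ 42, /*gen*/ 3);
//   assert(ci.idx() == 42 && ci.gen() == 3, "round-trips");
//   assert(ci.get() == (((uint64_t)3 << 32) | 42), "gen in high word, idx in low word");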

class CloneMap {
  friend class Compile;
 private:
  bool      _debug;
  Dict*     _dict;
  int       _clone_idx;   // current cloning iteration/generation in loop unroll
 public:
  void*     _2p(node_idx_t key)   const          { return (void*)(intptr_t)key; } // 2 conversion functions to make gcc happy
  node_idx_t _2_node_idx_t(const void* k) const  { return (node_idx_t)(intptr_t)k; }
  Dict*     dict()                const          { return _dict; }
  void insert(node_idx_t key, uint64_t val)      { assert(_dict->operator[](_2p(key)) == NULL, "key existed"); _dict->Insert(_2p(key), (void*)val); }
  void insert(node_idx_t key, NodeCloneInfo& ci) { insert(key, ci.get()); }
  void remove(node_idx_t key)                    { _dict->Delete(_2p(key)); }
  uint64_t value(node_idx_t key)  const          { return (uint64_t)_dict->operator[](_2p(key)); }
  node_idx_t idx(node_idx_t key)  const          { return NodeCloneInfo(value(key)).idx(); }
  int gen(node_idx_t key)         const          { return NodeCloneInfo(value(key)).gen(); }
  int gen(const void* k)          const          { return gen(_2_node_idx_t(k)); }
  int max_gen()                   const;
  void clone(Node* old, Node* nnn, int gen);
  void verify_insert_and_clone(Node* old, Node* nnn, int gen);
  void dump(node_idx_t key)       const;

  int  clone_idx() const                         { return _clone_idx; }
  void set_clone_idx(int x)                      { _clone_idx = x; }
  bool is_debug()                 const          { return _debug; }
  void set_debug(bool debug)                     { _debug = debug; }
  static const char* debug_option_name;

  bool same_idx(node_idx_t k1, node_idx_t k2)  const { return idx(k1) == idx(k2); }
  bool same_gen(node_idx_t k1, node_idx_t k2)  const { return gen(k1) == gen(k2); }
};
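
// Illustrative sketch of how a loop transformation is expected to record a
// clone (hedged: 'old_node'/'new_node' are placeholders, and the clone_map()
// accessor on Compile is assumed; real call sites live in the loop opts code):
//
//   CloneMap& cm = C->clone_map();
//   cm.set_clone_idx(cm.clone_idx() + 1);                          // start a new generation
//   cm.verify_insert_and_clone(old_node, new_node, cm.clone_idx());
//   assert(cm.gen(new_node->_idx) == cm.clone_idx(), "clone recorded under current generation");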

//------------------------------Compile----------------------------------------
// This class defines a top-level Compiler invocation.

class Compile : public Phase {
  friend class VMStructs;

 public:
  // Fixed alias indexes.  (See also MergeMemNode.)
  enum {
    AliasIdxTop = 1,  // pseudo-index, aliases to nothing (used as sentinel value)
    AliasIdxBot = 2,  // pseudo-index, aliases to everything
    AliasIdxRaw = 3   // hard-wired index for TypeRawPtr::BOTTOM
  };
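
  // Illustrative sketch of what the fixed indexes anchor (hedged; the actual
  // mapping is implemented by find_alias_type() in compile.cpp):
  //
  //   assert(C->get_alias_index(TypeRawPtr::BOTTOM) == AliasIdxRaw, "hard-wired raw slice");
  //   // an address type about which nothing is known falls back to AliasIdxBot,
  //   // the pseudo-slice that aliases every memory slice.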

  // Variant of TraceTime(NULL, &_t_accumulator, CITime);
  // Integrated with logging.  If logging is turned on, and CITimeVerbose is true,
  // then brackets are put into the log, with time stamps and node counts.
  // (The time collection itself is always conditionalized on CITime.)
  class TracePhase : public TraceTime {
   private:
    Compile*    C;
    CompileLog* _log;
    const char* _phase_name;
    bool _dolog;
   public:
    TracePhase(const char* name, elapsedTimer* accumulator);
    ~TracePhase();
  };
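
  // Illustrative sketch of the intended scoped use (hedged: the accumulator
  // name is an assumption; real call sites live in compile.cpp):
  //
  //   {
  //     TracePhase tp("optimizer", &timers[_t_optimizer]);  // times this scope when CITime is on
  //     // ... run the optimizer ...
  //   }                                                     // destructor logs the bracket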

  // Information per category of alias (memory slice)
  class AliasType {
   private:
    friend class Compile;

    int             _index;         // unique index, used with MergeMemNode
    const TypePtr*  _adr_type;      // normalized address type
    ciField*        _field;         // relevant instance field, or null if none
    const Type*     _element;       // relevant array element type, or null if none
    bool            _is_rewritable; // false if the memory is write-once only
    int             _general_index; // if this type is an instance, the general
                                    // type that it is an instance of

    void Init(int i, const TypePtr* at);

   public:
    int             index()         const { return _index; }
    const TypePtr*  adr_type()      const { return _adr_type; }
    ciField*        field()         const { return _field; }
    const Type*     element()       const { return _element; }
    bool            is_rewritable() const { return _is_rewritable; }
    bool            is_volatile()   const { return (_field ? _field->is_volatile() : false); }
    int             general_index() const { return (_general_index != 0) ? _general_index : _index; }

    void set_rewritable(bool z) { _is_rewritable = z; }
    void set_field(ciField* f) {
      assert(!_field,"");
      _field = f;
      if (f->is_final() || f->is_stable()) {
        // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops.
        _is_rewritable = false;
      }
    }
    void set_element(const Type* e) {
      assert(_element == NULL, "");
      _element = e;
    }

    BasicType basic_type() const;

    void print_on(outputStream* st) PRODUCT_RETURN;
  };

  enum {
    logAliasCacheSize = 6,
    AliasCacheSize = (1<<logAliasCacheSize)
  };
  struct AliasCacheEntry { const TypePtr* _adr_type; int _index; };  // simple duple type
  enum {
    trapHistLength = MethodData::_trap_hist_limit
  };

  // Constant entry of the constant table.
  class Constant {
  private:
    BasicType _type;
    union {
      jvalue    _value;
      Metadata* _metadata;
    } _v;
    int       _offset;         // offset of this constant (in bytes) relative to the constant table base.
    float     _freq;
    bool      _can_be_reused;  // true (default) if the value can be shared with other users.

  public:
    Constant() : _type(T_ILLEGAL), _offset(-1), _freq(0.0f), _can_be_reused(true) { _v._value.l = 0; }
    Constant(BasicType type, jvalue value, float freq = 0.0f, bool can_be_reused = true) :
      _type(type),
      _offset(-1),
      _freq(freq),
      _can_be_reused(can_be_reused)
    {
      assert(type != T_METADATA, "wrong constructor");
      _v._value = value;
    }
    Constant(Metadata* metadata, bool can_be_reused = true) :
      _type(T_METADATA),
      _offset(-1),
      _freq(0.0f),
      _can_be_reused(can_be_reused)
    {
      _v._metadata = metadata;
    }

    bool operator==(const Constant& other);

    BasicType type()      const    { return _type; }

    jint    get_jint()    const    { return _v._value.i; }
    jlong   get_jlong()   const    { return _v._value.j; }
    jfloat  get_jfloat()  const    { return _v._value.f; }
    jdouble get_jdouble() const    { return _v._value.d; }
    jobject get_jobject() const    { return _v._value.l; }

    Metadata* get_metadata() const { return _v._metadata; }

    int         offset()  const    { return _offset; }
    void    set_offset(int offset) {        _offset = offset; }

    float       freq()    const    { return _freq;         }
    void    inc_freq(float freq)   {        _freq += freq; }

    bool    can_be_reused() const  { return _can_be_reused; }
  };
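
  // Illustrative sketch: wrapping a 32-bit integer value in a Constant entry
  // (the numbers are made up; T_METADATA entries use the Metadata* constructor
  // instead of a jvalue):
  //
  //   jvalue v; v.i = 17;
  //   Constant con(T_INT, v, /*freq*/ 2.0f);
  //   assert(con.type() == T_INT && con.get_jint() == 17 && con.can_be_reused(), "");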

  // Constant table.
  class ConstantTable {
  private:
    GrowableArray<Constant> _constants;          // Constants of this table.
    int                     _size;               // Size in bytes the emitted constant table takes (including padding).
    int                     _table_base_offset;  // Offset of the table base that gets added to the constant offsets.
    int                     _nof_jump_tables;    // Number of jump-tables in this constant table.

    static int qsort_comparator(Constant* a, Constant* b);

    // We use negative frequencies to keep the order of the
    // jump-tables in which they were added.  Otherwise we get into
    // trouble with relocation.
    float next_jump_table_freq() { return -1.0f * (++_nof_jump_tables); }

  public:
    ConstantTable() :
      _size(-1),
      _table_base_offset(-1),  // We can use -1 here since the constant table is always bigger than 2 bytes (-(size / 2), see MachConstantBaseNode::emit).
      _nof_jump_tables(0)
    {}

    int size() const { assert(_size != -1, "not calculated yet"); return _size; }

    int calculate_table_base_offset() const;  // AD specific
    void set_table_base_offset(int x)  { assert(_table_base_offset == -1 || x == _table_base_offset, "can't change"); _table_base_offset = x; }
    int      table_base_offset() const { assert(_table_base_offset != -1, "not set yet");                      return _table_base_offset; }

    void emit(CodeBuffer& cb);

    // Returns the offset of the last entry (the top) of the constant table.
    int  top_offset() const { assert(_constants.top().offset() != -1, "not bound yet"); return _constants.top().offset(); }

    void calculate_offsets_and_size();
    int  find_offset(Constant& con) const;

    void     add(Constant& con);
    Constant add(MachConstantNode* n, BasicType type, jvalue value);
    Constant add(Metadata* metadata);
    Constant add(MachConstantNode* n, MachOper* oper);
    Constant add(MachConstantNode* n, jint i) {
      jvalue value; value.i = i;
      return add(n, T_INT, value);
    }
    Constant add(MachConstantNode* n, jlong j) {
      jvalue value; value.j = j;
      return add(n, T_LONG, value);
    }
    Constant add(MachConstantNode* n, jfloat f) {
      jvalue value; value.f = f;
      return add(n, T_FLOAT, value);
    }
    Constant add(MachConstantNode* n, jdouble d) {
      jvalue value; value.d = d;
      return add(n, T_DOUBLE, value);
    }

    // Jump-table
    Constant  add_jump_table(MachConstantNode* n);
    void     fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const;
  };
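
  // Illustrative sketch of the expected lifecycle (hedged: 'mcn' stands in for
  // some MachConstantNode; the real call sites are in the matching/output code):
  //
  //   ConstantTable& ct = C->constant_table();
  //   Constant con = ct.add(mcn, (jint)17);   // collect constants first
  //   ct.calculate_offsets_and_size();        // then lay the table out
  //   int off = ct.find_offset(con);          // offsets are meaningful only after layout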

 private:
  // Fixed parameters to this compilation.
  const int             _compile_id;
  const bool            _save_argument_registers; // save/restore arg regs for trampolines
  const bool            _subsume_loads;         // Load can be matched as part of a larger op.
  const bool            _do_escape_analysis;    // Do escape analysis.
  const bool            _eliminate_boxing;      // Do boxing elimination.
  ciMethod*             _method;                // The method being compiled.
  int                   _entry_bci;             // entry bci for osr methods.
  const TypeFunc*       _tf;                    // My kind of signature
  InlineTree*           _ilt;                   // Ditto (temporary).
  address               _stub_function;         // VM entry for stub being compiled, or NULL
  const char*           _stub_name;             // Name of stub or adapter being compiled, or NULL
  address               _stub_entry_point;      // Compile code entry for generated stub, or NULL

  // Control of this compilation.
  int                   _max_inline_size;       // Max inline size for this compilation
  int                   _freq_inline_size;      // Max hot method inline size for this compilation
  int                   _fixed_slots;           // count of frame slots not allocated by the register
                                                // allocator i.e. locks, original deopt pc, etc.
  uintx                 _max_node_limit;        // Max unique node count during a single compilation.
  // For deopt
  int                   _orig_pc_slot;
  int                   _orig_pc_slot_offset_in_bytes;

  int                   _major_progress;        // Count of something big happening
  bool                  _inlining_progress;     // progress doing incremental inlining?
  bool                  _inlining_incrementally;// Are we doing incremental inlining (post parse)
  bool                  _do_cleanup;            // Cleanup is needed before proceeding with incremental inlining
  bool                  _has_loops;             // True if the method _may_ have some loops
  bool                  _has_split_ifs;         // True if the method _may_ have some split-if
  bool                  _has_unsafe_access;     // True if the method _may_ produce faults in unsafe loads or stores.
  bool                  _has_stringbuilder;     // True if StringBuffers or StringBuilders are allocated
  bool                  _has_boxed_value;       // True if a boxed object is allocated
  bool                  _has_reserved_stack_access; // True if the method or an inlined method is annotated with ReservedStackAccess
  uint                  _max_vector_size;       // Maximum size of generated vectors
  bool                  _clear_upper_avx;       // Clear upper bits of ymm registers using vzeroupper
  uint                  _trap_hist[trapHistLength];  // Cumulative traps
  bool                  _trap_can_recompile;    // Have we emitted a recompiling trap?
  uint                  _decompile_count;       // Cumulative decompilation counts.
  bool                  _do_inlining;           // True if we intend to do inlining
  bool                  _do_scheduling;         // True if we intend to do scheduling
  bool                  _do_freq_based_layout;  // True if we intend to do frequency based block layout
  bool                  _do_count_invocations;  // True if we generate code to count invocations
  bool                  _do_method_data_update; // True if we generate code to update MethodData*s
  bool                  _do_vector_loop;        // True if allowed to execute loop in parallel iterations
  bool                  _use_cmove;             // True if CMove should be used without profitability analysis
  bool                  _age_code;              // True if we need to profile code age (decrement the aging counter)
  int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
  bool                  _print_assembly;        // True if we should dump assembly code for this compilation
  bool                  _print_inlining;        // True if we should print inlining for this compilation
  bool                  _print_intrinsics;      // True if we should print intrinsics for this compilation
#ifndef PRODUCT
  bool                  _trace_opto_output;
  bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
#endif
  bool                  _has_irreducible_loop;  // Found irreducible loops
  // JSR 292
  bool                  _has_method_handle_invokes; // True if this method has MethodHandle invokes.
  RTMState              _rtm_state;             // State of Restricted Transactional Memory usage
  int                   _loop_opts_cnt;         // loop opts round
  bool                  _clinit_barrier_on_entry; // True if clinit barrier is needed on nmethod entry

  // Compilation environment.
  Arena                 _comp_arena;            // Arena with lifetime equivalent to Compile
  void*                 _barrier_set_state;     // Potential GC barrier state for Compile
  ciEnv*                _env;                   // CI interface
  DirectiveSet*         _directive;             // Compiler directive
  CompileLog*           _log;                   // from CompilerThread
  const char*           _failure_reason;        // for record_failure/failing pattern
  GrowableArray<CallGenerator*>* _intrinsics;   // List of intrinsics.
  GrowableArray<Node*>* _macro_nodes;           // List of nodes which need to be expanded before matching.
  GrowableArray<Node*>* _predicate_opaqs;       // List of Opaque1 nodes for the loop predicates.
  GrowableArray<Node*>* _expensive_nodes;       // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
  GrowableArray<Node*>* _range_check_casts;     // List of CastII nodes with a range check dependency
  GrowableArray<Node*>* _opaque4_nodes;         // List of Opaque4 nodes that have a default value
  ConnectionGraph*      _congraph;
#ifndef PRODUCT
  IdealGraphPrinter*    _printer;
#endif


  // Node management
  uint                  _unique;                // Counter for unique Node indices
  VectorSet             _dead_node_list;        // Set of dead nodes
  uint                  _dead_node_count;       // Number of dead nodes; VectorSet::Size() is O(N).
                                                // So use this to keep count and make the call O(1).
  DEBUG_ONLY( Unique_Node_List* _modified_nodes; )  // List of nodes whose inputs were modified

  debug_only(static int _debug_idx;)            // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>
  Arena                 _node_arena;            // Arena for new-space Nodes
  Arena                 _old_arena;             // Arena for old-space Nodes, lifetime during xform
  RootNode*             _root;                  // Unique root of compilation, or NULL after bail-out.
  Node*                 _top;                   // Unique top node.  (Reset by various phases.)

  Node*                 _immutable_memory;      // Initial memory state

  Node*                 _recent_alloc_obj;
  Node*                 _recent_alloc_ctl;

  // Constant table
  ConstantTable         _constant_table;        // The constant table for this compile.
  MachConstantBaseNode* _mach_constant_base_node;  // Constant table base node singleton.


  // Blocked array of debugging and profiling information,
  // tracked per node.
  enum { _log2_node_notes_block_size = 8,
         _node_notes_block_size = (1<<_log2_node_notes_block_size)
  };
  GrowableArray<Node_Notes*>* _node_note_array;
  Node_Notes*           _default_node_notes;  // default notes for new nodes

  // After parsing and every bulk phase we hang onto the Root instruction.
  // The RootNode instruction is where the whole program begins.  It produces
  // the initial Control and BOTTOM for everybody else.

  // Type management
  Arena                 _Compile_types;         // Arena for all types
  Arena*                _type_arena;            // Alias for _Compile_types except in Initialize_shared()
  Dict*                 _type_dict;             // Intern table
  CloneMap              _clone_map;             // used for recording history of cloned nodes
  void*                 _type_hwm;              // Last allocation (see Type::operator new/delete)
  size_t                _type_last_size;        // Last allocation size (see Type::operator new/delete)
  ciMethod*             _last_tf_m;             // Cache for
  const TypeFunc*       _last_tf;               //  TypeFunc::make
  AliasType**           _alias_types;           // List of alias types seen so far.
  int                   _num_alias_types;       // Logical length of _alias_types
  int                   _max_alias_types;       // Physical length of _alias_types
  AliasCacheEntry       _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking

  // Parsing, optimization
  PhaseGVN*             _initial_gvn;           // Results of parse-time PhaseGVN
  Unique_Node_List*     _for_igvn;              // Initial work-list for next round of Iterative GVN
  WarmCallInfo*         _warm_calls;            // Sorted work-list for heat-based inlining.

  GrowableArray<CallGenerator*> _late_inlines;        // List of CallGenerators to be revisited after
                                                      // main parsing has finished.
  GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations

  GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations

  int                           _late_inlines_pos;    // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
  uint                          _number_of_mh_late_inlines; // number of method handle late inlining still pending


  // Inlining may not happen in parse order which would make
  // PrintInlining output confusing. Keep track of PrintInlining
  // pieces in order.
  class PrintInliningBuffer : public ResourceObj {
   private:
    CallGenerator* _cg;
    stringStream* _ss;

   public:
    PrintInliningBuffer()
      : _cg(NULL) { _ss = new stringStream(); }

    void freeStream() { _ss->~stringStream(); _ss = NULL; }

    stringStream* ss() const { return _ss; }
    CallGenerator* cg() const { return _cg; }
    void set_cg(CallGenerator* cg) { _cg = cg; }
  };

  stringStream* _print_inlining_stream;
  GrowableArray<PrintInliningBuffer>* _print_inlining_list;
  int _print_inlining_idx;
  char* _print_inlining_output;

  // Only keep nodes in the expensive node list that need to be optimized
  void cleanup_expensive_nodes(PhaseIterGVN &igvn);
  // Use for sorting expensive nodes to bring similar nodes together
  static int cmp_expensive_nodes(Node** n1, Node** n2);
  // Expensive nodes list already sorted?
  bool expensive_nodes_sorted() const;
  // Remove the speculative part of types and clean up the graph
  void remove_speculative_types(PhaseIterGVN &igvn);

  void* _replay_inline_data; // Pointer to data loaded from file

  void print_inlining_stream_free();
  void print_inlining_init();
  void print_inlining_reinit();
  void print_inlining_commit();
  void print_inlining_push();
  PrintInliningBuffer& print_inlining_current();

  void log_late_inline_failure(CallGenerator* cg, const char* msg);

 public:

  void* barrier_set_state() const { return _barrier_set_state; }

  outputStream* print_inlining_stream() const {
    assert(print_inlining() || print_intrinsics(), "PrintInlining off?");
    return _print_inlining_stream;
  }

  void print_inlining_update(CallGenerator* cg);
  void print_inlining_update_delayed(CallGenerator* cg);
  void print_inlining_move_to(CallGenerator* cg);
  void print_inlining_assert_ready();
  void print_inlining_reset();

  void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
    stringStream ss;
    CompileTask::print_inlining_inner(&ss, method, inline_level, bci, msg);
    print_inlining_stream()->print("%s", ss.as_string());
  }

#ifndef PRODUCT
  IdealGraphPrinter* printer() { return _printer; }
#endif

  void log_late_inline(CallGenerator* cg);
  void log_inline_id(CallGenerator* cg);
  void log_inline_failure(const char* msg);

  void* replay_inline_data() const { return _replay_inline_data; }

  // Dump inlining replay data to the stream.
  void dump_inline_data(outputStream* out);

 private:
  // Matching, CFG layout, allocation, code generation
  PhaseCFG*             _cfg;                   // Results of CFG finding
  bool                  _select_24_bit_instr;   // We selected an instruction with a 24-bit result
  bool                  _in_24_bit_fp_mode;     // We are emitting instructions with 24-bit results
  int                   _java_calls;            // Number of java calls in the method
  int                   _inner_loops;           // Number of inner loops in the method
  Matcher*              _matcher;               // Engine to map ideal to machine instructions
  PhaseRegAlloc*        _regalloc;              // Results of register allocation.
  int                   _frame_slots;           // Size of total frame in stack slots
  CodeOffsets           _code_offsets;          // Offsets into the code for various interesting entries
  RegMask               _FIRST_STACK_mask;      // All stack slots usable for spills (depends on frame layout)
  Arena*                _indexSet_arena;        // control IndexSet allocation within PhaseChaitin
  void*                 _indexSet_free_block_list; // free list of IndexSet bit blocks
  int                   _interpreter_frame_size;

  uint                  _node_bundling_limit;
  Bundle*               _node_bundling_base;    // Information for instruction bundling

  // Instruction bits passed off to the VM
  int                   _method_size;           // Size of nmethod code segment in bytes
  CodeBuffer            _code_buffer;           // Where the code is assembled
  int                   _first_block_size;      // Size of unvalidated entry point code / OSR poison code
  ExceptionHandlerTable _handler_table;         // Table of native-code exception handlers
  ImplicitExceptionTable _inc_table;            // Table of implicit null checks in native code
  OopMapSet*            _oop_map_set;           // Table of oop maps (one for each safepoint location)
  static int            _CompiledZap_count;     // counter compared against CompileZap[First/Last]
  BufferBlob*           _scratch_buffer_blob;   // For temporary code buffers.
  relocInfo*            _scratch_locs_memory;   // For temporary code buffers.
  int                   _scratch_const_size;    // For temporary code buffers.
  bool                  _in_scratch_emit_size;  // true when in scratch_emit_size.

  void reshape_address(AddPNode* n);

 public:
  // Accessors

  // The Compile instance currently active in this (compiler) thread.
  static Compile* current() {
    return (Compile*) ciEnv::current()->compiler_data();
  }

  // ID for this compilation.  Useful for setting breakpoints in the debugger.
  int               compile_id() const          { return _compile_id; }
  DirectiveSet*     directive() const           { return _directive; }

  // Does this compilation allow instructions to subsume loads?  User
  // instructions that subsume a load may result in an unschedulable
  // instruction sequence.
  bool              subsume_loads() const       { return _subsume_loads; }
  /** Do escape analysis. */
  bool              do_escape_analysis() const  { return _do_escape_analysis; }
  /** Do boxing elimination. */
  bool              eliminate_boxing() const    { return _eliminate_boxing; }
  /** Do aggressive boxing elimination. */
  bool              aggressive_unboxing() const { return _eliminate_boxing && AggressiveUnboxing; }
  bool              save_argument_registers() const { return _save_argument_registers; }


  // Other fixed compilation parameters.
  ciMethod*         method() const              { return _method; }
  int               entry_bci() const           { return _entry_bci; }
  bool              is_osr_compilation() const  { return _entry_bci != InvocationEntryBci; }
  bool              is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); }
  const TypeFunc*   tf() const                  { assert(_tf!=NULL, ""); return _tf; }
  void         init_tf(const TypeFunc* tf)      { assert(_tf==NULL, ""); _tf = tf; }
  InlineTree*       ilt() const                 { return _ilt; }
  address           stub_function() const       { return _stub_function; }
  const char*       stub_name() const           { return _stub_name; }
  address           stub_entry_point() const    { return _stub_entry_point; }

  // Control of this compilation.
  int               fixed_slots() const         { assert(_fixed_slots >= 0, "");         return _fixed_slots; }
  void          set_fixed_slots(int n)          { _fixed_slots = n; }
  int               major_progress() const      { return _major_progress; }
  void          set_inlining_progress(bool z)   { _inlining_progress = z; }
  int               inlining_progress() const   { return _inlining_progress; }
  void          set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
  int               inlining_incrementally() const { return _inlining_incrementally; }
  void          set_do_cleanup(bool z)          { _do_cleanup = z; }
  int               do_cleanup() const          { return _do_cleanup; }
  void          set_major_progress()            { _major_progress++; }
  void          restore_major_progress(int progress) { _major_progress += progress; }
  void        clear_major_progress()            { _major_progress = 0; }
  int               max_inline_size() const     { return _max_inline_size; }
  void          set_freq_inline_size(int n)     { _freq_inline_size = n; }
  int               freq_inline_size() const    { return _freq_inline_size; }
  void          set_max_inline_size(int n)      { _max_inline_size = n; }
  bool              has_loops() const           { return _has_loops; }
  void          set_has_loops(bool z)           { _has_loops = z; }
  bool              has_split_ifs() const       { return _has_split_ifs; }
  void          set_has_split_ifs(bool z)       { _has_split_ifs = z; }
  bool              has_unsafe_access() const   { return _has_unsafe_access; }
  void          set_has_unsafe_access(bool z)   { _has_unsafe_access = z; }
  bool              has_stringbuilder() const   { return _has_stringbuilder; }
  void          set_has_stringbuilder(bool z)   { _has_stringbuilder = z; }
  bool              has_boxed_value() const     { return _has_boxed_value; }
  void          set_has_boxed_value(bool z)     { _has_boxed_value = z; }
  bool              has_reserved_stack_access() const { return _has_reserved_stack_access; }
  void          set_has_reserved_stack_access(bool z) { _has_reserved_stack_access = z; }
  uint              max_vector_size() const     { return _max_vector_size; }
  void          set_max_vector_size(uint s)     { _max_vector_size = s; }
  bool              clear_upper_avx() const     { return _clear_upper_avx; }
  void          set_clear_upper_avx(bool s)     { _clear_upper_avx = s; }
  void          set_trap_count(uint r, uint c)  { assert(r < trapHistLength, "oob");        _trap_hist[r] = c; }
  uint              trap_count(uint r) const    { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
  bool              trap_can_recompile() const  { return _trap_can_recompile; }
  void          set_trap_can_recompile(bool z)  { _trap_can_recompile = z; }
  uint              decompile_count() const     { return _decompile_count; }
  void          set_decompile_count(uint c)     { _decompile_count = c; }
  bool              allow_range_check_smearing() const;
  bool              do_inlining() const         { return _do_inlining; }
  void          set_do_inlining(bool z)         { _do_inlining = z; }
  bool              do_scheduling() const       { return _do_scheduling; }
  void          set_do_scheduling(bool z)       { _do_scheduling = z; }
  bool              do_freq_based_layout() const{ return _do_freq_based_layout; }
  void          set_do_freq_based_layout(bool z){ _do_freq_based_layout = z; }
  bool              do_count_invocations() const{ return _do_count_invocations; }
  void          set_do_count_invocations(bool z){ _do_count_invocations = z; }
  bool              do_method_data_update() const { return _do_method_data_update; }
  void          set_do_method_data_update(bool z) { _do_method_data_update = z; }
  bool              do_vector_loop() const      { return _do_vector_loop; }
  void          set_do_vector_loop(bool z)      { _do_vector_loop = z; }
  bool              use_cmove() const           { return _use_cmove; }
  void          set_use_cmove(bool z)           { _use_cmove = z; }
  bool              age_code() const             { return _age_code; }
  void          set_age_code(bool z)             { _age_code = z; }
  int               AliasLevel() const           { return _AliasLevel; }
  bool              print_assembly() const       { return _print_assembly; }
  void          set_print_assembly(bool z)       { _print_assembly = z; }
  bool              print_inlining() const       { return _print_inlining; }
  void          set_print_inlining(bool z)       { _print_inlining = z; }
  bool              print_intrinsics() const     { return _print_intrinsics; }
  void          set_print_intrinsics(bool z)     { _print_intrinsics = z; }
  RTMState          rtm_state()  const           { return _rtm_state; }
  void          set_rtm_state(RTMState s)        { _rtm_state = s; }
  bool              use_rtm() const              { return (_rtm_state & NoRTM) == 0; }
  bool          profile_rtm() const              { return _rtm_state == ProfileRTM; }
  uint              max_node_limit() const       { return (uint)_max_node_limit; }
  void          set_max_node_limit(uint n)       { _max_node_limit = n; }
  bool              clinit_barrier_on_entry()       { return _clinit_barrier_on_entry; }
  void          set_clinit_barrier_on_entry(bool z) { _clinit_barrier_on_entry = z; }

  // check the CompilerOracle for special behaviours for this compile
  bool          method_has_option(const char * option) {
    return method() != NULL && method()->has_option(option);
  }

#ifndef PRODUCT
  bool          trace_opto_output() const       { return _trace_opto_output; }
  bool              parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
  void          set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
  int _in_dump_cnt;  // Required for dumping ir nodes.
#endif
  bool              has_irreducible_loop() const { return _has_irreducible_loop; }
  void          set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }

  // JSR 292
  bool              has_method_handle_invokes() const { return _has_method_handle_invokes;     }
  void          set_has_method_handle_invokes(bool z) {        _has_method_handle_invokes = z; }

  Ticks _latest_stage_start_counter;

  void begin_method() {
#ifndef PRODUCT
    if (_printer && _printer->should_print(1)) {
      _printer->begin_method();
    }
#endif
    C->_latest_stage_start_counter.stamp();
  }

  bool should_print(int level = 1) {
#ifndef PRODUCT
    return (_printer && _printer->should_print(level));
#else
    return false;
#endif
  }

  void print_method(CompilerPhaseType cpt, int level = 1, int idx = 0) {
    EventCompilerPhase event;
    if (event.should_commit()) {
      event.set_starttime(C->_latest_stage_start_counter);
      event.set_phase((u1) cpt);
      event.set_compileId(C->_compile_id);
      event.set_phaseLevel(level);
      event.commit();
    }

#ifndef PRODUCT
    if (should_print(level)) {
      char output[1024];
      if (idx != 0) {
        sprintf(output, "%s:%d", CompilerPhaseTypeHelper::to_string(cpt), idx);
      } else {
        sprintf(output, "%s", CompilerPhaseTypeHelper::to_string(cpt));
      }
      _printer->print_method(output, level);
    }
#endif
    C->_latest_stage_start_counter.stamp();
  }

  void end_method(int level = 1) {
    EventCompilerPhase event;
    if (event.should_commit()) {
      event.set_starttime(C->_latest_stage_start_counter);
      event.set_phase((u1) PHASE_END);
      event.set_compileId(C->_compile_id);
      event.set_phaseLevel(level);
      event.commit();
    }
#ifndef PRODUCT
    if (_printer && _printer->should_print(level)) {
      _printer->end_method();
    }
#endif
  }
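
  // Illustrative sketch of a typical bracketing sequence during a compilation
  // (hedged: the phase constant comes from phasetype.hpp and the exact call
  // sites live in compile.cpp):
  //
  //   C->begin_method();
  //   // ... parse and run a phase ...
  //   C->print_method(PHASE_AFTER_PARSING, 1);   // JFR event + optional IGV dump
  //   C->end_method();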

  int           macro_count()             const { return _macro_nodes->length(); }
  int           predicate_count()         const { return _predicate_opaqs->length();}
  int           expensive_count()         const { return _expensive_nodes->length(); }
  Node*         macro_node(int idx)       const { return _macro_nodes->at(idx); }
  Node*         predicate_opaque1_node(int idx) const { return _predicate_opaqs->at(idx);}
  Node*         expensive_node(int idx)   const { return _expensive_nodes->at(idx); }
  ConnectionGraph* congraph()                   { return _congraph;}
  void set_congraph(ConnectionGraph* congraph)  { _congraph = congraph;}
  void add_macro_node(Node * n) {
    //assert(n->is_macro(), "must be a macro node");
    assert(!_macro_nodes->contains(n), "duplicate entry in expand list");
    _macro_nodes->append(n);
  }
  void remove_macro_node(Node * n) {
    // this function may be called twice for a node so check
    // that the node is in the array before attempting to remove it
    if (_macro_nodes->contains(n))
      _macro_nodes->remove(n);
    // remove from _predicate_opaqs list also if it is there
    if (predicate_count() > 0 && _predicate_opaqs->contains(n)){
      _predicate_opaqs->remove(n);
    }
  }
  void add_expensive_node(Node * n);
  void remove_expensive_node(Node * n) {
    if (_expensive_nodes->contains(n)) {
      _expensive_nodes->remove(n);
    }
  }
  void add_predicate_opaq(Node * n) {
    assert(!_predicate_opaqs->contains(n), "duplicate entry in predicate opaque1");
    assert(_macro_nodes->contains(n), "should have already been in macro list");
    _predicate_opaqs->append(n);
  }

  // Range check dependent CastII nodes that can be removed after loop optimizations
  void add_range_check_cast(Node* n);
  void remove_range_check_cast(Node* n) {
    if (_range_check_casts->contains(n)) {
      _range_check_casts->remove(n);
    }
  }
  Node* range_check_cast_node(int idx) const { return _range_check_casts->at(idx);  }
  int   range_check_cast_count()       const { return _range_check_casts->length(); }
  // Remove all range check dependent CastIINodes.
  void  remove_range_check_casts(PhaseIterGVN &igvn);

  void add_opaque4_node(Node* n);
  void remove_opaque4_node(Node* n) {
    if (_opaque4_nodes->contains(n)) {
      _opaque4_nodes->remove(n);
    }
  }
  Node* opaque4_node(int idx) const { return _opaque4_nodes->at(idx);  }
  int   opaque4_count()       const { return _opaque4_nodes->length(); }
  void  remove_opaque4_nodes(PhaseIterGVN &igvn);

  // remove the opaque nodes that protect the predicates so that the unused checks and
  // uncommon traps will be eliminated from the graph.
  void cleanup_loop_predicates(PhaseIterGVN &igvn);
  bool is_predicate_opaq(Node * n) {
    return _predicate_opaqs->contains(n);
  }

  // Are there candidate expensive nodes for optimization?
  bool should_optimize_expensive_nodes(PhaseIterGVN &igvn);
  // Check whether n1 and n2 are similar
  static int cmp_expensive_nodes(Node* n1, Node* n2);
  // Sort expensive nodes to locate similar expensive nodes
  void sort_expensive_nodes();

  // Compilation environment.
  Arena*      comp_arena()           { return &_comp_arena; }
  ciEnv*      env() const            { return _env; }
  CompileLog* log() const            { return _log; }
  bool        failing() const        { return _env->failing() || _failure_reason != NULL; }
  const char* failure_reason() const { return (_env->failing()) ? _env->failure_reason() : _failure_reason; }

  bool failure_reason_is(const char* r) const {
    return (r == _failure_reason) || (r != NULL && _failure_reason != NULL && strcmp(r, _failure_reason) == 0);
  }

  void record_failure(const char* reason);
  void record_method_not_compilable(const char* reason) {
    // Bailouts cover "all_tiers" when TieredCompilation is off.
    env()->record_method_not_compilable(reason, !TieredCompilation);
    // Record failure reason.
    record_failure(reason);
  }
  bool check_node_count(uint margin, const char* reason) {
    if (live_nodes() + margin > max_node_limit()) {
      record_method_not_compilable(reason);
      return true;
    } else {
      return false;
    }
  }
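
  // Illustrative sketch of the usual bail-out pattern before a phase grows the
  // graph (hedged: the margin and message are made up for the example):
  //
  //   if (C->check_node_count(2 * NodeLimitFudgeFactor, "out of nodes during xyz")) {
  //     return;   // the compilation has already been marked as failing
  //   }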

  // Node management
  uint         unique() const              { return _unique; }
  uint         next_unique()               { return _unique++; }
  void         set_unique(uint i)          { _unique = i; }
  static int   debug_idx()                 { return debug_only(_debug_idx)+0; }
  static void  set_debug_idx(int i)        { debug_only(_debug_idx = i); }
  Arena*       node_arena()                { return &_node_arena; }
  Arena*       old_arena()                 { return &_old_arena; }
  RootNode*    root() const                { return _root; }
  void         set_root(RootNode* r)       { _root = r; }
  StartNode*   start() const;              // (Derived from root.)
  void         init_start(StartNode* s);
  Node*        immutable_memory();

  Node*        recent_alloc_ctl() const    { return _recent_alloc_ctl; }
  Node*        recent_alloc_obj() const    { return _recent_alloc_obj; }
  void         set_recent_alloc(Node* ctl, Node* obj) {
                                             _recent_alloc_ctl = ctl;
                                             _recent_alloc_obj = obj;
                                           }
  void         record_dead_node(uint idx)  { if (_dead_node_list.test_set(idx)) return;
                                             _dead_node_count++;
                                           }
  bool         is_dead_node(uint idx)      { return _dead_node_list.test(idx) != 0; }
  uint         dead_node_count()           { return _dead_node_count; }
  void         reset_dead_node_list()      { _dead_node_list.Reset();
                                             _dead_node_count = 0;
                                           }
  uint         live_nodes() const          {
    int val = _unique - _dead_node_count;
    assert(val >= 0, "number of tracked dead nodes %d more than created nodes %d", _dead_node_count, _unique);
    return (uint) val;
  }
932 #ifdef ASSERT
933   uint         count_live_nodes_by_graph_walk();
934   void         print_missing_nodes();
935 #endif
936 
937   // Record modified nodes to check that they are put on IGVN worklist
938   void         record_modified_node(Node* n) NOT_DEBUG_RETURN;
939   void         remove_modified_node(Node* n) NOT_DEBUG_RETURN;
DEBUG_ONLY(Unique_Node_List * modified_nodes ()const{ return _modified_nodes; } )940   DEBUG_ONLY( Unique_Node_List*   modified_nodes() const { return _modified_nodes; } )
941 
942   // Constant table
943   ConstantTable&   constant_table() { return _constant_table; }
944 
945   MachConstantBaseNode*     mach_constant_base_node();
has_mach_constant_base_node() const946   bool                  has_mach_constant_base_node() const { return _mach_constant_base_node != NULL; }
947   // Generated by adlc, true if CallNode requires MachConstantBase.
948   bool                      needs_clone_jvms();
949 
950   // Handy undefined Node
top() const951   Node*             top() const                 { return _top; }
952 
953   // these are used by guys who need to know about creation and transformation of top:
cached_top_node()954   Node*             cached_top_node()           { return _top; }
955   void          set_cached_top_node(Node* tn);
956 
node_note_array() const957   GrowableArray<Node_Notes*>* node_note_array() const { return _node_note_array; }
set_node_note_array(GrowableArray<Node_Notes * > * arr)958   void set_node_note_array(GrowableArray<Node_Notes*>* arr) { _node_note_array = arr; }
default_node_notes() const959   Node_Notes* default_node_notes() const        { return _default_node_notes; }
set_default_node_notes(Node_Notes * n)960   void    set_default_node_notes(Node_Notes* n) { _default_node_notes = n; }
961 
node_notes_at(int idx)962   Node_Notes*       node_notes_at(int idx) {
963     return locate_node_notes(_node_note_array, idx, false);
964   }
965   inline bool   set_node_notes_at(int idx, Node_Notes* value);
966 
967   // Copy notes from source to dest, if they exist.
968   // Overwrite dest only if source provides something.
969   // Return true if information was moved.
970   bool copy_node_notes_to(Node* dest, Node* source);
971 
972   // Workhorse function to sort out the blocked Node_Notes array:
973   inline Node_Notes* locate_node_notes(GrowableArray<Node_Notes*>* arr,
974                                        int idx, bool can_grow = false);
975 
976   void grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by);
977 
978   // Type management
type_arena()979   Arena*            type_arena()                { return _type_arena; }
type_dict()980   Dict*             type_dict()                 { return _type_dict; }
type_hwm()981   void*             type_hwm()                  { return _type_hwm; }
type_last_size()982   size_t            type_last_size()            { return _type_last_size; }
num_alias_types()983   int               num_alias_types()           { return _num_alias_types; }
984 
init_type_arena()985   void          init_type_arena()                       { _type_arena = &_Compile_types; }
set_type_arena(Arena * a)986   void          set_type_arena(Arena* a)                { _type_arena = a; }
set_type_dict(Dict * d)987   void          set_type_dict(Dict* d)                  { _type_dict = d; }
set_type_hwm(void * p)988   void          set_type_hwm(void* p)                   { _type_hwm = p; }
set_type_last_size(size_t sz)989   void          set_type_last_size(size_t sz)           { _type_last_size = sz; }
990 
last_tf(ciMethod * m)991   const TypeFunc* last_tf(ciMethod* m) {
992     return (m == _last_tf_m) ? _last_tf : NULL;
993   }
set_last_tf(ciMethod * m,const TypeFunc * tf)994   void set_last_tf(ciMethod* m, const TypeFunc* tf) {
995     assert(m != NULL || tf == NULL, "");
996     _last_tf_m = m;
997     _last_tf = tf;
998   }

  AliasType*        alias_type(int                idx)  { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
  AliasType*        alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
  bool         have_alias_type(const TypePtr* adr_type);
  AliasType*        alias_type(ciField*         field);

  int               get_alias_index(const TypePtr* at)  { return alias_type(at)->index(); }
  const TypePtr*    get_adr_type(uint aidx)             { return alias_type(aidx)->adr_type(); }
  int               get_general_index(uint aidx)        { return alias_type(aidx)->general_index(); }
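
  // Illustrative sketch (not part of this interface; 'C' stands for any
  // Compile*): memory state in the ideal graph is partitioned by alias index
  // rather than by raw address type, e.g.
  //
  //   int idx             = C->get_alias_index(adr_type);   // address type -> small index
  //   const TypePtr* flat = C->get_adr_type(idx);           // canonical type for that slice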

  // Building nodes
  void              rethrow_exceptions(JVMState* jvms);
  void              return_values(JVMState* jvms);
  JVMState*         build_start_state(StartNode* start, const TypeFunc* tf);

  // Decide how to build a call.
  // The profile factor is a discount to apply to this site's interp. profile.
  CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
                                   JVMState* jvms, bool allow_inline, float profile_factor, ciKlass* speculative_receiver_type = NULL,
                                   bool allow_intrinsics = true, bool delayed_forbidden = false);
  bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
    return should_delay_string_inlining(call_method, jvms) ||
           should_delay_boxing_inlining(call_method, jvms);
  }
  bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);

  // Helper functions to identify inlining potential at a call site
  ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                  ciKlass* holder, ciMethod* callee,
                                  const TypeOopPtr* receiver_type, bool is_virtual,
                                  bool &call_does_dispatch, int &vtable_index,
                                  bool check_access = true);
  ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                              ciMethod* callee, const TypeOopPtr* receiver_type,
                              bool check_access = true);

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // This version, unspecific to a particular bci, asks if
  // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
  bool too_many_traps(Deoptimization::DeoptReason reason,
                      // Privately used parameter for logging:
                      ciMethodData* logmd = NULL);
  // Report if there were too many recompiles at a method and bci.
  bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // Report if there were too many traps or recompiles at a method and bci.
  bool too_many_traps_or_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason) {
    return too_many_traps(method, bci, reason) ||
           too_many_recompiles(method, bci, reason);
  }
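
  // Illustrative sketch (not part of this interface; 'C' stands for any
  // Compile*): a hypothetical parsing decision based on these queries:
  //
  //   if (C->too_many_traps_or_recompiles(method, bci, Deoptimization::Reason_null_check)) {
  //     // deopting here keeps failing; emit an explicit check instead of an uncommon trap
  //   }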
  // Return a bitset with the reasons where deoptimization is allowed,
  // i.e., where there were not too many uncommon traps.
  int _allowed_reasons;
  int      allowed_deopt_reasons() { return _allowed_reasons; }
  void set_allowed_deopt_reasons();
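
  // Illustrative sketch (an assumption of this sketch is that the bitset uses
  // one bit per DeoptReason value, indexed by the reason):
  //
  //   bool may_deopt =
  //     (C->allowed_deopt_reasons() & (1 << Deoptimization::Reason_range_check)) != 0;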

  // Parsing, optimization
  PhaseGVN*         initial_gvn()               { return _initial_gvn; }
  Unique_Node_List* for_igvn()                  { return _for_igvn; }
  inline void       record_for_igvn(Node* n);   // Body is after class Unique_Node_List.
  void          set_initial_gvn(PhaseGVN *gvn)           { _initial_gvn = gvn; }
  void          set_for_igvn(Unique_Node_List *for_igvn) { _for_igvn = for_igvn; }

  // Replace n by nn using initial_gvn, calling hash_delete and
  // record_for_igvn as needed.
  void gvn_replace_by(Node* n, Node* nn);


  void              identify_useful_nodes(Unique_Node_List &useful);
  void              update_dead_node_list(Unique_Node_List &useful);
  void              remove_useless_nodes (Unique_Node_List &useful);

  WarmCallInfo*     warm_calls() const          { return _warm_calls; }
  void          set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
  WarmCallInfo* pop_warm_call();

  // Record this CallGenerator for inlining at the end of parsing.
  void              add_late_inline(CallGenerator* cg)        {
    _late_inlines.insert_before(_late_inlines_pos, cg);
    _late_inlines_pos++;
  }

  void              prepend_late_inline(CallGenerator* cg)    {
    _late_inlines.insert_before(0, cg);
  }

  void              add_string_late_inline(CallGenerator* cg) {
    _string_late_inlines.push(cg);
  }

  void              add_boxing_late_inline(CallGenerator* cg) {
    _boxing_late_inlines.push(cg);
  }
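
  // Illustrative sketch (not part of this interface; 'C' stands for any
  // Compile*): a call generator built during parsing can be queued for the
  // post-parse inlining passes, for example
  //
  //   CallGenerator* cg = CallGenerator::for_late_inline(callee, inline_cg);  // assumed factory, see opto/callGenerator.hpp
  //   C->add_late_inline(cg);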

  void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful);

  void process_print_inlining();
  void dump_print_inlining();

  bool over_inlining_cutoff() const {
    if (!inlining_incrementally()) {
      return unique() > (uint)NodeCountInliningCutoff;
    } else {
      // Give the incremental inlining algorithm some room to "breathe"
      // and avoid thrashing when the live node count is close to the limit.
      // Keep in mind that live_nodes() isn't accurate during inlining until
      // the dead node elimination step happens (see Compile::inline_incrementally).
      return live_nodes() > (uint)LiveNodeCountInliningCutoff * 11 / 10;
    }
  }
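
  // Illustrative arithmetic: the 11/10 factor grants roughly 10% of headroom
  // over the configured limit; e.g. a LiveNodeCountInliningCutoff of 40000
  // allows up to 44000 live nodes while inlining incrementally.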

  void inc_number_of_mh_late_inlines() { _number_of_mh_late_inlines++; }
  void dec_number_of_mh_late_inlines() { assert(_number_of_mh_late_inlines > 0, "_number_of_mh_late_inlines < 0 !"); _number_of_mh_late_inlines--; }
  bool has_mh_late_inlines() const     { return _number_of_mh_late_inlines > 0; }

  bool inline_incrementally_one();
  void inline_incrementally_cleanup(PhaseIterGVN& igvn);
  void inline_incrementally(PhaseIterGVN& igvn);
  void inline_string_calls(bool parse_time);
  void inline_boxing_calls(PhaseIterGVN& igvn);
  bool optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode);
  void remove_root_to_sfpts_edges(PhaseIterGVN& igvn);

  // Matching, CFG layout, allocation, code generation
  PhaseCFG*         cfg()                       { return _cfg; }
  bool              select_24_bit_instr() const { return _select_24_bit_instr; }
  bool              in_24_bit_fp_mode() const   { return _in_24_bit_fp_mode; }
  bool              has_java_calls() const      { return _java_calls > 0; }
  int               java_calls() const          { return _java_calls; }
  int               inner_loops() const         { return _inner_loops; }
  Matcher*          matcher()                   { return _matcher; }
  PhaseRegAlloc*    regalloc()                  { return _regalloc; }
  int               frame_slots() const         { return _frame_slots; }
  int               frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
  int               frame_size_in_bytes() const { return _frame_slots << LogBytesPerInt; }
  RegMask&          FIRST_STACK_mask()          { return _FIRST_STACK_mask; }
  Arena*            indexSet_arena()            { return _indexSet_arena; }
  void*             indexSet_free_block_list()  { return _indexSet_free_block_list; }
  uint              node_bundling_limit()       { return _node_bundling_limit; }
  Bundle*           node_bundling_base()        { return _node_bundling_base; }
  void          set_node_bundling_limit(uint n) { _node_bundling_limit = n; }
  void          set_node_bundling_base(Bundle* b) { _node_bundling_base = b; }
  bool          starts_bundle(const Node *n) const;
  bool          need_stack_bang(int frame_size_in_bytes) const;
  bool          need_register_stack_bang() const;

  void  update_interpreter_frame_size(int size) {
    if (_interpreter_frame_size < size) {
      _interpreter_frame_size = size;
    }
  }
  int           bang_size_in_bytes() const;

  void          set_matcher(Matcher* m)                 { _matcher = m; }
//void          set_regalloc(PhaseRegAlloc* ra)           { _regalloc = ra; }
  void          set_indexSet_arena(Arena* a)            { _indexSet_arena = a; }
  void          set_indexSet_free_block_list(void* p)   { _indexSet_free_block_list = p; }

  // Remember if this compilation changes hardware mode to 24-bit precision
  void set_24_bit_selection_and_mode(bool selection, bool mode) {
    _select_24_bit_instr = selection;
    _in_24_bit_fp_mode   = mode;
  }

  void  set_java_calls(int z) { _java_calls  = z; }
  void set_inner_loops(int z) { _inner_loops = z; }

  // Instruction bits passed off to the VM
  int               code_size()                 { return _method_size; }
  CodeBuffer*       code_buffer()               { return &_code_buffer; }
  int               first_block_size()          { return _first_block_size; }
  void              set_frame_complete(int off) { if (!in_scratch_emit_size()) { _code_offsets.set_value(CodeOffsets::Frame_Complete, off); } }
  ExceptionHandlerTable*  handler_table()       { return &_handler_table; }
  ImplicitExceptionTable* inc_table()           { return &_inc_table; }
  OopMapSet*        oop_map_set()               { return _oop_map_set; }
  DebugInformationRecorder* debug_info()        { return env()->debug_info(); }
  Dependencies*     dependencies()              { return env()->dependencies(); }
  static int        CompiledZap_count()         { return _CompiledZap_count; }
  BufferBlob*       scratch_buffer_blob()       { return _scratch_buffer_blob; }
  void         init_scratch_buffer_blob(int const_size);
  void        clear_scratch_buffer_blob();
  void          set_scratch_buffer_blob(BufferBlob* b) { _scratch_buffer_blob = b; }
  relocInfo*        scratch_locs_memory()       { return _scratch_locs_memory; }
  void          set_scratch_locs_memory(relocInfo* b)  { _scratch_locs_memory = b; }

  // Emit to the scratch blob, report the resulting size
  uint              scratch_emit_size(const Node* n);
  void       set_in_scratch_emit_size(bool x)   {        _in_scratch_emit_size = x; }
  bool           in_scratch_emit_size() const   { return _in_scratch_emit_size;     }
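
  // Illustrative sketch (not part of this interface; 'C' stands for any
  // Compile*): the scratch blob lets the backend measure an instruction's
  // encoding without touching the real CodeBuffer; given a Mach node 'n',
  //
  //   uint sz = C->scratch_emit_size(n);   // emits into the scratch buffer only
  //   // 'sz' can then drive decisions such as branch shortening (see shorten_branches below)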

  enum ScratchBufferBlob {
    MAX_inst_size       = 2048,
    MAX_locs_size       = 128, // number of relocInfo elements
    MAX_const_size      = 128,
    MAX_stubs_size      = 128
  };

  // Major entry point.  Given a Scope, compile the associated method.
  // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
  // replacement, entry_bci indicates the bytecode for which to compile a
  // continuation.
  Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
          int entry_bci, bool subsume_loads, bool do_escape_analysis,
          bool eliminate_boxing, DirectiveSet* directive);

  // Second major entry point.  From the TypeFunc signature, generate code
  // to pass arguments from the Java calling convention to the C calling
  // convention.
  Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
          address stub_function, const char *stub_name,
          int is_fancy_jump, bool pass_tls,
          bool save_arg_registers, bool return_pc, DirectiveSet* directive);

  // From the TypeFunc signature, generate code to pass arguments
  // from the Compiled calling convention to the Interpreter's calling convention
  void Generate_Compiled_To_Interpreter_Graph(const TypeFunc *tf, address interpreter_entry);

  // From the TypeFunc signature, generate code to pass arguments
  // from the Interpreter's calling convention to the Compiler's calling convention
  void Generate_Interpreter_To_Compiled_Graph(const TypeFunc *tf);

  // Are we compiling a method?
  bool has_method() { return method() != NULL; }

  // Maybe print some information about this compile.
  void print_compile_messages();

  // Final graph reshaping, a post-pass after the regular optimizer is done.
  bool final_graph_reshaping();

  // Returns true if adr is completely contained in the given alias category
  bool must_alias(const TypePtr* adr, int alias_idx);

  // Returns true if adr overlaps with the given alias category
  bool can_alias(const TypePtr* adr, int alias_idx);
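
  // Illustrative contrast between the two queries above (given a Compile* C):
  //
  //   if (C->must_alias(adr, alias_idx)) { /* every access through 'adr' lands in this slice  */ }
  //   if (C->can_alias(adr, alias_idx))  { /* some access through 'adr' may land in this slice */ }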

  // Driver for converting the compiler's IR into machine code bits
  void Output();

  // Accessors for node bundling info.
  Bundle* node_bundling(const Node *n);
  bool valid_bundle_info(const Node *n);

  // Schedule and Bundle the instructions
  void ScheduleAndBundle();

  // Build OopMaps for each GC point
  void BuildOopMaps();

  // Append debug info for the node "local" at safepoint node "sfpt" to the
  // "array".  May also consult and add to "objs", which describes the
  // scalar-replaced objects.
  void FillLocArray( int idx, MachSafePointNode* sfpt,
                     Node *local, GrowableArray<ScopeValue*> *array,
                     GrowableArray<ScopeValue*> *objs );

  // If "objs" contains an ObjectValue whose id is "id", returns it, else NULL.
  static ObjectValue* sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id);
  // Requires that "objs" does not contain an ObjectValue whose id matches
  // that of "sv".  Appends "sv".
  static void set_sv_for_object_node(GrowableArray<ScopeValue*> *objs,
                                     ObjectValue* sv );

  // Process an OopMap Element while emitting nodes
  void Process_OopMap_Node(MachNode *mach, int code_offset);

  class BufferSizingData {
  public:
    int _stub;
    int _code;
    int _const;
    int _reloc;

    BufferSizingData() :
      _stub(0),
      _code(0),
      _const(0),
      _reloc(0)
    { };
  };

  // Initialize the code buffer
  void        estimate_buffer_size(int& const_req);
  CodeBuffer* init_buffer(BufferSizingData& buf_sizes);

  // Write out basic block data to the code buffer
  void fill_buffer(CodeBuffer* cb, uint* blk_starts);

  // Determine which variable sized branches can be shortened
  void shorten_branches(uint* blk_starts, BufferSizingData& buf_sizes);

  // Compute the size of the first NumberOfLoopInstrToAlign instructions
  // at the head of a loop.
  void compute_loop_first_inst_sizes();

  // Compute the information for the exception tables
  void FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels);
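
  // Illustrative sketch (sequencing is approximate, not a definitive trace of
  // the Output() driver declared above): the helpers above are used roughly as
  //
  //   BufferSizingData buf_sizes;
  //   shorten_branches(blk_starts, buf_sizes);   // choose short vs. long branch encodings
  //   CodeBuffer* cb = init_buffer(buf_sizes);   // size and set up the code buffer
  //   fill_buffer(cb, blk_starts);               // emit each basic block into 'cb'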

  // Stack slots that may be unused by the calling convention but must
  // otherwise be preserved.  On Intel this includes the return address.
  // On PowerPC it includes the 4 words holding the old TOC & LR glue.
  uint in_preserve_stack_slots();

  // "Top of Stack" slots that may be unused by the calling convention but must
  // otherwise be preserved.
  // On Intel these are not necessary and the value can be zero.
  // On Sparc this describes the words reserved for storing a register window
  // when an interrupt occurs.
  static uint out_preserve_stack_slots();

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  uint varargs_C_out_slots_killed() const;

  // Number of Stack Slots consumed by a synchronization entry
  int sync_stack_slots() const;

  // Compute the name of old_SP.  See <arch>.ad for frame layout.
  OptoReg::Name compute_old_SP();

 private:
  // Phase control:
  void Init(int aliaslevel);                     // Prepare for a single compilation
  int  Inline_Warm();                            // Find more inlining work.
  void Finish_Warm();                            // Give up on further inlines.
  void Optimize();                               // Given a graph, optimize it
  void Code_Gen();                               // Generate code from a graph

  // Management of the AliasType table.
  void grow_alias_types();
  AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
  const TypePtr *flatten_alias_type(const TypePtr* adr_type) const;
  AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field);

  void verify_top(Node*) const PRODUCT_RETURN;

  // Intrinsic setup.
  void           register_library_intrinsics();                            // initializer
  CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual);          // constructor
  int            intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found);  // helper
  CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual);             // query fn
  void           register_intrinsic(CallGenerator* cg);                    // update fn

#ifndef PRODUCT
  static juint  _intrinsic_hist_count[vmIntrinsics::ID_LIMIT];
  static jubyte _intrinsic_hist_flags[vmIntrinsics::ID_LIMIT];
#endif
  // Function calls made by the public function final_graph_reshaping.
  // No need to be made public as they are not called elsewhere.
  void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc);
  void final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop);
  void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc );
  void eliminate_redundant_card_marks(Node* n);

 public:

  // Note:  Histogram array size is about 1 Kb.
  enum {                        // flag bits:
    _intrinsic_worked = 1,      // succeeded at least once
    _intrinsic_failed = 2,      // tried it but it failed
    _intrinsic_disabled = 4,    // was requested but disabled (e.g., -XX:-InlineUnsafeOps)
    _intrinsic_virtual = 8,     // was seen in the virtual form (rare)
    _intrinsic_both = 16        // was seen in the non-virtual form (usual)
  };
  // Update the histogram.  Return true if this is a first-time occurrence.
  static bool gather_intrinsic_statistics(vmIntrinsics::ID id,
                                          bool is_virtual, int flags) PRODUCT_RETURN0;
  static void print_intrinsic_statistics() PRODUCT_RETURN;

  // Graph verification code
  // Walk the node list, verifying that there is a one-to-one
  // correspondence between Use-Def edges and Def-Use edges.
  // The option no_dead_code enables stronger checks that the
  // graph is strongly connected from root in both directions.
  void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN;

  // End-of-run dumps.
  static void print_statistics() PRODUCT_RETURN;

  // Dump formatted assembly
#if defined(SUPPORT_OPTO_ASSEMBLY)
  void dump_asm_on(outputStream* ost, int* pcs, uint pc_limit);
  void dump_asm(int* pcs = NULL, uint pc_limit = 0) { dump_asm_on(tty, pcs, pc_limit); }
#else
  void dump_asm_on(outputStream* ost, int* pcs, uint pc_limit) { return; }
  void dump_asm(int* pcs = NULL, uint pc_limit = 0) { return; }
#endif
  void dump_pc(int *pcs, int pc_limit, Node *n);

  // Verify ADLC assumptions during startup
  static void adlc_verification() PRODUCT_RETURN;

  // Definitions of pd methods
  static void pd_compiler2_init();

  // Static parse-time type checking logic for gen_subtype_check:
  enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
  int static_subtype_check(ciKlass* superk, ciKlass* subk);
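
  // Illustrative sketch (not part of this interface; 'C' stands for any
  // Compile*): callers typically switch on the result, e.g.
  //
  //   switch (C->static_subtype_check(superk, subk)) {
  //   case SSC_always_true:  /* statically provable; no runtime check needed */ break;
  //   case SSC_always_false: /* the checked path is statically dead          */ break;
  //   case SSC_easy_test:    /* a single klass comparison suffices           */ break;
  //   case SSC_full_test:    /* the full subtype check must be emitted       */ break;
  //   }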

  static Node* conv_I2X_index(PhaseGVN* phase, Node* offset, const TypeInt* sizetype,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = NULL);

  // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
  static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl);

  // Auxiliary method for randomized fuzzing/stressing
  static bool randomized_select(int count);

  // Supporting clone_map
  CloneMap&     clone_map();
  void          set_clone_map(Dict* d);

  bool needs_clinit_barrier(ciField* ik,         ciMethod* accessing_method);
  bool needs_clinit_barrier(ciMethod* ik,        ciMethod* accessing_method);
  bool needs_clinit_barrier(ciInstanceKlass* ik, ciMethod* accessing_method);
};

#endif // SHARE_OPTO_COMPILE_HPP