/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_PARSE_HPP
#define SHARE_VM_OPTO_PARSE_HPP

#include "ci/ciMethodData.hpp"
#include "ci/ciTypeFlow.hpp"
#include "compiler/methodLiveness.hpp"
#include "libadt/vectset.hpp"
#include "oops/generateOopMap.hpp"
#include "opto/graphKit.hpp"
#include "opto/subnode.hpp"

class BytecodeParseHistogram;
class InlineTree;
class Parse;
class SwitchRange;


//------------------------------InlineTree-------------------------------------
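// An InlineTree records one node of the compiler's inlining decisions:
// the root stands for the method being compiled, and each subtree stands
// for a callee inlined (or considered for inlining) at some call site.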
class InlineTree : public ResourceObj {
  friend class VMStructs;

  Compile*    C;                  // cache
  JVMState*   _caller_jvms;       // state of caller
  ciMethod*   _method;            // method being called by the caller_jvms
  InlineTree* _caller_tree;       // the caller's inline tree (NULL at the root)
  uint        _count_inline_bcs;  // Accumulated count of inlined bytecodes
  // Call-site count / interpreter invocation count, scaled recursively.
  // Always between 0.0 and 1.0.  Represents the fraction of the method's
  // total execution time spent at this call site.
  const float _site_invoke_ratio;
  const int   _max_inline_level;  // the maximum inline level for this sub-tree (may be adjusted)
  float compute_callee_frequency( int caller_bci ) const;

  GrowableArray<InlineTree*> _subtrees;

  void print_impl(outputStream* st, int indent) const PRODUCT_RETURN;
  const char* _msg;               // reason for the last inlining decision (see msg()/set_msg())
protected:
  InlineTree(Compile* C,
             const InlineTree* caller_tree,
             ciMethod* callee_method,
             JVMState* caller_jvms,
             int caller_bci,
             float site_invoke_ratio,
             int max_inline_level);
  InlineTree *build_inline_tree_for_callee(ciMethod* callee_method,
                                           JVMState* caller_jvms,
                                           int caller_bci);
  bool        try_to_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            int caller_bci,
                            JVMState* jvms,
                            ciCallProfile& profile,
                            WarmCallInfo* wci_result,
                            bool& should_delay);
  bool        should_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            int caller_bci,
                            ciCallProfile& profile,
                            WarmCallInfo* wci_result);
  bool        should_not_inline(ciMethod* callee_method,
                                ciMethod* caller_method,
                                JVMState* jvms,
                                WarmCallInfo* wci_result);
  void        print_inlining(ciMethod* callee_method, int caller_bci,
                             ciMethod* caller_method, bool success) const;

  InlineTree* caller_tree()       const { return _caller_tree;  }
  InlineTree* callee_at(int bci, ciMethod* m) const;
  int         inline_level()      const { return stack_depth(); }
  int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }
  const char* msg()               const { return _msg; }
  void        set_msg(const char* msg)  { _msg = msg; }
public:
  static const char* check_can_parse(ciMethod* callee);

  static InlineTree* build_inline_tree_root();
  static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee);

  // For temporary (stack-allocated, stateless) inline trees ("ilts"):
  InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int max_inline_level);

  // InlineTree enum
  enum InlineStyle {
    Inline_do_not_inline             =   0, // do not inline the call site
    Inline_cha_is_monomorphic        =   1, // CHA proves the call site has a single target
    Inline_type_profile_monomorphic  =   2  // receiver type profile shows one receiver klass
  };

  // See if it is OK to inline.
  // The receiver is the inline tree for the caller.
  //
  // The result is a temperature indication.  If it is hot or cold,
  // inlining is immediate or undesirable.  Otherwise, the info block
  // returned is newly allocated and may be enqueued.
  //
  // If the method is inlinable, a new inline subtree is created on the fly,
  // and may be accessed by find_subtree_from_root.
  // The call_method is the dest_method for a special or static invocation.
  // The call_method is an optimized virtual method candidate otherwise.
  WarmCallInfo* ok_to_inline(ciMethod *call_method, JVMState* caller_jvms, ciCallProfile& profile, WarmCallInfo* wci, bool& should_delay);

  // Information about inlined method
  JVMState*   caller_jvms()       const { return _caller_jvms; }
  ciMethod   *method()            const { return _method; }
  int         caller_bci()        const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
  uint        count_inline_bcs()  const { return _count_inline_bcs; }
  float       site_invoke_ratio() const { return _site_invoke_ratio; }

#ifndef PRODUCT
private:
  uint        _count_inlines;     // Count of inlined methods
public:
  // Debug information collected during parse
  uint        count_inlines()     const { return _count_inlines; }
#endif
  GrowableArray<InlineTree*> subtrees() { return _subtrees; }

  void print_value_on(outputStream* st) const PRODUCT_RETURN;

  bool        _forced_inline;     // Inlining was forced by CompilerOracle, ciReplay or annotation
  bool        forced_inline()     const { return _forced_inline; }
  // Count number of nodes in this subtree
  int         count() const;
  // Dump inlining replay data to the stream.
  void dump_replay_data(outputStream* out);
};


//-----------------------------------------------------------------------------
//------------------------------Parse------------------------------------------
// Parse bytecodes, build a Graph
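// A Parse instance translates the bytecodes of one method into Ideal nodes,
// using the GraphKit utilities it inherits; an inlined callee is handled by
// a nested Parse whose caller JVMState points back at the call site.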
class Parse : public GraphKit {
 public:
  // Per-block information needed by the parser:
  class Block {
   private:
    ciTypeFlow::Block* _flow;
    int                _pred_count;     // how many predecessors in CFG?
    int                _preds_parsed;   // how many of these have been parsed?
    uint               _count;          // how many times executed?  Currently only set by _goto's
    bool               _is_parsed;      // has this block been parsed yet?
    bool               _is_handler;     // is this block an exception handler?
    bool               _has_merged_backedge; // does this block have a merged backedge?
    SafePointNode*     _start_map;      // all values flowing into this block
    MethodLivenessResult _live_locals;  // lazily initialized liveness bitmap

    int                _num_successors; // Includes only normal control flow.
    int                _all_successors; // Includes exception paths also.
    Block**            _successors;

    // Use init_node/init_graph to initialize Blocks.
    // Block() : _live_locals((uintptr_t*)NULL,0) { ShouldNotReachHere(); }
    Block() : _live_locals(NULL,0) { ShouldNotReachHere(); }

   public:

    // Set up the block data structure itself.
    void init_node(Parse* outer, int po);
    // Set up the block's relations to other blocks.
    void init_graph(Parse* outer);
    ciTypeFlow::Block* flow() const        { return _flow; }
    int pred_count() const                 { return _pred_count; }
    int preds_parsed() const               { return _preds_parsed; }
    bool is_parsed() const                 { return _is_parsed; }
    bool is_handler() const                { return _is_handler; }
    void set_count( uint x )               { _count = x; }
    uint count() const                     { return _count; }

    SafePointNode* start_map() const       { assert(is_merged(),"");   return _start_map; }
    void set_start_map(SafePointNode* m)   { assert(!is_merged(), ""); _start_map = m; }

    // True after any predecessor flows control into this block
    bool is_merged() const                 { return _start_map != NULL; }

#ifdef ASSERT
    // True after backedge predecessor flows control into this block
    bool has_merged_backedge() const       { return _has_merged_backedge; }
    void mark_merged_backedge(Block* pred) {
      assert(is_SEL_head(), "should be loop head");
      if (pred != NULL && is_SEL_backedge(pred)) {
        assert(is_parsed(), "block should be parsed before merging backedges");
        _has_merged_backedge = true;
      }
    }
#endif

    // True when all non-exception predecessors have been parsed.
    bool is_ready() const                  { return preds_parsed() == pred_count(); }

    int num_successors() const             { return _num_successors; }
    int all_successors() const             { return _all_successors; }
    Block* successor_at(int i) const {
      assert((uint)i < (uint)all_successors(), "");
      return _successors[i];
    }
    Block* successor_for_bci(int bci);

    int start() const                      { return flow()->start(); }
    int limit() const                      { return flow()->limit(); }
    int rpo() const                        { return flow()->rpo(); }
    int start_sp() const                   { return flow()->stack_size(); }

    bool is_loop_head() const              { return flow()->is_loop_head(); }
    bool is_SEL_head() const               { return flow()->is_single_entry_loop_head(); }
    bool is_SEL_backedge(Block* pred) const{ return is_SEL_head() && pred->rpo() >= rpo(); }
    bool is_invariant_local(uint i) const  {
      const JVMState* jvms = start_map()->jvms();
      if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false;
      return flow()->is_invariant_local(i - jvms->locoff());
    }
    bool can_elide_SEL_phi(uint i) const  { assert(is_SEL_head(),""); return is_invariant_local(i); }

    const Type* peek(int off=0) const      { return stack_type_at(start_sp() - (off+1)); }

    const Type* stack_type_at(int i) const;
    const Type* local_type_at(int i) const;
    static const Type* get_type(ciType* t) { return Type::get_typeflow_type(t); }

    bool has_trap_at(int bci) const        { return flow()->has_trap() && flow()->trap_bci() == bci; }

    // Call this just before parsing a block.
    void mark_parsed() {
      assert(!_is_parsed, "must parse each block exactly once");
      _is_parsed = true;
    }

    // Return the phi/region input index for the "current" pred,
    // and bump the pred number.  For historical reasons these index
    // numbers are handed out in descending order.  The last index is
    // always PhiNode::Input (i.e., 1).  The value returned is known
    // as a "path number" because it distinguishes by which path we are
    // entering the block.
    int next_path_num() {
      assert(preds_parsed() < pred_count(), "too many preds?");
      return pred_count() - _preds_parsed++;
    }
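    // Example: with pred_count() == 3, successive calls return 3, 2, 1,
    // so the last predecessor merged takes input index PhiNode::Input (1).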

    // Add a previously unaccounted predecessor to this block.
    // This operates by increasing the size of the block's region
    // and all its phi nodes (if any).  The value returned is a
    // path number ("pnum").
    int add_new_path();

    // Initialize me by recording the parser's map.  My own map must be NULL.
    void record_state(Parse* outer);
  };

#ifndef PRODUCT
  // BytecodeParseHistogram collects number of bytecodes parsed, nodes constructed, and transformations.
  class BytecodeParseHistogram : public ResourceObj {
   private:
    enum BPHType {
      BPH_transforms,
      BPH_values
    };
    static bool _initialized;
    static uint _bytecodes_parsed [Bytecodes::number_of_codes];
    static uint _nodes_constructed[Bytecodes::number_of_codes];
    static uint _nodes_transformed[Bytecodes::number_of_codes];
    static uint _new_values       [Bytecodes::number_of_codes];

    Bytecodes::Code _initial_bytecode;
    int             _initial_node_count;
    int             _initial_transforms;
    int             _initial_values;

    Parse     *_parser;
    Compile   *_compiler;

    // Initialization
    static void reset();

    // Return info being collected, select with global flag 'BytecodeParseInfo'
    int current_count(BPHType info_selector);

   public:
    BytecodeParseHistogram(Parse *p, Compile *c);
    static bool initialized();

    // Record info when starting to parse one bytecode
    void set_initial_state( Bytecodes::Code bc );
    // Record results of parsing one bytecode
    void record_change();

    // Profile printing
    static void print(float cutoff = 0.01F); // cutoff in percent
  };

  public:
    // Record work done during parsing
    BytecodeParseHistogram* _parse_histogram;
    void set_parse_histogram(BytecodeParseHistogram *bph) { _parse_histogram = bph; }
    BytecodeParseHistogram* parse_histogram()      { return _parse_histogram; }
#endif

 private:
  friend class Block;

  // Variables which characterize this compilation as a whole:

  JVMState*     _caller;        // JVMS which carries incoming args & state.
  float         _expected_uses; // expected number of calls to this code
  float         _prof_factor;   // discount applied to my profile counts
  int           _depth;         // Inline tree depth, for debug printouts
  const TypeFunc*_tf;           // My kind of function type
  int           _entry_bci;     // the osr bci or InvocationEntryBci

  ciTypeFlow*   _flow;          // Results of previous flow pass.
  Block*        _blocks;        // Array of basic-block structs.
  int           _block_count;   // Number of elements in _blocks.

  GraphKit      _exits;         // Record all normal returns and throws here.
  bool          _wrote_final;   // Did we write a final field?
  bool          _wrote_volatile;     // Did we write a volatile field?
  bool          _count_invocations;  // update and test invocation counter
  bool          _method_data_update; // update method data oop
  Node*         _alloc_with_final;   // An allocation node with final field

  // Variables which track Java semantics during bytecode parsing:

  Block*            _block;     // block currently getting parsed
  ciBytecodeStream  _iter;      // stream of this method's bytecodes

  int           _blocks_merged; // Progress meter: state merges from BB preds
  int           _blocks_parsed; // Progress meter: BBs actually parsed

  const FastLockNode* _synch_lock; // FastLockNode for synchronized method

#ifndef PRODUCT
  int _max_switch_depth;        // Debugging SwitchRanges.
  int _est_switch_depth;        // Debugging SwitchRanges.
#endif

  bool         _first_return;                  // true if return is the first to be parsed
  bool         _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
  uint         _new_idx;                       // nodes with _idx at or above this value were created during this parse; used to trim the replaced-nodes list

 public:
  // Constructor
  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);

  virtual Parse* is_Parse() const { return (Parse*)this; }

  // Accessors.
  JVMState*     caller()        const { return _caller; }
  float         expected_uses() const { return _expected_uses; }
  float         prof_factor()   const { return _prof_factor; }
  int           depth()         const { return _depth; }
  const TypeFunc* tf()          const { return _tf; }
  //            entry_bci()     -- see osr_bci, etc.

  ciTypeFlow*   flow()          const { return _flow; }
  //            blocks()        -- see rpo_at, start_block, etc.
  int           block_count()   const { return _block_count; }

  GraphKit&     exits()               { return _exits; }
  bool          wrote_final() const   { return _wrote_final; }
  void      set_wrote_final(bool z)   { _wrote_final = z; }
  bool          wrote_volatile() const { return _wrote_volatile; }
  void      set_wrote_volatile(bool z) { _wrote_volatile = z; }
  bool          count_invocations() const  { return _count_invocations; }
  bool          method_data_update() const { return _method_data_update; }
  Node*    alloc_with_final() const   { return _alloc_with_final; }
  void set_alloc_with_final(Node* n)  {
    assert((_alloc_with_final == NULL) || (_alloc_with_final == n), "different init objects?");
    _alloc_with_final = n;
  }

  Block*             block()    const { return _block; }
  ciBytecodeStream&  iter()           { return _iter; }
  Bytecodes::Code    bc()       const { return _iter.cur_bc(); }

  void set_block(Block* b)            { _block = b; }

  // Derived accessors:
  bool is_normal_parse() const  { return _entry_bci == InvocationEntryBci; }
  bool is_osr_parse() const     { return _entry_bci != InvocationEntryBci; }
  int osr_bci() const           { assert(is_osr_parse(),""); return _entry_bci; }

  void set_parse_bci(int bci);

  // Must this parse be aborted?
  bool failing()                { return C->failing(); }

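  // The _blocks array is indexed by ciTypeFlow reverse post-order (rpo).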
  Block* rpo_at(int rpo) {
    assert(0 <= rpo && rpo < _block_count, "oob");
    return &_blocks[rpo];
  }
  Block* start_block() {
    return rpo_at(flow()->start_block()->rpo());
  }
  // Can return NULL if the flow pass did not complete a block.
  Block* successor_for_bci(int bci) {
    return block()->successor_for_bci(bci);
  }

 private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers
  Node *fetch_interpreter_state(int index, BasicType bt, Node *local_addrs, Node *local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
  void  load_interpreter_state(Node* osr_buf);

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  void store_state_to(Block* b) { b->record_state(this); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block
  void do_one_block();

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // first actions (before BCI 0)
  void do_method_entry();

  // implementation of monitorenter/monitorexit
  void do_monitor_enter();
  void do_monitor_exit();

  // Eagerly create phis throughout the state, to cope with back edges.
  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge(          int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path( int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
  // Helper functions for merging individual cells.
  PhiNode *ensure_phi(       int idx, bool nocreate = false);
  PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);

  // Parse this bytecode, and alter the Parser's JVM->Node mapping
  void do_one_bytecode();

  // Helper function to generate array store check
  void array_store_check();
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type* *result2=NULL);

  void rtm_deopt();

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();

  // Insert a compiler safepoint into the graph, if there is a back-branch.
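  // (A back-branch can close a loop; polling at the branch bounds how long
  // the loop can run without reaching a safepoint.)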
  void maybe_add_safepoint(int target_bci) {
    if (UseLoopSafepoints && target_bci <= bci()) {
      add_safepoint();
    }
  }

  // Note:  Intrinsic generation routines may be found in library_call.cpp.

  // Helper function to set up Ideal Call nodes
  void do_call();

  // Helper function to uncommon-trap or bailout for non-compilable call-sites
  bool can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass *klass);

  // Helper function to set up type-profile based inlining
  bool prepare_type_profile_inline(ciInstanceKlass* prof_klass, ciMethod* prof_method);

  // Helper functions for type checking bytecodes:
  void  do_checkcast();
  void  do_instanceof();

  // Helper functions for shifting & arithmetic
  void modf();
  void modd();
  void l2f();

  void do_irem();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true,  false); }
  void do_getfield () { do_field_access(true,  true); }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true); }
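  // (is_get selects load vs. store; is_field distinguishes instance fields
  // from statics, as the four wrappers above show.)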

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);
  bool static_field_ok_in_clinit(ciField *field, ciMethod *method);

  // common code for actually performing the load or store
  void do_get_xxx(Node* obj, ciField* field, bool is_field);
  void do_put_xxx(Node* obj, ciField* field, bool is_field);

  // loading from a constant field or the constant pool
  // returns false if push failed (non-perm field constants only, not ldcs)
  bool push_constant(ciConstant con, bool require_constant = false, bool is_autobox_cache = false, const Type* basic_type = NULL);

  // implementation of object creation bytecodes
  void emit_guard_for_new(ciInstanceKlass* klass);
  void do_new();
  void do_newarray(BasicType elemtype);
  void do_anewarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

  float   dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test);
  float   branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test);
  bool    seems_never_taken(float prob) const;
  bool    path_is_suitable_for_uncommon_trap(float prob) const;
  bool    seems_stable_comparison() const;

  void    do_ifnull(BoolTest::mask btest, Node* c);
  void    do_if(BoolTest::mask btest, Node* c);
  int     repush_if_args();
  void    adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                              Block* path, Block* other_path);
  void    sharpen_type_after_if(BoolTest::mask btest,
                                Node* con, const Type* tcon,
                                Node* val, const Type* tval);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask);
  Node*   jump_if_join(Node* iffalse, Node* iftrue);
  void    jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, int prof_table_index);
  void    jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, int prof_table_index);
  void    jump_if_always_fork(int dest_bci_if_true, int prof_table_index);

  friend class SwitchRange;
  void    do_tableswitch();
  void    do_lookupswitch();
  void    jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool    create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);

  // helper functions for methodData style profiling
  void test_counter_against_threshold(Node* cnt, int limit);
  void increment_and_test_invocation_counter(int limit);
  void test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, int limit);
  Node* method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
  void increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
  void set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant);

  void profile_method_entry();
  void profile_taken_branch(int target_bci, bool force_update = false);
  void profile_not_taken_branch(bool force_update = false);
  void profile_call(Node* receiver);
  void profile_generic_call();
  void profile_receiver_type(Node* receiver);
  void profile_ret(int target_bci);
  void profile_null_checkcast();
  void profile_switch_case(int table_index);

  // helper function for call statistics
  void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;

  Node_Notes* make_node_notes(Node_Notes* caller_nn);

  // Helper functions for handling normal and abnormal exits.
  void build_exits();

  // Fix up all exceptional control flow exiting a single bytecode.
  void do_exceptions();

  // Fix up all exiting control flow at the end of the parse.
  void do_exits();

  // Add Catch/CatchProjs
  // The call is either a Java call or the VM's rethrow stub
  void catch_call_exceptions(ciExceptionHandlerStream&);

  // Handle all exceptions thrown by the inlined method.
  // Also handles exceptions for individual bytecodes.
  void catch_inline_exceptions(SafePointNode* ex_map);

  // Merge the given map into correct exceptional exit state.
  // Assumes that there is no applicable local handler.
  void throw_to_exit(SafePointNode* ex_map);

  // Use speculative type to optimize CmpP node
  Node* optimize_cmp_with_klass(Node* c);

 public:
#ifndef PRODUCT
  // Handle PrintOpto, etc.
  void show_parse_info();
  void dump_map_adr_mem() const;
  static void print_statistics(); // Print some performance counters
  void dump();
  void dump_bci(int bci);
#endif
};

#endif // SHARE_VM_OPTO_PARSE_HPP