/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLNODE_HPP
#define SHARE_VM_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/replacednodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class  SafePointNode;
class   CallNode;
class     CallJavaNode;
class       CallStaticJavaNode;
class       CallDynamicJavaNode;
class     CallRuntimeNode;
class       CallLeafNode;
class         CallLeafNoFPNode;
class     AllocateNode;
class       AllocateArrayNode;
class     BoxLockNode;
class     LockNode;
class     UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; };
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
  virtual void  dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int   Opcode() const;
  static  const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
  virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site.  Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode.  Restores registers and
// unwinds stack.  Rethrow happens in the caller's method.
class RethrowNode : public Node {
 public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit.  (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
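//
// A minimal sketch of how this list is consumed (illustrative only; 'call'
// stands for any node carrying a JVMState): debug info is emitted by walking
// the scope chain from the youngest state back to the root:
//
//   for (JVMState* jvms = call->jvms(); jvms != NULL; jvms = jvms->caller()) {
//     // describe locals, expression stack and monitors of jvms->method()
//     // at jvms->bci()
//   }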
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false       -- do not reexecute
    Reexecute_True      =  1  // true        -- reexecute the bytecode
  } ReexecuteState; // Reexecute state

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size);  // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
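  //
  // Worked example (illustrative only): with 4 locals and sp() == 2,
  // stkoff() == locoff() + 4 and argoff() == stkoff() + 2, so local i of
  // this state is the owning map's input in(locoff() + i).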
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              scloff() const { return _scloff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return stkoff() - locoff(); }
  int            stk_size() const { return monoff() - stkoff(); }
  int            mon_size() const { return scloff() - monoff(); }
  int            scl_size() const { return endoff() - scloff(); }

  bool        is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool        is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool        is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool        is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint                      sp() const { return _sp; }
  int                      bci() const { return _bci; }
  bool        should_reexecute() const { return _reexecute == Reexecute_True; }
  bool  is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool              has_method() const { return _method != NULL; }
  ciMethod*             method() const { assert(has_method(), ""); return _method; }
  JVMState*             caller() const { return _caller; }
  SafePointNode*           map() const { return _map; }
  uint                   depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint               debug_end() const; // returns endoff of self
  uint              debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint        debug_depth()  const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                             || (caller() && caller()->is_monitor_use(off)); }
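  //
  // Sketch (illustrative only; 'map' stands for the owning SafePointNode):
  // since monitors are (box, obj) pairs and logMonitorEdges == 1, the
  // monitors of a state are typically visited as
  //
  //   for (int i = 0; i < jvms->nof_monitors(); i++) {
  //     Node* box = map->monitor_box(jvms, i);  // in(monoff() + 2*i + 0)
  //     Node* obj = map->monitor_obj(jvms, i);  // in(monoff() + 2*i + 1)
  //   }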

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode *map) { _map = map; }
  void              set_sp(uint sp) { _sp = sp; }
                    // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void      set_map_deep(SafePointNode *map);// reset map for all callers
  void      adapt_position(int delta);       // Adapt offsets in in-array after adding an edge.
  int       interpreter_frame_size() const;

#ifndef PRODUCT
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint           cmp( const Node &n ) const;
  virtual uint           size_of() const;       // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  const TypePtr*  _adr_type;  // What type of memory does this node produce?
  ReplacedNodes   _replaced_nodes; // During parsing: list of pairs of nodes from calls to GraphKit::replace_in_map()

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

 private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

 public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void  set_local(JVMState* jvms, uint idx, Node *c);

  void  set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void  set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here.  (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode*         next_exception() const;
  void               set_next_exception(SafePointNode* n);
  bool                   has_exceptions() const { return next_exception() != NULL; }
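  //
  // Sketch (illustrative only; 'map' stands for any map with pending
  // exception states): clients typically drain the list one safepoint
  // at a time:
  //
  //   while (map->has_exceptions()) {
  //     SafePointNode* ex_map = map->next_exception();
  //     map->set_next_exception(ex_map->next_exception());
  //     ex_map->set_next_exception(NULL);
  //     // ... merge or rethrow the exception state held by ex_map ...
  //   }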

  // Helper methods to operate on replaced nodes
  ReplacedNodes replaced_nodes() const {
    return _replaced_nodes;
  }

  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
    _replaced_nodes = replaced_nodes;
  }

  void clone_replaced_nodes() {
    _replaced_nodes.clone();
  }
  void record_replaced_node(Node* initial, Node* improved) {
    _replaced_nodes.record(initial, improved);
  }
  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
  }
  void delete_replaced_nodes() {
    _replaced_nodes.reset();
  }
  void apply_replaced_nodes(uint idx) {
    _replaced_nodes.apply(this, idx);
  }
  void merge_replaced_nodes_with(SafePointNode* sfpt) {
    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
  }
  bool has_replaced_nodes() const {
    return !_replaced_nodes.is_empty();
  }

  void disconnect_from_root(PhaseIterGVN *igvn);

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type*    Value(PhaseGVN* phase) const;
  virtual const Type    *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node*          Identity(PhaseGVN* phase);
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  static  bool           needs_polling_address_input();

#ifndef PRODUCT
  virtual void           dump_spec(outputStream *st) const;
  virtual void           related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
                     // It is relative to the last (youngest) jvms->_scloff.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const ; // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint           ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != NULL, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
  uint n_fields()    const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void              dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};
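
// Sketch (illustrative only; 'igvn', 'new_mem' and 'new_ctrl' are
// placeholders) of the intended use, as in macro expansion and late
// inlining: collect a call's projections, then rewire them to a
// replacement subgraph:
//
//   CallProjections projs;
//   call->extract_projections(&projs, true /*separate_io_proj*/);
//   if (projs.fallthrough_memproj != NULL) {
//     igvn->replace_node(projs.fallthrough_memproj, new_mem);
//   }
//   if (projs.fallthrough_catchproj != NULL) {
//     igvn->replace_node(projs.fallthrough_catchproj, new_ctrl);
//   }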

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;

protected:
  bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr *t_oop, PhaseTransform *phase);

public:
  const TypeFunc *_tf;        // Function type
  address      _entry_point;  // Address of method being called
  float        _cnt;          // Estimate of number of times called
  CallGenerator* _generator;  // corresponding CallGenerator for some late inline calls
  const char *_name;          // Printable name, if _method is NULL

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL),
      _name(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()         const { return _tf; }
  const address  entry_point() const { return _entry_point; }
  const float    cnt()         const { return _cnt; }
  CallGenerator* generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type *bottom_type() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase) { return this; }
  virtual uint        cmp( const Node &n ) const;
  virtual uint        size_of() const = 0;
  virtual void        calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node       *match( const ProjNode *proj, const Matcher *m );
  virtual uint        ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool        guaranteed_safepoint()  { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. So when cloning
  // the node the JVMState must be cloned. Default is not to clone.
  virtual void clone_jvms(Compile* C) {
    if (C->needs_clone_jvms() && jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  // Returns true if the call may modify n
  virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool                has_non_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call,
  // or the result projection if there are several CheckCastPPs,
  // or NULL if there is none.
  Node *result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple *r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true);

  virtual uint match_edge(uint idx) const;

  bool is_call_to_arraycopystub() const;

#ifndef PRODUCT
  virtual void        dump_req(outputStream *st = tty) const;
  virtual void        dump_spec(outputStream *st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool    _optimized_virtual;
  bool    _method_handle_invoke;
  bool    _override_symbolic_info; // Override symbolic call site info from bytecode
  ciMethod* _method;               // Method being direct called
public:
  const int       _bci;         // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method), _bci(bci),
      _optimized_virtual(false),
      _method_handle_invoke(false),
      _override_symbolic_info(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int   Opcode() const;
  ciMethod* method() const                 { return _method; }
  void  set_method(ciMethod *m)            { _method = m; }
  void  set_optimized_virtual(bool f)      { _optimized_virtual = f; }
  bool  is_optimized_virtual() const       { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f)   { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const    { return _method_handle_invoke; }
  void  set_override_symbolic_info(bool f) { _override_symbolic_info = f; }
  bool  override_symbolic_info() const     { return _override_symbolic_info; }

  DEBUG_ONLY( bool validate_symbolic_info() const; )

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
  virtual void  dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
    _name = name;
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != NULL) && method()->is_boxing_method();
  }
  // Later inlining modifies the JVMState, so we need to clone it
  // when the call node is cloned (because it is a macro node).
  virtual void  clone_jvms(Compile* C) {
    if ((jvms() != NULL) && is_boxing_method()) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  virtual int         Opcode() const;
#ifndef PRODUCT
  virtual void        dump_spec(outputStream *st) const;
  virtual void        dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf, address addr, ciMethod* method, int vtable_index, int bci ) : CallJavaNode(tf,addr,method,bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int   Opcode() const;
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type)
  {
    init_class_id(Class_CallRuntime);
    _name = name;
  }

  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int   Opcode() const;
  virtual bool        guaranteed_safepoint()  { return false; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeafNoFP);
  }
  virtual int   Opcode() const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
//  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
//  get expanded into a code sequence containing a call.  Unlike other CallNodes,
//  they have 2 memory projections and 2 i_o projections (which are distinguished by
//  the _is_io_use flag in the projection.)  This is needed when expanding the node in
//  order to differentiate the uses of the projection on the normal control path from
//  those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
    // Inputs:
    AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
    KlassNode,                        // type (maybe dynamic) of the obj.
    InitialTest,                      // slow-path test (may be constant)
    ALength,                          // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = t;  // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;
  // True when MemBar for new is redundant with MemBar at initializer exit
  bool _is_allocation_MemBar_redundant;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void  clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool        guaranteed_safepoint()  { return false; }

  // allocations do not modify their arguments
  virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }
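
  // Sketch (illustrative only; 'ptr' and 'phase' are placeholders): a
  // typical pattern-match against an allocation site, starting from a
  // pointer that may be a projection of an Allocate or its CastPP:
  //
  //   intptr_t offset;
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase, offset);
  //   if (alloc != NULL) {
  //     // 'ptr' addresses the new object at the given byte offset
  //   }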

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);

  // Return true if the allocation doesn't escape the thread, i.e. its
  // escape state is NoEscape or ArgEscape. InitializeNode._does_not_escape
  // is true when its allocation's escape state is NoEscape or
  // ArgEscape. In case the allocation's InitializeNode is NULL, check
  // the AllocateNode._is_non_escaping flag.
  // AllocateNode._is_non_escaping is true when its escape state is
  // NoEscape.
  bool does_not_escape_thread() {
    InitializeNode* init = NULL;
    return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
  }

  // If the object doesn't escape in its <init> method and there is a memory
  // barrier inserted at the exit of the <init>, the memory barrier for the
  // new is not necessary. Invoke this method when the MemBar at the exit of
  // the initializer post-dominates the allocation node.
  void compute_MemBar_redundancy(ciMethod* initializer);
  bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength,        count_val);
  }
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for non escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;

  static const char* _kind_names[Nested+1];

#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *   obj_node() const       { return in(TypeFunc::Parms + 0); }
  Node *   box_node() const       { return in(TypeFunc::Parms + 1); }
  Node *   fastlock_node() const  { return in(TypeFunc::Parms + 2); }
  void     set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  const char * kind_as_string() const;
  void log_lock_optimization(Compile* c, const char * tag, Node* bad_lock = NULL) const;

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested; set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
  virtual void dump_spec(outputStream* st) const;
  virtual void dump_compact_spec(outputStream* st) const;
  virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0 - object to lock
//    1 - a BoxLockNode
//    2 - a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

    return TypeFunc::make(domain,range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool        guaranteed_safepoint()  { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void  clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  bool is_nested_lock_region(); // Is this Lock nested?
  bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
private:
#ifdef ASSERT
  JVMState* const _dbg_jvms;      // Pointer to list of JVM State objects
#endif
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
#ifdef ASSERT
    , _dbg_jvms(NULL)
#endif
  {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool        guaranteed_safepoint()  { return false; }
#ifdef ASSERT
  void set_dbg_jvms(JVMState* s) {
    *(JVMState**)&_dbg_jvms = s;  // override const attribute in the accessor
  }
  JVMState* dbg_jvms() const { return _dbg_jvms; }
#else
  JVMState* dbg_jvms() const { return NULL; }
#endif
};

#endif // SHARE_VM_OPTO_CALLNODE_HPP