/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLNODE_HPP
#define SHARE_VM_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/replacednodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class  SafePointNode;
class   CallNode;
class     CallJavaNode;
class       CallStaticJavaNode;
class       CallDynamicJavaNode;
class     CallRuntimeNode;
class       CallLeafNode;
class         CallLeafNoFPNode;
class     AllocateNode;
class       AllocateArrayNode;
class     BoxLockNode;
class     LockNode;
class     UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int   Opcode() const;
  static  const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site.  Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode.  Restores registers and
// unwinds stack.  Rethrow happens in the caller's method.
class RethrowNode : public Node {
 public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit.  (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
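//
// For example (a hypothetical call chain): if a() inlines a call to b() at
// bci 7, then a safepoint at bci 3 inside b() carries the two-link chain
//   { method: b, bci: 3, depth: 2 } --caller--> { method: a, bci: 7, depth: 1 }
// and of_depth(1) returns the root (a's) state.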
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false       -- do not reexecute
    Reexecute_True      =  1  // true        -- reexecute the bytecode
  } ReexecuteState; // Reexecute State

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size);  // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
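  //
  // A worked example (hypothetical numbers, derived from the sizes below):
  // with locoff == 10 and 3 locals, stkoff == 13; a max stack of 4 gives
  // monoff == 17; one monitor (2 edges, see logMonitorEdges) gives
  // scloff == 19; with no scalarized objects, endoff == 19.  If sp == 2,
  // then argoff == stkoff + sp == 15 and
  // debug_size() == loc_size() + sp() + mon_size() + scl_size()
  //              == 3 + 2 + 2 + 0 == 7.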
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              scloff() const { return _scloff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return stkoff() - locoff(); }
  int            stk_size() const { return monoff() - stkoff(); }
  int            mon_size() const { return scloff() - monoff(); }
  int            scl_size() const { return endoff() - scloff(); }

  bool        is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool        is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool        is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool        is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint                      sp() const { return _sp; }
  int                      bci() const { return _bci; }
  bool        should_reexecute() const { return _reexecute==Reexecute_True; }
  bool  is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; }
  bool              has_method() const { return _method != NULL; }
  ciMethod*             method() const { assert(has_method(), ""); return _method; }
  JVMState*             caller() const { return _caller; }
  SafePointNode*           map() const { return _map; }
  uint                   depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint               debug_end() const; // returns endoff of self
  uint              debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint        debug_depth()  const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                             || (caller() && caller()->is_monitor_use(off)); }
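  // For example (numbers assumed, logMonitorEdges == 1): with monoff() == 15,
  // monitor 0 occupies input edges 15 (box) and 16 (obj), monitor 1 occupies
  // 17 and 18, so monitor_box_offset(1) == 17 and monitor_obj_offset(1) == 18.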

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode *map) { _map = map; }
  void              set_sp(uint sp) { _sp = sp; }
                    // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void      set_map_deep(SafePointNode *map);// reset map for all callers
  void      adapt_position(int delta);       // Adapt offsets in in-array after adding an edge.
  int       interpreter_frame_size() const;

#ifndef PRODUCT
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint           cmp( const Node &n ) const;
  virtual uint           size_of() const;       // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  const TypePtr*  _adr_type;  // What type of memory does this node produce?
  ReplacedNodes   _replaced_nodes; // During parsing: list of node pairs from calls to GraphKit::replace_in_map()

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

 private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }
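  // Note (illustrative, indices assumed): a long or double occupies two
  // adjacent input slots, with the second slot holding top.  A long in
  // local 3 thus appears as in(locoff()+3) == the value and
  // in(locoff()+4) == top, which is exactly what verify_input() asserts.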

 public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void  set_local(JVMState* jvms, uint idx, Node *c);

  void  set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void  set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here.  (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode*         next_exception() const;
  void               set_next_exception(SafePointNode* n);
  bool                   has_exceptions() const { return next_exception() != NULL; }

  // Helper methods to operate on replaced nodes
  ReplacedNodes replaced_nodes() const {
    return _replaced_nodes;
  }

  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
    _replaced_nodes = replaced_nodes;
  }

  void clone_replaced_nodes() {
    _replaced_nodes.clone();
  }
  void record_replaced_node(Node* initial, Node* improved) {
    _replaced_nodes.record(initial, improved);
  }
  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
  }
  void delete_replaced_nodes() {
    _replaced_nodes.reset();
  }
  void apply_replaced_nodes(uint idx) {
    _replaced_nodes.apply(this, idx);
  }
  void merge_replaced_nodes_with(SafePointNode* sfpt) {
    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
  }
  bool has_replaced_nodes() const {
    return !_replaced_nodes.is_empty();
  }

  void disconnect_from_root(PhaseIterGVN *igvn);

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type    *Value( PhaseTransform *phase ) const;
  virtual const Type    *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node          *Identity( PhaseTransform *phase );
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  static  bool           needs_polling_address_input();

#ifndef PRODUCT
  virtual void           dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
                     // It is relative to the last (youngest) jvms->_scloff.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const; // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint           ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != NULL, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
  uint n_fields()    const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void              dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};
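
// A typical use (sketch only; real call sites in the macro expander and
// late-inliner differ in detail): collect a call's projections, then
// rewire them to a replacement subgraph, e.g.
//
//   CallProjections projs;
//   call->extract_projections(&projs, true /*separate_io_proj*/);
//   if (projs.fallthrough_memproj != NULL) {
//     igvn.replace_node(projs.fallthrough_memproj, new_mem);
//   }
//
// where 'igvn' is the current PhaseIterGVN and 'new_mem' is whatever memory
// state the replacement produces.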

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;
public:
  const TypeFunc *_tf;        // Function type
  address      _entry_point;  // Address of method being called
  float        _cnt;          // Estimate of number of times called
  CallGenerator* _generator;  // corresponding CallGenerator for some late inline calls

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()         const { return _tf; }
  const address  entry_point() const { return _entry_point; }
  const float    cnt()         const { return _cnt; }
  CallGenerator* generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint        cmp( const Node &n ) const;
  virtual uint        size_of() const = 0;
  virtual void        calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node       *match( const ProjNode *proj, const Matcher *m );
  virtual uint        ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool        guaranteed_safepoint()  { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. So when cloning
  // the node the JVMState must be cloned. Default is not to clone.
  virtual void clone_jvms(Compile* C) {
    if (C->needs_clone_jvms() && jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  // Returns true if the call may modify n
  virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool                has_non_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call, or the result projection if
  // there are several CheckCastPPs, or NULL if there is none.
  Node *result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple *r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj);

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void        dump_req(outputStream *st = tty) const;
  virtual void        dump_spec(outputStream *st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool    _optimized_virtual;
  bool    _method_handle_invoke;
  ciMethod* _method;            // Method being direct called
public:
  const int       _bci;         // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method), _bci(bci),
      _optimized_virtual(false),
      _method_handle_invoke(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int   Opcode() const;
  ciMethod* method() const                { return _method; }
  void  set_method(ciMethod *m)           { _method = m; }
  void  set_optimized_virtual(bool f)     { _optimized_virtual = f; }
  bool  is_optimized_virtual() const      { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f)  { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const   { return _method_handle_invoke; }

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  const char *_name;      // Runtime wrapper name

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != NULL) && method()->is_boxing_method();
  }
  // Later inlining modifies the JVMState, so we need to clone it
  // when the call node is cloned (because it is a macro node).
  virtual void  clone_jvms(Compile* C) {
    if ((jvms() != NULL) && is_boxing_method()) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  virtual int         Opcode() const;
#ifndef PRODUCT
  virtual void        dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf, address addr, ciMethod* method, int vtable_index, int bci )
    : CallJavaNode(tf, addr, method, bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int   Opcode() const;
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name;            // Printable name, if _method is NULL
  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int   Opcode() const;
  virtual bool        guaranteed_safepoint()  { return false; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int   Opcode() const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
//  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
//  get expanded into a code sequence containing a call.  Unlike other CallNodes,
//  they have 2 memory projections and 2 i_o projections (which are distinguished by
//  the _is_io_use flag in the projection.)  This is needed when expanding the node in
//  order to differentiate the uses of the projection on the normal control path from
//  those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
    // Inputs:
    AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
    KlassNode,                        // type (maybe dynamic) of the obj.
    InitialTest,                      // slow-path test (may be constant)
    ALength,                          // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = t;  // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }
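
  // For example (hedged; see GraphKit::new_instance/new_array for the real
  // call sites): an instance allocation would use alloc_type(Type::TOP),
  // since ALength is "TOP if none", while an array allocation would pass
  // the array-length type, e.g. alloc_type(TypeInt::INT).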

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void  clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool        guaranteed_safepoint()  { return false; }

  // allocations do not modify their arguments
  virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);
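
  // Shape sketch (assumed, per the comments above): both overloads walk
  // back through chains such as
  //
  //   Allocate --> Proj(RawAddress) --> CheckCastPP [--> AddP(base, offset)]
  //
  // and return the AllocateNode at the root, or NULL if 'ptr' is not the
  // result of a recognized allocation.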

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);

#ifdef AARCH64
  // Return true if the allocation doesn't escape the thread, i.e. its
  // escape state needs to be NoEscape or ArgEscape.
  // InitializeNode._does_not_escape is true when the allocation's escape
  // state is NoEscape or ArgEscape.  In case the allocation's
  // InitializeNode is NULL, check AllocateNode._is_non_escaping, which is
  // true when the escape state is NoEscape.
  bool does_not_escape_thread() {
    InitializeNode* init = NULL;
    return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
  }
#endif
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength,        count_val);
  }
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for a non-escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *   obj_node() const       { return in(TypeFunc::Parms + 0); }
  Node *   box_node() const       { return in(TypeFunc::Parms + 1); }
  Node *   fastlock_node() const  { return in(TypeFunc::Parms + 2); }
  void     set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  const char * kind_as_string() const;
  void log_lock_optimization(Compile* c, const char * tag) const;

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested; set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0 - object to lock
//    1 - a BoxLockNode
//    2 - a FastLockNode
//
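// These correspond to the obj_node(), box_node() and fastlock_node()
// accessors on AbstractLockNode (inputs TypeFunc::Parms + 0, 1 and 2), and
// to the three domain fields built by lock_type() below.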
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

    return TypeFunc::make(domain,range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool        guaranteed_safepoint()  { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void  clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  bool is_nested_lock_region(); // Is this Lock nested?
  bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
private:
#ifdef ASSERT
  JVMState* const _dbg_jvms;      // Pointer to list of JVM State objects
#endif
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
#ifdef ASSERT
    , _dbg_jvms(NULL)
#endif
  {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool        guaranteed_safepoint()  { return false; }
#ifdef ASSERT
  void set_dbg_jvms(JVMState* s) {
    *(JVMState**)&_dbg_jvms = s;  // override const attribute in the accessor
  }
  JVMState* dbg_jvms() const { return _dbg_jvms; }
#else
  JVMState* dbg_jvms() const { return NULL; }
#endif
};

#endif // SHARE_VM_OPTO_CALLNODE_HPP