/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_CALLNODE_HPP
#define SHARE_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/replacednodes.hpp"
#include "opto/type.hpp"
#include "utilities/growableArray.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class CallNativeNode;
class AllocateNode;
class AllocateArrayNode;
class BoxLockNode;
class LockNode;
class UnlockNode;
class JVMState;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0, this);
    init_req(1, root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
  static const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src, con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
  virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site.  Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode.  Restores registers and
// unwinds stack.  Rethrow happens in the caller's method.
class RethrowNode : public Node {
 public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit.  (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
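//
// For example (schematic, not code from this file): if bar() is inlined
// into foo(), a call site inside bar() carries a two-link chain:
//
//   youngest link:  { _method: bar, _bci: site in bar,    _caller: root link }
//   root link:      { _method: foo, _bci: call site of bar, _caller: NULL    }
//
// of_depth(1) returns the root link and of_depth(depth()) the youngest one.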
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; //Reexecute State

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size);  // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              scloff() const { return _scloff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return stkoff() - locoff(); }
  int            stk_size() const { return monoff() - stkoff(); }
  int            mon_size() const { return scloff() - monoff(); }
  int            scl_size() const { return endoff() - scloff(); }

  bool        is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool        is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool        is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool        is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint                      sp() const { return _sp; }
  int                      bci() const { return _bci; }
  bool        should_reexecute() const { return _reexecute == Reexecute_True; }
  bool  is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool              has_method() const { return _method != NULL; }
  ciMethod*             method() const { assert(has_method(), ""); return _method; }
  JVMState*             caller() const { return _caller; }
  SafePointNode*           map() const { return _map; }
  uint                   depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint               debug_end() const; // returns endoff of self
  uint              debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint        debug_depth() const; // returns sum of debug_size values at all depths
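  // For example (schematic): local i of this scope lives at map input
  // locoff() + i, stack slot i at stkoff() + i, and outgoing call arguments
  // start at argoff() == stkoff() + sp().  So with locoff() == 4,
  // stkoff() == 9 and sp() == 2, local 0 is in(4) and the top of the
  // expression stack is in(stkoff() + sp() - 1) == in(10).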
  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                             || (caller() && caller()->is_monitor_use(off)); }

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode *map) { _map = map; }
  void              set_sp(uint sp) { _sp = sp; }
                    // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void      set_map_deep(SafePointNode *map);// reset map for all callers
  void      adapt_position(int delta);       // Adapt offsets in in-array after adding an edge.
  int       interpreter_frame_size() const;

#ifndef PRODUCT
  void      print_method_with_lineno(outputStream* st, bool show_name) const;
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const;  // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _adr_type(adr_type),
      _has_ea_local_in_scope(false)
  {
    init_class_id(Class_SafePoint);
  }

  JVMState* const _jvms;      // Pointer to list of JVM State objects
  const TypePtr*  _adr_type;  // What type of memory does this node produce?
  ReplacedNodes   _replaced_nodes; // During parsing: list of pair of nodes from calls to GraphKit::replace_in_map()
  bool            _has_ea_local_in_scope; // NoEscape or ArgEscape objects in JVM States

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }

 private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

 public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void set_local(JVMState* jvms, uint idx, Node *c);

  void set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;
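
  // For example (schematic): monitor idx of a scope occupies two adjacent
  // inputs, one (box, obj) pair per monitor (JVMState::logMonitorEdges == 1):
  //   Node* box = map->in(jvms->monitor_box_offset(idx)); // monoff() + 2*idx
  //   Node* obj = map->in(jvms->monitor_obj_offset(idx)); // monoff() + 2*idx + 1
  // which is what monitor_box() and monitor_obj() above compute.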

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control( Node *c ) { set_req(TypeFunc::Control, c); }
  void set_i_o    ( Node *c ) { set_req(TypeFunc::I_O,     c); }
  void set_memory ( Node *c ) { set_req(TypeFunc::Memory,  c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here.  (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode*         next_exception() const;
  void               set_next_exception(SafePointNode* n);
  bool                   has_exceptions() const { return next_exception() != NULL; }

  // Helper methods to operate on replaced nodes
  ReplacedNodes replaced_nodes() const {
    return _replaced_nodes;
  }

  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
    _replaced_nodes = replaced_nodes;
  }

  void clone_replaced_nodes() {
    _replaced_nodes.clone();
  }
  void record_replaced_node(Node* initial, Node* improved) {
    _replaced_nodes.record(initial, improved);
  }
  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
  }
  void delete_replaced_nodes() {
    _replaced_nodes.reset();
  }
  void apply_replaced_nodes(uint idx) {
    _replaced_nodes.apply(this, idx);
  }
  void merge_replaced_nodes_with(SafePointNode* sfpt) {
    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
  }
  bool has_replaced_nodes() const {
    return !_replaced_nodes.is_empty();
  }
  void set_has_ea_local_in_scope(bool b) {
    _has_ea_local_in_scope = b;
  }
  bool has_ea_local_in_scope() const {
    return _has_ea_local_in_scope;
  }

  void disconnect_from_root(PhaseIterGVN *igvn);

  // Standard Node stuff
  virtual int             Opcode() const;
  virtual bool            pinned() const { return true; }
  virtual const Type*     Value(PhaseGVN* phase) const;
  virtual const Type*     bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr*  adr_type() const { return _adr_type; }
  virtual Node*           Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node*           Identity(PhaseGVN* phase);
  virtual uint            ideal_reg() const { return 0; }
  virtual const RegMask  &in_RegMask(uint) const;
  virtual const RegMask  &out_RegMask() const;
  virtual uint            match_edge(uint idx) const;

  static bool needs_polling_address_input();

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
                     // It is relative to the last (youngest) jvms->_scloff.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const ; // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != NULL, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
  uint n_fields() const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map, bool& new_node) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};
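
// A typical use (sketch only; extract_projections is declared on CallNode
// below): collect a call's projections before rewiring them, e.g.
//   CallProjections projs;
//   call->extract_projections(&projs, true /*separate_io_proj*/);
//   // ... rewire projs.fallthrough_catchproj, projs.resproj, etc. ...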

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;

protected:
  bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseTransform* phase);

public:
  const TypeFunc* _tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
  const char*     _name;        // Printable name, if _method is NULL

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL),
      _name(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()          const { return _tf; }
  const address   entry_point() const { return _entry_point; }
  const float     cnt()         const { return _cnt; }
  CallGenerator*  generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type* bottom_type() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase) { return this; }
  virtual bool  cmp(const Node &n) const;
  virtual uint  size_of() const = 0;
  virtual void  calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
  virtual Node* match(const ProjNode* proj, const Matcher* m);
  virtual uint  ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool  guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. So when cloning
  // the node the JVMState must be cloned. Default is not to clone.
  virtual bool needs_clone_jvms(Compile* C) { return C->needs_clone_jvms(); }
  void clone_jvms(Compile* C) {
    if ((jvms() != NULL) && needs_clone_jvms(C)) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  // Returns true if the call may modify n
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseTransform* phase);
  // Does this node have a use of n other than in debug information?
  bool has_non_debug_use(Node* n);
  // Returns the unique CheckCastPP of a call, or the result projection
  // if there are several CheckCastPPs, or NULL if there is none.
  Node* result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple* r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true);

  virtual uint match_edge(uint idx) const;

  bool is_call_to_arraycopystub() const;

  virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}

#ifndef PRODUCT
  virtual void dump_req(outputStream* st = tty) const;
  virtual void dump_spec(outputStream* st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool      _optimized_virtual;
  bool      _method_handle_invoke;
  bool      _override_symbolic_info; // Override symbolic call site info from bytecode
  ciMethod* _method;                 // Method being direct called
  bool      _arg_escape;             // ArgEscape in parameter list
public:
  const int _bci;                    // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _optimized_virtual(false),
      _method_handle_invoke(false),
      _override_symbolic_info(false),
      _method(method),
      _arg_escape(false), _bci(bci)
  {
    init_class_id(Class_CallJava);
  }

  virtual int Opcode() const;
  ciMethod* method() const                 { return _method; }
  void  set_method(ciMethod *m)            { _method = m; }
  void  set_optimized_virtual(bool f)      { _optimized_virtual = f; }
  bool  is_optimized_virtual() const       { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f)   { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const    { return _method_handle_invoke; }
  void  set_override_symbolic_info(bool f) { _override_symbolic_info = f; }
  bool  override_symbolic_info() const     { return _override_symbolic_info; }
  void  set_arg_escape(bool f)             { _arg_escape = f; }
  bool  arg_escape() const                 { return _arg_escape; }
  void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);

  DEBUG_ONLY( bool validate_symbolic_info() const; )

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _name = name;
  }

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != NULL) && method()->is_boxing_method();
  }
  // Late inlining modifies the JVMState, so we need to clone it
  // when the call node is cloned (because it is a macro node).
  virtual bool needs_clone_jvms(Compile* C) {
    return is_boxing_method() || CallNode::needs_clone_jvms(C);
  }

  virtual int Opcode() const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
  virtual void dump_compact_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int vtable_index, int bci)
    : CallJavaNode(tf, addr, method, bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  // Late inlining modifies the JVMState, so we need to clone it
  // when the call node is cloned.
  virtual bool needs_clone_jvms(Compile* C) {
    return IncrementalInlineVirtual || CallNode::needs_clone_jvms(C);
  }

  int _vtable_index;
  virtual int Opcode() const;
  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type)
  {
    init_class_id(Class_CallRuntime);
    _name = name;
  }

  virtual int  Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int  Opcode() const;
  virtual bool guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallNativeNode-----------------------------------
// Make a direct call into a foreign function with an arbitrary ABI;
// may include safepoints
class CallNativeNode : public CallNode {
  friend class MachCallNativeNode;
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const;
  static void print_regs(const GrowableArray<VMReg>& regs, outputStream* st);
public:
  GrowableArray<VMReg> _arg_regs;
  GrowableArray<VMReg> _ret_regs;
  const int _shadow_space_bytes;
  const bool _need_transition;

  CallNativeNode(const TypeFunc* tf, address addr, const char* name,
                 const TypePtr* adr_type,
                 const GrowableArray<VMReg>& arg_regs,
                 const GrowableArray<VMReg>& ret_regs,
                 int shadow_space_bytes,
                 bool need_transition)
    : CallNode(tf, addr, adr_type), _arg_regs(arg_regs),
      _ret_regs(ret_regs), _shadow_space_bytes(shadow_space_bytes),
      _need_transition(need_transition)
  {
    init_class_id(Class_CallNative);
    _name = name;
  }
  virtual int   Opcode() const;
  virtual bool  guaranteed_safepoint() { return _need_transition; }
  virtual Node* match(const ProjNode *proj, const Matcher *m);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeafNoFP);
  }
  virtual int Opcode() const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
//  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
//  get expanded into a code sequence containing a call.  Unlike other CallNodes,
//  they have 2 memory projections and 2 i_o projections (which are distinguished by
//  the _is_io_use flag in the projection.)  This is needed when expanding the node in
//  order to differentiate the uses of the projection on the normal control path from
//  those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
    // Inputs:
    AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
    KlassNode,                        // type (maybe dynamic) of the obj.
    InitialTest,                      // slow-path test (may be constant)
    ALength,                          // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = t;  // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;
  // True when MemBar for new is redundant with MemBar at initializer exit
  bool _is_allocation_MemBar_redundant;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual bool needs_clone_jvms(Compile* C) { return true; }
  virtual int  Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);

  // Return true if the allocation doesn't escape the thread, i.e. its
  // escape state is NoEscape or ArgEscape.  InitializeNode._does_not_escape
  // is true when the allocation's escape state is NoEscape or ArgEscape.
  // In case the allocation's InitializeNode is NULL, check the
  // AllocateNode._is_non_escaping flag instead;
  // AllocateNode._is_non_escaping is true when the escape state is NoEscape.
  bool does_not_escape_thread() {
    InitializeNode* init = NULL;
    return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape());
  }

  // If the object doesn't escape in its <init> method and there is a memory
  // barrier inserted at the exit of the <init>, the memory barrier for the
  // new is not necessary.  Invoke this method when the MemBar at the exit of
  // the initializer post-dominates the allocation node.
  void compute_MemBar_redundancy(ciMethod* initializer);
  bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }

  Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
  }
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary.
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for non escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter* _counter;
  static const char* _kind_names[Nested+1];
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node* obj_node() const      { return in(TypeFunc::Parms + 0); }
  Node* box_node() const      { return in(TypeFunc::Parms + 1); }
  Node* fastlock_node() const { return in(TypeFunc::Parms + 2); }
  void  set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  const char * kind_as_string() const;
  void log_lock_optimization(Compile* c, const char * tag) const;

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested;    set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
  virtual void dump_spec(outputStream* st) const;
  virtual void dump_compact_spec(outputStream* st) const;
  virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const;
#endif
};
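
// For example (schematic): when a lock is proven removable, it is marked
// eliminated via _kind rather than deleted outright, using the setters
// declared above:
//
//   if (/* escape analysis proves alock->obj_node() is thread-local */)
//     alock->set_non_esc_obj();   // is_eliminated() is now true
//
// Coarsened and nested locks are marked analogously with set_coarsened()
// and set_nested().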

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0  -  object to lock
//    1  -  a BoxLockNode
//    2  -  a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

    return TypeFunc::make(domain, range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual bool needs_clone_jvms(Compile* C) { return true; }

  bool is_nested_lock_region(); // Is this Lock nested?
  bool is_nested_lock_region(Compile * c); // Why isn't this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
private:
#ifdef ASSERT
  JVMState* const _dbg_jvms;      // Pointer to list of JVM State objects
#endif
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf )
#ifdef ASSERT
    , _dbg_jvms(NULL)
#endif
  {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
#ifdef ASSERT
  void set_dbg_jvms(JVMState* s) {
    *(JVMState**)&_dbg_jvms = s;  // override const attribute in the accessor
  }
  JVMState* dbg_jvms() const { return _dbg_jvms; }
#else
  JVMState* dbg_jvms() const { return NULL; }
#endif
};
#endif // SHARE_OPTO_CALLNODE_HPP