1 /* 2 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_OPTO_MEMNODE_HPP 26 #define SHARE_OPTO_MEMNODE_HPP 27 28 #include "opto/multnode.hpp" 29 #include "opto/node.hpp" 30 #include "opto/opcodes.hpp" 31 #include "opto/type.hpp" 32 33 // Portions of code courtesy of Clifford Click 34 35 class MultiNode; 36 class PhaseCCP; 37 class PhaseTransform; 38 39 //------------------------------MemNode---------------------------------------- 40 // Load or Store, possibly throwing a NULL pointer exception 41 class MemNode : public Node { 42 private: 43 bool _unaligned_access; // Unaligned access from unsafe 44 bool _mismatched_access; // Mismatched access from unsafe: byte read in integer array for instance 45 bool _unsafe_access; // Access of unsafe origin. 46 uint8_t _barrier_data; // Bit field with barrier information 47 48 protected: 49 #ifdef ASSERT 50 const TypePtr* _adr_type; // What kind of memory is being addressed? 
51 #endif 52 virtual uint size_of() const; 53 public: 54 enum { Control, // When is it safe to do this load? 55 Memory, // Chunk of memory is being loaded from 56 Address, // Actually address, derived from base 57 ValueIn, // Value to store 58 OopStore // Preceeding oop store, only in StoreCM 59 }; 60 typedef enum { unordered = 0, 61 acquire, // Load has to acquire or be succeeded by MemBarAcquire. 62 release, // Store has to release or be preceded by MemBarRelease. 63 seqcst, // LoadStore has to have both acquire and release semantics. 64 unset // The memory ordering is not set (used for testing) 65 } MemOrd; 66 protected: MemNode(Node * c0,Node * c1,Node * c2,const TypePtr * at)67 MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at ) : 68 Node(c0,c1,c2), 69 _unaligned_access(false), 70 _mismatched_access(false), 71 _unsafe_access(false), 72 _barrier_data(0) { 73 init_class_id(Class_Mem); 74 debug_only(_adr_type=at; adr_type();) 75 } MemNode(Node * c0,Node * c1,Node * c2,const TypePtr * at,Node * c3)76 MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) : 77 Node(c0,c1,c2,c3), 78 _unaligned_access(false), 79 _mismatched_access(false), 80 _unsafe_access(false), 81 _barrier_data(0) { 82 init_class_id(Class_Mem); 83 debug_only(_adr_type=at; adr_type();) 84 } MemNode(Node * c0,Node * c1,Node * c2,const TypePtr * at,Node * c3,Node * c4)85 MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) : 86 Node(c0,c1,c2,c3,c4), 87 _unaligned_access(false), 88 _mismatched_access(false), 89 _unsafe_access(false), 90 _barrier_data(0) { 91 init_class_id(Class_Mem); 92 debug_only(_adr_type=at; adr_type();) 93 } 94 find_previous_arraycopy(PhaseTransform * phase,Node * ld_alloc,Node * & mem,bool can_see_stored_value) const95 virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return NULL; } 96 ArrayCopyNode* find_array_copy_clone(PhaseTransform* phase, Node* ld_alloc, 
Node* mem) const; 97 static bool check_if_adr_maybe_raw(Node* adr); 98 99 public: 100 // Helpers for the optimizer. Documented in memnode.cpp. 101 static bool detect_ptr_independence(Node* p1, AllocateNode* a1, 102 Node* p2, AllocateNode* a2, 103 PhaseTransform* phase); 104 static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast); 105 106 static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase); 107 static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase); 108 // This one should probably be a phase-specific function: 109 static bool all_controls_dominate(Node* dom, Node* sub); 110 111 virtual const class TypePtr *adr_type() const; // returns bottom_type of address 112 113 // Shared code for Ideal methods: 114 Node *Ideal_common(PhaseGVN *phase, bool can_reshape); // Return -1 for short-circuit NULL. 115 116 // Helper function for adr_type() implementations. 117 static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL); 118 119 // Raw access function, to allow copying of adr_type efficiently in 120 // product builds and retain the debug info for debug builds. raw_adr_type() const121 const TypePtr *raw_adr_type() const { 122 #ifdef ASSERT 123 return _adr_type; 124 #else 125 return 0; 126 #endif 127 } 128 129 // Map a load or store opcode to its corresponding store opcode. 130 // (Return -1 if unknown.) store_Opcode() const131 virtual int store_Opcode() const { return -1; } 132 133 // What is the type of the value in memory? (T_VOID mean "unspecified".) 
134 virtual BasicType memory_type() const = 0; memory_size() const135 virtual int memory_size() const { 136 #ifdef ASSERT 137 return type2aelembytes(memory_type(), true); 138 #else 139 return type2aelembytes(memory_type()); 140 #endif 141 } 142 barrier_data()143 uint8_t barrier_data() { return _barrier_data; } set_barrier_data(uint8_t barrier_data)144 void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; } 145 146 // Search through memory states which precede this node (load or store). 147 // Look for an exact match for the address, with no intervening 148 // aliased stores. 149 Node* find_previous_store(PhaseTransform* phase); 150 151 // Can this node (load or store) accurately see a stored value in 152 // the given memory state? (The state may or may not be in(Memory).) 153 Node* can_see_stored_value(Node* st, PhaseTransform* phase) const; 154 set_unaligned_access()155 void set_unaligned_access() { _unaligned_access = true; } is_unaligned_access() const156 bool is_unaligned_access() const { return _unaligned_access; } set_mismatched_access()157 void set_mismatched_access() { _mismatched_access = true; } is_mismatched_access() const158 bool is_mismatched_access() const { return _mismatched_access; } set_unsafe_access()159 void set_unsafe_access() { _unsafe_access = true; } is_unsafe_access() const160 bool is_unsafe_access() const { return _unsafe_access; } 161 162 #ifndef PRODUCT 163 static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st); 164 virtual void dump_spec(outputStream *st) const; 165 #endif 166 }; 167 168 //------------------------------LoadNode--------------------------------------- 169 // Load value; requires Memory and Address 170 class LoadNode : public MemNode { 171 public: 172 // Some loads (from unsafe) should be pinned: they don't depend only 173 // on the dominating test. The field _control_dependency below records 174 // whether that node depends only on the dominating test. 
175 // Pinned and UnknownControl are similar, but differ in that Pinned 176 // loads are not allowed to float across safepoints, whereas UnknownControl 177 // loads are allowed to do that. Therefore, Pinned is stricter. 178 enum ControlDependency { 179 Pinned, 180 UnknownControl, 181 DependsOnlyOnTest 182 }; 183 184 private: 185 // LoadNode::hash() doesn't take the _control_dependency field 186 // into account: If the graph already has a non-pinned LoadNode and 187 // we add a pinned LoadNode with the same inputs, it's safe for GVN 188 // to replace the pinned LoadNode with the non-pinned LoadNode, 189 // otherwise it wouldn't be safe to have a non pinned LoadNode with 190 // those inputs in the first place. If the graph already has a 191 // pinned LoadNode and we add a non pinned LoadNode with the same 192 // inputs, it's safe (but suboptimal) for GVN to replace the 193 // non-pinned LoadNode by the pinned LoadNode. 194 ControlDependency _control_dependency; 195 196 // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish 197 // loads that can be reordered, and such requiring acquire semantics to 198 // adhere to the Java specification. The required behaviour is stored in 199 // this field. 200 const MemOrd _mo; 201 202 AllocateNode* is_new_object_mark_load(PhaseGVN *phase) const; 203 204 protected: 205 virtual bool cmp(const Node &n) const; 206 virtual uint size_of() const; // Size is bigger 207 // Should LoadNode::Ideal() attempt to remove control edges? 208 virtual bool can_remove_control() const; 209 const Type* const _type; // What kind of value is loaded? 
210 211 virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const; 212 public: 213 LoadNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const Type * rt,MemOrd mo,ControlDependency control_dependency)214 LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency) 215 : MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _type(rt) { 216 init_class_id(Class_Load); 217 } is_unordered() const218 inline bool is_unordered() const { return !is_acquire(); } is_acquire() const219 inline bool is_acquire() const { 220 assert(_mo == unordered || _mo == acquire, "unexpected"); 221 return _mo == acquire; 222 } is_unsigned() const223 inline bool is_unsigned() const { 224 int lop = Opcode(); 225 return (lop == Op_LoadUB) || (lop == Op_LoadUS); 226 } 227 228 // Polymorphic factory method: 229 static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr, 230 const TypePtr* at, const Type *rt, BasicType bt, 231 MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, 232 bool unaligned = false, bool mismatched = false, bool unsafe = false, 233 uint8_t barrier_data = 0); 234 235 virtual uint hash() const; // Check the type 236 237 // Handle algebraic identities here. If we have an identity, return the Node 238 // we are equivalent to. We look for Load of a Store. 239 virtual Node* Identity(PhaseGVN* phase); 240 241 // If the load is from Field memory and the pointer is non-null, it might be possible to 242 // zero out the control input. 243 // If the offset is constant and the base is an object allocation, 244 // try to hook me up to the exact initializing store. 245 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 246 247 // Split instance field load through Phi. 
248 Node* split_through_phi(PhaseGVN *phase); 249 250 // Recover original value from boxed values 251 Node *eliminate_autobox(PhaseGVN *phase); 252 253 // Compute a new Type for this node. Basically we just do the pre-check, 254 // then call the virtual add() to set the type. 255 virtual const Type* Value(PhaseGVN* phase) const; 256 257 // Common methods for LoadKlass and LoadNKlass nodes. 258 const Type* klass_value_common(PhaseGVN* phase) const; 259 Node* klass_identity_common(PhaseGVN* phase); 260 261 virtual uint ideal_reg() const; 262 virtual const Type *bottom_type() const; 263 // Following method is copied from TypeNode: set_type(const Type * t)264 void set_type(const Type* t) { 265 assert(t != NULL, "sanity"); 266 debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); 267 *(const Type**)&_type = t; // cast away const-ness 268 // If this node is in the hash table, make sure it doesn't need a rehash. 269 assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code"); 270 } type() const271 const Type* type() const { assert(_type != NULL, "sanity"); return _type; }; 272 273 // Do not match memory edge 274 virtual uint match_edge(uint idx) const; 275 276 // Map a load opcode to its corresponding store opcode. 277 virtual int store_Opcode() const = 0; 278 279 // Check if the load's memory input is a Phi node with the same control. 
280 bool is_instance_field_load_with_local_phi(Node* ctrl); 281 282 Node* convert_to_unsigned_load(PhaseGVN& gvn); 283 Node* convert_to_signed_load(PhaseGVN& gvn); 284 285 bool has_reinterpret_variant(const Type* rt); 286 Node* convert_to_reinterpret_load(PhaseGVN& gvn, const Type* rt); 287 pin()288 void pin() { _control_dependency = Pinned; } has_unknown_control_dependency() const289 bool has_unknown_control_dependency() const { return _control_dependency == UnknownControl; } 290 291 #ifndef PRODUCT 292 virtual void dump_spec(outputStream *st) const; 293 #endif 294 #ifdef ASSERT 295 // Helper function to allow a raw load without control edge for some cases 296 static bool is_immutable_value(Node* adr); 297 #endif 298 protected: 299 const Type* load_array_final_field(const TypeKlassPtr *tkls, 300 ciKlass* klass) const; 301 302 Node* can_see_arraycopy_value(Node* st, PhaseGVN* phase) const; 303 304 // depends_only_on_test is almost always true, and needs to be almost always 305 // true to enable key hoisting & commoning optimizations. However, for the 306 // special case of RawPtr loads from TLS top & end, and other loads performed by 307 // GC barriers, the control edge carries the dependence preventing hoisting past 308 // a Safepoint instead of the memory edge. (An unfortunate consequence of having 309 // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes 310 // which produce results (new raw memory state) inside of loops preventing all 311 // manner of other optimizations). Basically, it's ugly but so is the alternative. 312 // See comment in macro.cpp, around line 125 expand_allocate_common(). 
depends_only_on_test() const313 virtual bool depends_only_on_test() const { 314 return adr_type() != TypeRawPtr::BOTTOM && _control_dependency == DependsOnlyOnTest; 315 } 316 }; 317 318 //------------------------------LoadBNode-------------------------------------- 319 // Load a byte (8bits signed) from memory 320 class LoadBNode : public LoadNode { 321 public: LoadBNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const TypeInt * ti,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)322 LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 323 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} 324 virtual int Opcode() const; ideal_reg() const325 virtual uint ideal_reg() const { return Op_RegI; } 326 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 327 virtual const Type* Value(PhaseGVN* phase) const; store_Opcode() const328 virtual int store_Opcode() const { return Op_StoreB; } memory_type() const329 virtual BasicType memory_type() const { return T_BYTE; } 330 }; 331 332 //------------------------------LoadUBNode------------------------------------- 333 // Load a unsigned byte (8bits unsigned) from memory 334 class LoadUBNode : public LoadNode { 335 public: LoadUBNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const TypeInt * ti,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)336 LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 337 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} 338 virtual int Opcode() const; ideal_reg() const339 virtual uint ideal_reg() const { return Op_RegI; } 340 virtual Node* Ideal(PhaseGVN *phase, bool can_reshape); 341 virtual const Type* Value(PhaseGVN* phase) const; store_Opcode() const342 virtual int store_Opcode() const { return Op_StoreB; } memory_type() const343 virtual 
BasicType memory_type() const { return T_BYTE; } 344 }; 345 346 //------------------------------LoadUSNode------------------------------------- 347 // Load an unsigned short/char (16bits unsigned) from memory 348 class LoadUSNode : public LoadNode { 349 public: LoadUSNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const TypeInt * ti,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)350 LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 351 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} 352 virtual int Opcode() const; ideal_reg() const353 virtual uint ideal_reg() const { return Op_RegI; } 354 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 355 virtual const Type* Value(PhaseGVN* phase) const; store_Opcode() const356 virtual int store_Opcode() const { return Op_StoreC; } memory_type() const357 virtual BasicType memory_type() const { return T_CHAR; } 358 }; 359 360 //------------------------------LoadSNode-------------------------------------- 361 // Load a short (16bits signed) from memory 362 class LoadSNode : public LoadNode { 363 public: LoadSNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const TypeInt * ti,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)364 LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 365 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} 366 virtual int Opcode() const; ideal_reg() const367 virtual uint ideal_reg() const { return Op_RegI; } 368 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 369 virtual const Type* Value(PhaseGVN* phase) const; store_Opcode() const370 virtual int store_Opcode() const { return Op_StoreC; } memory_type() const371 virtual BasicType memory_type() const { return T_SHORT; } 372 }; 373 374 
//------------------------------LoadINode-------------------------------------- 375 // Load an integer from memory 376 class LoadINode : public LoadNode { 377 public: LoadINode(Node * c,Node * mem,Node * adr,const TypePtr * at,const TypeInt * ti,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)378 LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 379 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} 380 virtual int Opcode() const; ideal_reg() const381 virtual uint ideal_reg() const { return Op_RegI; } store_Opcode() const382 virtual int store_Opcode() const { return Op_StoreI; } memory_type() const383 virtual BasicType memory_type() const { return T_INT; } 384 }; 385 386 //------------------------------LoadRangeNode---------------------------------- 387 // Load an array length from the array 388 class LoadRangeNode : public LoadINode { 389 public: LoadRangeNode(Node * c,Node * mem,Node * adr,const TypeInt * ti=TypeInt::POS)390 LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS) 391 : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {} 392 virtual int Opcode() const; 393 virtual const Type* Value(PhaseGVN* phase) const; 394 virtual Node* Identity(PhaseGVN* phase); 395 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 396 }; 397 398 //------------------------------LoadLNode-------------------------------------- 399 // Load a long from memory 400 class LoadLNode : public LoadNode { hash() const401 virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; } cmp(const Node & n) const402 virtual bool cmp( const Node &n ) const { 403 return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access 404 && LoadNode::cmp(n); 405 } size_of() const406 virtual uint size_of() const { return sizeof(*this); } 407 const bool _require_atomic_access; // is piecewise load forbidden? 
408 409 public: LoadLNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const TypeLong * tl,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest,bool require_atomic_access=false)410 LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl, 411 MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false) 412 : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {} 413 virtual int Opcode() const; ideal_reg() const414 virtual uint ideal_reg() const { return Op_RegL; } store_Opcode() const415 virtual int store_Opcode() const { return Op_StoreL; } memory_type() const416 virtual BasicType memory_type() const { return T_LONG; } require_atomic_access() const417 bool require_atomic_access() const { return _require_atomic_access; } 418 static LoadLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, 419 const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, 420 bool unaligned = false, bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0); 421 #ifndef PRODUCT dump_spec(outputStream * st) const422 virtual void dump_spec(outputStream *st) const { 423 LoadNode::dump_spec(st); 424 if (_require_atomic_access) st->print(" Atomic!"); 425 } 426 #endif 427 }; 428 429 //------------------------------LoadL_unalignedNode---------------------------- 430 // Load a long from unaligned memory 431 class LoadL_unalignedNode : public LoadLNode { 432 public: LoadL_unalignedNode(Node * c,Node * mem,Node * adr,const TypePtr * at,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)433 LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 434 : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {} 435 virtual int Opcode() const; 436 }; 437 438 
//------------------------------LoadFNode-------------------------------------- 439 // Load a float (64 bits) from memory 440 class LoadFNode : public LoadNode { 441 public: LoadFNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const Type * t,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)442 LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 443 : LoadNode(c, mem, adr, at, t, mo, control_dependency) {} 444 virtual int Opcode() const; ideal_reg() const445 virtual uint ideal_reg() const { return Op_RegF; } store_Opcode() const446 virtual int store_Opcode() const { return Op_StoreF; } memory_type() const447 virtual BasicType memory_type() const { return T_FLOAT; } 448 }; 449 450 //------------------------------LoadDNode-------------------------------------- 451 // Load a double (64 bits) from memory 452 class LoadDNode : public LoadNode { hash() const453 virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; } cmp(const Node & n) const454 virtual bool cmp( const Node &n ) const { 455 return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access 456 && LoadNode::cmp(n); 457 } size_of() const458 virtual uint size_of() const { return sizeof(*this); } 459 const bool _require_atomic_access; // is piecewise load forbidden? 
460 461 public: LoadDNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const Type * t,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest,bool require_atomic_access=false)462 LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, 463 MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false) 464 : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {} 465 virtual int Opcode() const; ideal_reg() const466 virtual uint ideal_reg() const { return Op_RegD; } store_Opcode() const467 virtual int store_Opcode() const { return Op_StoreD; } memory_type() const468 virtual BasicType memory_type() const { return T_DOUBLE; } require_atomic_access() const469 bool require_atomic_access() const { return _require_atomic_access; } 470 static LoadDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, 471 const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, 472 bool unaligned = false, bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0); 473 #ifndef PRODUCT dump_spec(outputStream * st) const474 virtual void dump_spec(outputStream *st) const { 475 LoadNode::dump_spec(st); 476 if (_require_atomic_access) st->print(" Atomic!"); 477 } 478 #endif 479 }; 480 481 //------------------------------LoadD_unalignedNode---------------------------- 482 // Load a double from unaligned memory 483 class LoadD_unalignedNode : public LoadDNode { 484 public: LoadD_unalignedNode(Node * c,Node * mem,Node * adr,const TypePtr * at,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)485 LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 486 : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {} 487 virtual int Opcode() const; 488 }; 489 490 
//------------------------------LoadPNode-------------------------------------- 491 // Load a pointer from memory (either object or array) 492 class LoadPNode : public LoadNode { 493 public: LoadPNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const TypePtr * t,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)494 LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 495 : LoadNode(c, mem, adr, at, t, mo, control_dependency) {} 496 virtual int Opcode() const; ideal_reg() const497 virtual uint ideal_reg() const { return Op_RegP; } store_Opcode() const498 virtual int store_Opcode() const { return Op_StoreP; } memory_type() const499 virtual BasicType memory_type() const { return T_ADDRESS; } 500 }; 501 502 503 //------------------------------LoadNNode-------------------------------------- 504 // Load a narrow oop from memory (either object or array) 505 class LoadNNode : public LoadNode { 506 public: LoadNNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const Type * t,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)507 LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 508 : LoadNode(c, mem, adr, at, t, mo, control_dependency) {} 509 virtual int Opcode() const; ideal_reg() const510 virtual uint ideal_reg() const { return Op_RegN; } store_Opcode() const511 virtual int store_Opcode() const { return Op_StoreN; } memory_type() const512 virtual BasicType memory_type() const { return T_NARROWOOP; } 513 }; 514 515 //------------------------------LoadKlassNode---------------------------------- 516 // Load a Klass from an object 517 class LoadKlassNode : public LoadPNode { 518 protected: 519 // In most cases, LoadKlassNode does not have the control input set. If the control 520 // input is set, it must not be removed (by LoadNode::Ideal()). 
521 virtual bool can_remove_control() const; 522 public: LoadKlassNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const TypeKlassPtr * tk,MemOrd mo)523 LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo) 524 : LoadPNode(c, mem, adr, at, tk, mo) {} 525 virtual int Opcode() const; 526 virtual const Type* Value(PhaseGVN* phase) const; 527 virtual Node* Identity(PhaseGVN* phase); depends_only_on_test() const528 virtual bool depends_only_on_test() const { return true; } 529 530 // Polymorphic factory method: 531 static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at, 532 const TypeKlassPtr* tk = TypeKlassPtr::OBJECT); 533 }; 534 535 //------------------------------LoadNKlassNode--------------------------------- 536 // Load a narrow Klass from an object. 537 class LoadNKlassNode : public LoadNNode { 538 public: LoadNKlassNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const TypeNarrowKlass * tk,MemOrd mo)539 LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo) 540 : LoadNNode(c, mem, adr, at, tk, mo) {} 541 virtual int Opcode() const; ideal_reg() const542 virtual uint ideal_reg() const { return Op_RegN; } store_Opcode() const543 virtual int store_Opcode() const { return Op_StoreNKlass; } memory_type() const544 virtual BasicType memory_type() const { return T_NARROWKLASS; } 545 546 virtual const Type* Value(PhaseGVN* phase) const; 547 virtual Node* Identity(PhaseGVN* phase); depends_only_on_test() const548 virtual bool depends_only_on_test() const { return true; } 549 }; 550 551 552 //------------------------------StoreNode-------------------------------------- 553 // Store value; requires Store, Address and Value 554 class StoreNode : public MemNode { 555 private: 556 // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish 557 // stores that can be reordered, and such requiring release semantics to 558 // adhere to 
the Java specification. The required behaviour is stored in 559 // this field. 560 const MemOrd _mo; 561 // Needed for proper cloning. size_of() const562 virtual uint size_of() const { return sizeof(*this); } 563 protected: 564 virtual bool cmp( const Node &n ) const; depends_only_on_test() const565 virtual bool depends_only_on_test() const { return false; } 566 567 Node *Ideal_masked_input (PhaseGVN *phase, uint mask); 568 Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits); 569 570 public: 571 // We must ensure that stores of object references will be visible 572 // only after the object's initialization. So the callers of this 573 // procedure must indicate that the store requires `release' 574 // semantics, if the stored value is an object reference that might 575 // point to a new object and may become externally visible. StoreNode(Node * c,Node * mem,Node * adr,const TypePtr * at,Node * val,MemOrd mo)576 StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) 577 : MemNode(c, mem, adr, at, val), _mo(mo) { 578 init_class_id(Class_Store); 579 } StoreNode(Node * c,Node * mem,Node * adr,const TypePtr * at,Node * val,Node * oop_store,MemOrd mo)580 StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo) 581 : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) { 582 init_class_id(Class_Store); 583 } 584 is_unordered() const585 inline bool is_unordered() const { return !is_release(); } is_release() const586 inline bool is_release() const { 587 assert((_mo == unordered || _mo == release), "unexpected"); 588 return _mo == release; 589 } 590 591 // Conservatively release stores of object references in order to 592 // ensure visibility of object initialization. 
release_if_reference(const BasicType t)593 static inline MemOrd release_if_reference(const BasicType t) { 594 #ifdef AARCH64 595 // AArch64 doesn't need a release store here because object 596 // initialization contains the necessary barriers. 597 return unordered; 598 #else 599 const MemOrd mo = (t == T_ARRAY || 600 t == T_ADDRESS || // Might be the address of an object reference (`boxing'). 601 t == T_OBJECT) ? release : unordered; 602 return mo; 603 #endif 604 } 605 606 // Polymorphic factory method 607 // 608 // We must ensure that stores of object references will be visible 609 // only after the object's initialization. So the callers of this 610 // procedure must indicate that the store requires `release' 611 // semantics, if the stored value is an object reference that might 612 // point to a new object and may become externally visible. 613 static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr, 614 const TypePtr* at, Node *val, BasicType bt, MemOrd mo); 615 616 virtual uint hash() const; // Check the type 617 618 // If the store is to Field memory and the pointer is non-null, we can 619 // zero out the control input. 620 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 621 622 // Compute a new Type for this node. Basically we just do the pre-check, 623 // then call the virtual add() to set the type. 624 virtual const Type* Value(PhaseGVN* phase) const; 625 626 // Check for identity function on memory (Load then Store at same address) 627 virtual Node* Identity(PhaseGVN* phase); 628 629 // Do not match memory edge 630 virtual uint match_edge(uint idx) const; 631 632 virtual const Type *bottom_type() const; // returns Type::MEMORY 633 634 // Map a store opcode to its corresponding own opcode, trivially. store_Opcode() const635 virtual int store_Opcode() const { return Opcode(); } 636 637 // have all possible loads of the value stored been optimized away? 
638 bool value_never_loaded(PhaseTransform *phase) const; 639 640 bool has_reinterpret_variant(const Type* vt); 641 Node* convert_to_reinterpret_store(PhaseGVN& gvn, Node* val, const Type* vt); 642 643 MemBarNode* trailing_membar() const; 644 }; 645 646 //------------------------------StoreBNode------------------------------------- 647 // Store byte to memory 648 class StoreBNode : public StoreNode { 649 public: StoreBNode(Node * c,Node * mem,Node * adr,const TypePtr * at,Node * val,MemOrd mo)650 StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) 651 : StoreNode(c, mem, adr, at, val, mo) {} 652 virtual int Opcode() const; 653 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); memory_type() const654 virtual BasicType memory_type() const { return T_BYTE; } 655 }; 656 657 //------------------------------StoreCNode------------------------------------- 658 // Store char/short to memory 659 class StoreCNode : public StoreNode { 660 public: StoreCNode(Node * c,Node * mem,Node * adr,const TypePtr * at,Node * val,MemOrd mo)661 StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) 662 : StoreNode(c, mem, adr, at, val, mo) {} 663 virtual int Opcode() const; 664 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); memory_type() const665 virtual BasicType memory_type() const { return T_CHAR; } 666 }; 667 668 //------------------------------StoreINode------------------------------------- 669 // Store int to memory 670 class StoreINode : public StoreNode { 671 public: StoreINode(Node * c,Node * mem,Node * adr,const TypePtr * at,Node * val,MemOrd mo)672 StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) 673 : StoreNode(c, mem, adr, at, val, mo) {} 674 virtual int Opcode() const; memory_type() const675 virtual BasicType memory_type() const { return T_INT; } 676 }; 677 678 //------------------------------StoreLNode------------------------------------- 679 // Store long to memory 680 
class StoreLNode : public StoreNode { hash() const681 virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; } cmp(const Node & n) const682 virtual bool cmp( const Node &n ) const { 683 return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access 684 && StoreNode::cmp(n); 685 } size_of() const686 virtual uint size_of() const { return sizeof(*this); } 687 const bool _require_atomic_access; // is piecewise store forbidden? 688 689 public: StoreLNode(Node * c,Node * mem,Node * adr,const TypePtr * at,Node * val,MemOrd mo,bool require_atomic_access=false)690 StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false) 691 : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {} 692 virtual int Opcode() const; memory_type() const693 virtual BasicType memory_type() const { return T_LONG; } require_atomic_access() const694 bool require_atomic_access() const { return _require_atomic_access; } 695 static StoreLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo); 696 #ifndef PRODUCT dump_spec(outputStream * st) const697 virtual void dump_spec(outputStream *st) const { 698 StoreNode::dump_spec(st); 699 if (_require_atomic_access) st->print(" Atomic!"); 700 } 701 #endif 702 }; 703 704 //------------------------------StoreFNode------------------------------------- 705 // Store float to memory 706 class StoreFNode : public StoreNode { 707 public: StoreFNode(Node * c,Node * mem,Node * adr,const TypePtr * at,Node * val,MemOrd mo)708 StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) 709 : StoreNode(c, mem, adr, at, val, mo) {} 710 virtual int Opcode() const; memory_type() const711 virtual BasicType memory_type() const { return T_FLOAT; } 712 }; 713 714 //------------------------------StoreDNode------------------------------------- 715 // Store double to memory 716 class StoreDNode : 
public StoreNode { hash() const717 virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; } cmp(const Node & n) const718 virtual bool cmp( const Node &n ) const { 719 return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access 720 && StoreNode::cmp(n); 721 } size_of() const722 virtual uint size_of() const { return sizeof(*this); } 723 const bool _require_atomic_access; // is piecewise store forbidden? 724 public: StoreDNode(Node * c,Node * mem,Node * adr,const TypePtr * at,Node * val,MemOrd mo,bool require_atomic_access=false)725 StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, 726 MemOrd mo, bool require_atomic_access = false) 727 : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {} 728 virtual int Opcode() const; memory_type() const729 virtual BasicType memory_type() const { return T_DOUBLE; } require_atomic_access() const730 bool require_atomic_access() const { return _require_atomic_access; } 731 static StoreDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo); 732 #ifndef PRODUCT dump_spec(outputStream * st) const733 virtual void dump_spec(outputStream *st) const { 734 StoreNode::dump_spec(st); 735 if (_require_atomic_access) st->print(" Atomic!"); 736 } 737 #endif 738 739 }; 740 741 //------------------------------StorePNode------------------------------------- 742 // Store pointer to memory 743 class StorePNode : public StoreNode { 744 public: StorePNode(Node * c,Node * mem,Node * adr,const TypePtr * at,Node * val,MemOrd mo)745 StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) 746 : StoreNode(c, mem, adr, at, val, mo) {} 747 virtual int Opcode() const; memory_type() const748 virtual BasicType memory_type() const { return T_ADDRESS; } 749 }; 750 751 //------------------------------StoreNNode------------------------------------- 752 // Store narrow oop to memory 753 class StoreNNode : 
public StoreNode { 754 public: StoreNNode(Node * c,Node * mem,Node * adr,const TypePtr * at,Node * val,MemOrd mo)755 StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) 756 : StoreNode(c, mem, adr, at, val, mo) {} 757 virtual int Opcode() const; memory_type() const758 virtual BasicType memory_type() const { return T_NARROWOOP; } 759 }; 760 761 //------------------------------StoreNKlassNode-------------------------------------- 762 // Store narrow klass to memory 763 class StoreNKlassNode : public StoreNNode { 764 public: StoreNKlassNode(Node * c,Node * mem,Node * adr,const TypePtr * at,Node * val,MemOrd mo)765 StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) 766 : StoreNNode(c, mem, adr, at, val, mo) {} 767 virtual int Opcode() const; memory_type() const768 virtual BasicType memory_type() const { return T_NARROWKLASS; } 769 }; 770 771 //------------------------------StoreCMNode----------------------------------- 772 // Store card-mark byte to memory for CM 773 // The last StoreCM before a SafePoint must be preserved and occur after its "oop" store 774 // Preceeding equivalent StoreCMs may be eliminated. 
775 class StoreCMNode : public StoreNode { 776 private: hash() const777 virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; } cmp(const Node & n) const778 virtual bool cmp( const Node &n ) const { 779 return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx 780 && StoreNode::cmp(n); 781 } size_of() const782 virtual uint size_of() const { return sizeof(*this); } 783 int _oop_alias_idx; // The alias_idx of OopStore 784 785 public: StoreCMNode(Node * c,Node * mem,Node * adr,const TypePtr * at,Node * val,Node * oop_store,int oop_alias_idx)786 StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) : 787 StoreNode(c, mem, adr, at, val, oop_store, MemNode::release), 788 _oop_alias_idx(oop_alias_idx) { 789 assert(_oop_alias_idx >= Compile::AliasIdxRaw || 790 _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0, 791 "bad oop alias idx"); 792 } 793 virtual int Opcode() const; 794 virtual Node* Identity(PhaseGVN* phase); 795 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 796 virtual const Type* Value(PhaseGVN* phase) const; memory_type() const797 virtual BasicType memory_type() const { return T_VOID; } // unspecific oop_alias_idx() const798 int oop_alias_idx() const { return _oop_alias_idx; } 799 }; 800 801 //------------------------------LoadPLockedNode--------------------------------- 802 // Load-locked a pointer from memory (either object or array). 803 // On Sparc & Intel this is implemented as a normal pointer load. 804 // On PowerPC and friends it's a real load-locked. 
805 class LoadPLockedNode : public LoadPNode { 806 public: LoadPLockedNode(Node * c,Node * mem,Node * adr,MemOrd mo)807 LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo) 808 : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {} 809 virtual int Opcode() const; store_Opcode() const810 virtual int store_Opcode() const { return Op_StorePConditional; } depends_only_on_test() const811 virtual bool depends_only_on_test() const { return true; } 812 }; 813 814 //------------------------------SCMemProjNode--------------------------------------- 815 // This class defines a projection of the memory state of a store conditional node. 816 // These nodes return a value, but also update memory. 817 class SCMemProjNode : public ProjNode { 818 public: 819 enum {SCMEMPROJCON = (uint)-2}; SCMemProjNode(Node * src)820 SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { } 821 virtual int Opcode() const; is_CFG() const822 virtual bool is_CFG() const { return false; } bottom_type() const823 virtual const Type *bottom_type() const {return Type::MEMORY;} adr_type() const824 virtual const TypePtr *adr_type() const { 825 Node* ctrl = in(0); 826 if (ctrl == NULL) return NULL; // node is dead 827 return ctrl->in(MemNode::Memory)->adr_type(); 828 } ideal_reg() const829 virtual uint ideal_reg() const { return 0;} // memory projections don't have a register 830 virtual const Type* Value(PhaseGVN* phase) const; 831 #ifndef PRODUCT dump_spec(outputStream * st) const832 virtual void dump_spec(outputStream *st) const {}; 833 #endif 834 }; 835 836 //------------------------------LoadStoreNode--------------------------- 837 // Note: is_Mem() method returns 'true' for this class. 838 class LoadStoreNode : public Node { 839 private: 840 const Type* const _type; // What kind of value is loaded? 841 const TypePtr* _adr_type; // What kind of memory is being addressed? 
842 uint8_t _barrier_data; // Bit field with barrier information 843 virtual uint size_of() const; // Size is bigger 844 public: 845 LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required ); depends_only_on_test() const846 virtual bool depends_only_on_test() const { return false; } match_edge(uint idx) const847 virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; } 848 bottom_type() const849 virtual const Type *bottom_type() const { return _type; } 850 virtual uint ideal_reg() const; adr_type() const851 virtual const class TypePtr *adr_type() const { return _adr_type; } // returns bottom_type of address 852 853 bool result_not_used() const; 854 MemBarNode* trailing_membar() const; 855 barrier_data()856 uint8_t barrier_data() { return _barrier_data; } set_barrier_data(uint8_t barrier_data)857 void set_barrier_data(uint8_t barrier_data) { _barrier_data = barrier_data; } 858 }; 859 860 class LoadStoreConditionalNode : public LoadStoreNode { 861 public: 862 enum { 863 ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode 864 }; 865 LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex); 866 }; 867 868 //------------------------------StorePConditionalNode--------------------------- 869 // Conditionally store pointer to memory, if no change since prior 870 // load-locked. Sets flags for success or failure of the store. 
871 class StorePConditionalNode : public LoadStoreConditionalNode { 872 public: StorePConditionalNode(Node * c,Node * mem,Node * adr,Node * val,Node * ll)873 StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { } 874 virtual int Opcode() const; 875 // Produces flags ideal_reg() const876 virtual uint ideal_reg() const { return Op_RegFlags; } 877 }; 878 879 //------------------------------StoreIConditionalNode--------------------------- 880 // Conditionally store int to memory, if no change since prior 881 // load-locked. Sets flags for success or failure of the store. 882 class StoreIConditionalNode : public LoadStoreConditionalNode { 883 public: StoreIConditionalNode(Node * c,Node * mem,Node * adr,Node * val,Node * ii)884 StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { } 885 virtual int Opcode() const; 886 // Produces flags ideal_reg() const887 virtual uint ideal_reg() const { return Op_RegFlags; } 888 }; 889 890 //------------------------------StoreLConditionalNode--------------------------- 891 // Conditionally store long to memory, if no change since prior 892 // load-locked. Sets flags for success or failure of the store. 
893 class StoreLConditionalNode : public LoadStoreConditionalNode { 894 public: StoreLConditionalNode(Node * c,Node * mem,Node * adr,Node * val,Node * ll)895 StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { } 896 virtual int Opcode() const; 897 // Produces flags ideal_reg() const898 virtual uint ideal_reg() const { return Op_RegFlags; } 899 }; 900 901 class CompareAndSwapNode : public LoadStoreConditionalNode { 902 private: 903 const MemNode::MemOrd _mem_ord; 904 public: CompareAndSwapNode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,MemNode::MemOrd mem_ord)905 CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {} order() const906 MemNode::MemOrd order() const { 907 return _mem_ord; 908 } size_of() const909 virtual uint size_of() const { return sizeof(*this); } 910 }; 911 912 class CompareAndExchangeNode : public LoadStoreNode { 913 private: 914 const MemNode::MemOrd _mem_ord; 915 public: 916 enum { 917 ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode 918 }; CompareAndExchangeNode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,MemNode::MemOrd mem_ord,const TypePtr * at,const Type * t)919 CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) : 920 LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) { 921 init_req(ExpectedIn, ex ); 922 } 923 order() const924 MemNode::MemOrd order() const { 925 return _mem_ord; 926 } size_of() const927 virtual uint size_of() const { return sizeof(*this); } 928 }; 929 930 //------------------------------CompareAndSwapBNode--------------------------- 931 class CompareAndSwapBNode : public CompareAndSwapNode { 932 public: CompareAndSwapBNode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,MemNode::MemOrd mem_ord)933 
CompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 934 virtual int Opcode() const; 935 }; 936 937 //------------------------------CompareAndSwapSNode--------------------------- 938 class CompareAndSwapSNode : public CompareAndSwapNode { 939 public: CompareAndSwapSNode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,MemNode::MemOrd mem_ord)940 CompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 941 virtual int Opcode() const; 942 }; 943 944 //------------------------------CompareAndSwapINode--------------------------- 945 class CompareAndSwapINode : public CompareAndSwapNode { 946 public: CompareAndSwapINode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,MemNode::MemOrd mem_ord)947 CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 948 virtual int Opcode() const; 949 }; 950 951 //------------------------------CompareAndSwapLNode--------------------------- 952 class CompareAndSwapLNode : public CompareAndSwapNode { 953 public: CompareAndSwapLNode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,MemNode::MemOrd mem_ord)954 CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 955 virtual int Opcode() const; 956 }; 957 958 //------------------------------CompareAndSwapPNode--------------------------- 959 class CompareAndSwapPNode : public CompareAndSwapNode { 960 public: CompareAndSwapPNode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,MemNode::MemOrd mem_ord)961 CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 962 virtual int Opcode() const; 963 
}; 964 965 //------------------------------CompareAndSwapNNode--------------------------- 966 class CompareAndSwapNNode : public CompareAndSwapNode { 967 public: CompareAndSwapNNode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,MemNode::MemOrd mem_ord)968 CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 969 virtual int Opcode() const; 970 }; 971 972 //------------------------------WeakCompareAndSwapBNode--------------------------- 973 class WeakCompareAndSwapBNode : public CompareAndSwapNode { 974 public: WeakCompareAndSwapBNode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,MemNode::MemOrd mem_ord)975 WeakCompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 976 virtual int Opcode() const; 977 }; 978 979 //------------------------------WeakCompareAndSwapSNode--------------------------- 980 class WeakCompareAndSwapSNode : public CompareAndSwapNode { 981 public: WeakCompareAndSwapSNode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,MemNode::MemOrd mem_ord)982 WeakCompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 983 virtual int Opcode() const; 984 }; 985 986 //------------------------------WeakCompareAndSwapINode--------------------------- 987 class WeakCompareAndSwapINode : public CompareAndSwapNode { 988 public: WeakCompareAndSwapINode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,MemNode::MemOrd mem_ord)989 WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 990 virtual int Opcode() const; 991 }; 992 993 //------------------------------WeakCompareAndSwapLNode--------------------------- 994 class WeakCompareAndSwapLNode : public 
CompareAndSwapNode { 995 public: WeakCompareAndSwapLNode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,MemNode::MemOrd mem_ord)996 WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 997 virtual int Opcode() const; 998 }; 999 1000 //------------------------------WeakCompareAndSwapPNode--------------------------- 1001 class WeakCompareAndSwapPNode : public CompareAndSwapNode { 1002 public: WeakCompareAndSwapPNode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,MemNode::MemOrd mem_ord)1003 WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 1004 virtual int Opcode() const; 1005 }; 1006 1007 //------------------------------WeakCompareAndSwapNNode--------------------------- 1008 class WeakCompareAndSwapNNode : public CompareAndSwapNode { 1009 public: WeakCompareAndSwapNNode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,MemNode::MemOrd mem_ord)1010 WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 1011 virtual int Opcode() const; 1012 }; 1013 1014 //------------------------------CompareAndExchangeBNode--------------------------- 1015 class CompareAndExchangeBNode : public CompareAndExchangeNode { 1016 public: CompareAndExchangeBNode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,const TypePtr * at,MemNode::MemOrd mem_ord)1017 CompareAndExchangeBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::BYTE) { } 1018 virtual int Opcode() const; 1019 }; 1020 1021 1022 //------------------------------CompareAndExchangeSNode--------------------------- 1023 class CompareAndExchangeSNode : public CompareAndExchangeNode { 1024 
public: CompareAndExchangeSNode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,const TypePtr * at,MemNode::MemOrd mem_ord)1025 CompareAndExchangeSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::SHORT) { } 1026 virtual int Opcode() const; 1027 }; 1028 1029 //------------------------------CompareAndExchangeLNode--------------------------- 1030 class CompareAndExchangeLNode : public CompareAndExchangeNode { 1031 public: CompareAndExchangeLNode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,const TypePtr * at,MemNode::MemOrd mem_ord)1032 CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { } 1033 virtual int Opcode() const; 1034 }; 1035 1036 1037 //------------------------------CompareAndExchangeINode--------------------------- 1038 class CompareAndExchangeINode : public CompareAndExchangeNode { 1039 public: CompareAndExchangeINode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,const TypePtr * at,MemNode::MemOrd mem_ord)1040 CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { } 1041 virtual int Opcode() const; 1042 }; 1043 1044 1045 //------------------------------CompareAndExchangePNode--------------------------- 1046 class CompareAndExchangePNode : public CompareAndExchangeNode { 1047 public: CompareAndExchangePNode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,const TypePtr * at,const Type * t,MemNode::MemOrd mem_ord)1048 CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { } 1049 virtual int 
Opcode() const; 1050 }; 1051 1052 //------------------------------CompareAndExchangeNNode--------------------------- 1053 class CompareAndExchangeNNode : public CompareAndExchangeNode { 1054 public: CompareAndExchangeNNode(Node * c,Node * mem,Node * adr,Node * val,Node * ex,const TypePtr * at,const Type * t,MemNode::MemOrd mem_ord)1055 CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { } 1056 virtual int Opcode() const; 1057 }; 1058 1059 //------------------------------GetAndAddBNode--------------------------- 1060 class GetAndAddBNode : public LoadStoreNode { 1061 public: GetAndAddBNode(Node * c,Node * mem,Node * adr,Node * val,const TypePtr * at)1062 GetAndAddBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { } 1063 virtual int Opcode() const; 1064 }; 1065 1066 //------------------------------GetAndAddSNode--------------------------- 1067 class GetAndAddSNode : public LoadStoreNode { 1068 public: GetAndAddSNode(Node * c,Node * mem,Node * adr,Node * val,const TypePtr * at)1069 GetAndAddSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { } 1070 virtual int Opcode() const; 1071 }; 1072 1073 //------------------------------GetAndAddINode--------------------------- 1074 class GetAndAddINode : public LoadStoreNode { 1075 public: GetAndAddINode(Node * c,Node * mem,Node * adr,Node * val,const TypePtr * at)1076 GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { } 1077 virtual int Opcode() const; 1078 }; 1079 1080 //------------------------------GetAndAddLNode--------------------------- 1081 class GetAndAddLNode : public LoadStoreNode { 1082 public: GetAndAddLNode(Node * c,Node * mem,Node * 
adr,Node * val,const TypePtr * at)1083 GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { } 1084 virtual int Opcode() const; 1085 }; 1086 1087 //------------------------------GetAndSetBNode--------------------------- 1088 class GetAndSetBNode : public LoadStoreNode { 1089 public: GetAndSetBNode(Node * c,Node * mem,Node * adr,Node * val,const TypePtr * at)1090 GetAndSetBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { } 1091 virtual int Opcode() const; 1092 }; 1093 1094 //------------------------------GetAndSetSNode--------------------------- 1095 class GetAndSetSNode : public LoadStoreNode { 1096 public: GetAndSetSNode(Node * c,Node * mem,Node * adr,Node * val,const TypePtr * at)1097 GetAndSetSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { } 1098 virtual int Opcode() const; 1099 }; 1100 1101 //------------------------------GetAndSetINode--------------------------- 1102 class GetAndSetINode : public LoadStoreNode { 1103 public: GetAndSetINode(Node * c,Node * mem,Node * adr,Node * val,const TypePtr * at)1104 GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { } 1105 virtual int Opcode() const; 1106 }; 1107 1108 //------------------------------GetAndSetLNode--------------------------- 1109 class GetAndSetLNode : public LoadStoreNode { 1110 public: GetAndSetLNode(Node * c,Node * mem,Node * adr,Node * val,const TypePtr * at)1111 GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { } 1112 virtual int Opcode() const; 1113 }; 1114 1115 //------------------------------GetAndSetPNode--------------------------- 1116 class GetAndSetPNode : public LoadStoreNode { 1117 public: 
GetAndSetPNode(Node * c,Node * mem,Node * adr,Node * val,const TypePtr * at,const Type * t)1118 GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { } 1119 virtual int Opcode() const; 1120 }; 1121 1122 //------------------------------GetAndSetNNode--------------------------- 1123 class GetAndSetNNode : public LoadStoreNode { 1124 public: GetAndSetNNode(Node * c,Node * mem,Node * adr,Node * val,const TypePtr * at,const Type * t)1125 GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { } 1126 virtual int Opcode() const; 1127 }; 1128 1129 //------------------------------ClearArray------------------------------------- 1130 class ClearArrayNode: public Node { 1131 private: 1132 bool _is_large; 1133 public: ClearArrayNode(Node * ctrl,Node * arymem,Node * word_cnt,Node * base,bool is_large)1134 ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large) 1135 : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) { 1136 init_class_id(Class_ClearArray); 1137 } 1138 virtual int Opcode() const; bottom_type() const1139 virtual const Type *bottom_type() const { return Type::MEMORY; } 1140 // ClearArray modifies array elements, and so affects only the 1141 // array memory addressed by the bottom_type of its base address. 1142 virtual const class TypePtr *adr_type() const; 1143 virtual Node* Identity(PhaseGVN* phase); 1144 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 1145 virtual uint match_edge(uint idx) const; is_large() const1146 bool is_large() const { return _is_large; } 1147 1148 // Clear the given area of an object or array. 1149 // The start offset must always be aligned mod BytesPerInt. 1150 // The end offset must always be aligned mod BytesPerLong. 1151 // Return the new memory. 
1152 static Node* clear_memory(Node* control, Node* mem, Node* dest, 1153 intptr_t start_offset, 1154 intptr_t end_offset, 1155 PhaseGVN* phase); 1156 static Node* clear_memory(Node* control, Node* mem, Node* dest, 1157 intptr_t start_offset, 1158 Node* end_offset, 1159 PhaseGVN* phase); 1160 static Node* clear_memory(Node* control, Node* mem, Node* dest, 1161 Node* start_offset, 1162 Node* end_offset, 1163 PhaseGVN* phase); 1164 // Return allocation input memory edge if it is different instance 1165 // or itself if it is the one we are looking for. 1166 static bool step_through(Node** np, uint instance_id, PhaseTransform* phase); 1167 }; 1168 1169 //------------------------------MemBar----------------------------------------- 1170 // There are different flavors of Memory Barriers to match the Java Memory 1171 // Model. Monitor-enter and volatile-load act as Aquires: no following ref 1172 // can be moved to before them. We insert a MemBar-Acquire after a FastLock or 1173 // volatile-load. Monitor-exit and volatile-store act as Release: no 1174 // preceding ref can be moved to after them. We insert a MemBar-Release 1175 // before a FastUnlock or volatile-store. All volatiles need to be 1176 // serialized, so we follow all volatile-stores with a MemBar-Volatile to 1177 // separate it from any following volatile-load. 1178 class MemBarNode: public MultiNode { 1179 virtual uint hash() const ; // { return NO_HASH; } 1180 virtual bool cmp( const Node &n ) const ; // Always fail, except on self 1181 size_of() const1182 virtual uint size_of() const { return sizeof(*this); } 1183 // Memory type this node is serializing. Usually either rawptr or bottom. 1184 const TypePtr* _adr_type; 1185 1186 // How is this membar related to a nearby memory access? 
1187 enum { 1188 Standalone, 1189 TrailingLoad, 1190 TrailingStore, 1191 LeadingStore, 1192 TrailingLoadStore, 1193 LeadingLoadStore, 1194 TrailingPartialArrayCopy 1195 } _kind; 1196 1197 #ifdef ASSERT 1198 uint _pair_idx; 1199 #endif 1200 1201 public: 1202 enum { 1203 Precedent = TypeFunc::Parms // optional edge to force precedence 1204 }; 1205 MemBarNode(Compile* C, int alias_idx, Node* precedent); 1206 virtual int Opcode() const = 0; adr_type() const1207 virtual const class TypePtr *adr_type() const { return _adr_type; } 1208 virtual const Type* Value(PhaseGVN* phase) const; 1209 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); match_edge(uint idx) const1210 virtual uint match_edge(uint idx) const { return 0; } bottom_type() const1211 virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; } 1212 virtual Node *match( const ProjNode *proj, const Matcher *m ); 1213 // Factory method. Builds a wide or narrow membar. 1214 // Optional 'precedent' becomes an extra edge if not null. 
1215 static MemBarNode* make(Compile* C, int opcode, 1216 int alias_idx = Compile::AliasIdxBot, 1217 Node* precedent = NULL); 1218 1219 MemBarNode* trailing_membar() const; 1220 MemBarNode* leading_membar() const; 1221 set_trailing_load()1222 void set_trailing_load() { _kind = TrailingLoad; } trailing_load() const1223 bool trailing_load() const { return _kind == TrailingLoad; } trailing_store() const1224 bool trailing_store() const { return _kind == TrailingStore; } leading_store() const1225 bool leading_store() const { return _kind == LeadingStore; } trailing_load_store() const1226 bool trailing_load_store() const { return _kind == TrailingLoadStore; } leading_load_store() const1227 bool leading_load_store() const { return _kind == LeadingLoadStore; } trailing() const1228 bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; } leading() const1229 bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; } standalone() const1230 bool standalone() const { return _kind == Standalone; } set_trailing_partial_array_copy()1231 void set_trailing_partial_array_copy() { _kind = TrailingPartialArrayCopy; } trailing_partial_array_copy() const1232 bool trailing_partial_array_copy() const { return _kind == TrailingPartialArrayCopy; } 1233 1234 static void set_store_pair(MemBarNode* leading, MemBarNode* trailing); 1235 static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing); 1236 1237 void remove(PhaseIterGVN *igvn); 1238 }; 1239 1240 // "Acquire" - no following ref can move before (but earlier refs can 1241 // follow, like an early Load stalled in cache). Requires multi-cpu 1242 // visibility. Inserted after a volatile load. 
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted independent of any load, as required
// for intrinsic Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted independent of any store, as required
// for intrinsic Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnLock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "StoreStore" barrier - earlier stores may not pass later stores.
// Registers its own class id so it can be recognized via is_MemBarStoreStore().
class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

// Membar variant distinguished only by its Opcode; presumably backs the
// spin-wait hint intrinsic -- confirm against the matcher/intrinsic table.
class OnSpinWaitNode: public MemBarNode {
public:
  OnSpinWaitNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  // Completion state; WithArraycopy may be OR'ed with Complete.
  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseGVN* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseGVN* phase, bool can_reshape);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseIterGVN* phase);

 private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* value, PhaseGVN* phase);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Node* base_memory);

  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

// Iterator over the non-empty memory slices of one MergeMem (or of two
// MergeMems in parallel).  Stack-allocated; see "expected usages" below.
class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;  // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  // Expected current memory, used by assert_synch/force_memory to detect
  // out-of-band updates to the MergeMem.
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;  // is_empty() == true
      }
    }
    return false;
  }
};

// cachewb node for guaranteeing writeback of the cache line at a
// given address to (non-volatile) RAM
class CacheWBNode : public Node {
public:
  CacheWBNode(Node *ctrl, Node *mem, Node *addr) : Node(ctrl, mem, addr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return (idx == 2); } // address edge only
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};

// cachewb pre sync node for ensuring that writebacks are serialised
// relative to preceding or following stores
class CacheWBPreSyncNode : public Node {
public:
  CacheWBPreSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return false; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};

// cachewb post sync node for ensuring that writebacks are serialised
// relative to preceding or following stores
class CacheWBPostSyncNode : public Node {
public:
  CacheWBPostSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return false; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};

//------------------------------Prefetch---------------------------------------

// Allocation prefetch which may fault, TLAB size has to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  // No control input (in(0) == 0); only memory and the address to prefetch.
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; } // address edge only
  // Produces memory when AllocatePrefetchStyle == 3, otherwise abstract I/O.
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_OPTO_MEMNODE_HPP