1 /* 2 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_OPTO_MEMNODE_HPP 26 #define SHARE_OPTO_MEMNODE_HPP 27 28 #include "opto/multnode.hpp" 29 #include "opto/node.hpp" 30 #include "opto/opcodes.hpp" 31 #include "opto/type.hpp" 32 33 // Portions of code courtesy of Clifford Click 34 35 class MultiNode; 36 class PhaseCCP; 37 class PhaseTransform; 38 39 //------------------------------MemNode---------------------------------------- 40 // Load or Store, possibly throwing a NULL pointer exception 41 class MemNode : public Node { 42 private: 43 bool _unaligned_access; // Unaligned access from unsafe 44 bool _mismatched_access; // Mismatched access from unsafe: byte read in integer array for instance 45 bool _unsafe_access; // Access of unsafe origin. 46 uint8_t _barrier; // Bit field with barrier information 47 48 protected: 49 #ifdef ASSERT 50 const TypePtr* _adr_type; // What kind of memory is being addressed? 
51 #endif 52 virtual uint size_of() const; 53 public: 54 enum { Control, // When is it safe to do this load? 55 Memory, // Chunk of memory is being loaded from 56 Address, // Actually address, derived from base 57 ValueIn, // Value to store 58 OopStore // Preceeding oop store, only in StoreCM 59 }; 60 typedef enum { unordered = 0, 61 acquire, // Load has to acquire or be succeeded by MemBarAcquire. 62 release, // Store has to release or be preceded by MemBarRelease. 63 seqcst, // LoadStore has to have both acquire and release semantics. 64 unset // The memory ordering is not set (used for testing) 65 } MemOrd; 66 protected: MemNode(Node * c0,Node * c1,Node * c2,const TypePtr * at)67 MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at ) : 68 Node(c0,c1,c2), 69 _unaligned_access(false), 70 _mismatched_access(false), 71 _unsafe_access(false), 72 _barrier(0) { 73 init_class_id(Class_Mem); 74 debug_only(_adr_type=at; adr_type();) 75 } MemNode(Node * c0,Node * c1,Node * c2,const TypePtr * at,Node * c3)76 MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) : 77 Node(c0,c1,c2,c3), 78 _unaligned_access(false), 79 _mismatched_access(false), 80 _unsafe_access(false), 81 _barrier(0) { 82 init_class_id(Class_Mem); 83 debug_only(_adr_type=at; adr_type();) 84 } MemNode(Node * c0,Node * c1,Node * c2,const TypePtr * at,Node * c3,Node * c4)85 MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) : 86 Node(c0,c1,c2,c3,c4), 87 _unaligned_access(false), 88 _mismatched_access(false), 89 _unsafe_access(false), 90 _barrier(0) { 91 init_class_id(Class_Mem); 92 debug_only(_adr_type=at; adr_type();) 93 } 94 find_previous_arraycopy(PhaseTransform * phase,Node * ld_alloc,Node * & mem,bool can_see_stored_value) const95 virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return NULL; } 96 static bool check_if_adr_maybe_raw(Node* adr); 97 98 public: 99 // Helpers for the 
optimizer. Documented in memnode.cpp. 100 static bool detect_ptr_independence(Node* p1, AllocateNode* a1, 101 Node* p2, AllocateNode* a2, 102 PhaseTransform* phase); 103 static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast); 104 105 static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase); 106 static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase); 107 // This one should probably be a phase-specific function: 108 static bool all_controls_dominate(Node* dom, Node* sub); 109 110 virtual const class TypePtr *adr_type() const; // returns bottom_type of address 111 112 // Shared code for Ideal methods: 113 Node *Ideal_common(PhaseGVN *phase, bool can_reshape); // Return -1 for short-circuit NULL. 114 115 // Helper function for adr_type() implementations. 116 static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL); 117 118 // Raw access function, to allow copying of adr_type efficiently in 119 // product builds and retain the debug info for debug builds. raw_adr_type() const120 const TypePtr *raw_adr_type() const { 121 #ifdef ASSERT 122 return _adr_type; 123 #else 124 return 0; 125 #endif 126 } 127 128 // Map a load or store opcode to its corresponding store opcode. 129 // (Return -1 if unknown.) store_Opcode() const130 virtual int store_Opcode() const { return -1; } 131 132 // What is the type of the value in memory? (T_VOID mean "unspecified".) 
133 virtual BasicType memory_type() const = 0; memory_size() const134 virtual int memory_size() const { 135 #ifdef ASSERT 136 return type2aelembytes(memory_type(), true); 137 #else 138 return type2aelembytes(memory_type()); 139 #endif 140 } 141 barrier_data()142 uint8_t barrier_data() { return _barrier; } set_barrier_data(uint8_t barrier_data)143 void set_barrier_data(uint8_t barrier_data) { _barrier = barrier_data; } 144 145 // Search through memory states which precede this node (load or store). 146 // Look for an exact match for the address, with no intervening 147 // aliased stores. 148 Node* find_previous_store(PhaseTransform* phase); 149 150 // Can this node (load or store) accurately see a stored value in 151 // the given memory state? (The state may or may not be in(Memory).) 152 Node* can_see_stored_value(Node* st, PhaseTransform* phase) const; 153 set_unaligned_access()154 void set_unaligned_access() { _unaligned_access = true; } is_unaligned_access() const155 bool is_unaligned_access() const { return _unaligned_access; } set_mismatched_access()156 void set_mismatched_access() { _mismatched_access = true; } is_mismatched_access() const157 bool is_mismatched_access() const { return _mismatched_access; } set_unsafe_access()158 void set_unsafe_access() { _unsafe_access = true; } is_unsafe_access() const159 bool is_unsafe_access() const { return _unsafe_access; } 160 161 #ifndef PRODUCT 162 static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st); 163 virtual void dump_spec(outputStream *st) const; 164 #endif 165 }; 166 167 //------------------------------LoadNode--------------------------------------- 168 // Load value; requires Memory and Address 169 class LoadNode : public MemNode { 170 public: 171 // Some loads (from unsafe) should be pinned: they don't depend only 172 // on the dominating test. The field _control_dependency below records 173 // whether that node depends only on the dominating test. 
174 // Pinned and UnknownControl are similar, but differ in that Pinned 175 // loads are not allowed to float across safepoints, whereas UnknownControl 176 // loads are allowed to do that. Therefore, Pinned is stricter. 177 enum ControlDependency { 178 Pinned, 179 UnknownControl, 180 DependsOnlyOnTest 181 }; 182 183 private: 184 // LoadNode::hash() doesn't take the _control_dependency field 185 // into account: If the graph already has a non-pinned LoadNode and 186 // we add a pinned LoadNode with the same inputs, it's safe for GVN 187 // to replace the pinned LoadNode with the non-pinned LoadNode, 188 // otherwise it wouldn't be safe to have a non pinned LoadNode with 189 // those inputs in the first place. If the graph already has a 190 // pinned LoadNode and we add a non pinned LoadNode with the same 191 // inputs, it's safe (but suboptimal) for GVN to replace the 192 // non-pinned LoadNode by the pinned LoadNode. 193 ControlDependency _control_dependency; 194 195 // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish 196 // loads that can be reordered, and such requiring acquire semantics to 197 // adhere to the Java specification. The required behaviour is stored in 198 // this field. 199 const MemOrd _mo; 200 201 AllocateNode* is_new_object_mark_load(PhaseGVN *phase) const; 202 203 protected: 204 virtual bool cmp(const Node &n) const; 205 virtual uint size_of() const; // Size is bigger 206 // Should LoadNode::Ideal() attempt to remove control edges? 207 virtual bool can_remove_control() const; 208 const Type* const _type; // What kind of value is loaded? 
209 210 virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const; 211 public: 212 LoadNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const Type * rt,MemOrd mo,ControlDependency control_dependency)213 LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency) 214 : MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _type(rt) { 215 init_class_id(Class_Load); 216 } is_unordered() const217 inline bool is_unordered() const { return !is_acquire(); } is_acquire() const218 inline bool is_acquire() const { 219 assert(_mo == unordered || _mo == acquire, "unexpected"); 220 return _mo == acquire; 221 } is_unsigned() const222 inline bool is_unsigned() const { 223 int lop = Opcode(); 224 return (lop == Op_LoadUB) || (lop == Op_LoadUS); 225 } 226 227 // Polymorphic factory method: 228 static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr, 229 const TypePtr* at, const Type *rt, BasicType bt, 230 MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, 231 bool unaligned = false, bool mismatched = false, bool unsafe = false, 232 uint8_t barrier_data = 0); 233 234 virtual uint hash() const; // Check the type 235 236 // Handle algebraic identities here. If we have an identity, return the Node 237 // we are equivalent to. We look for Load of a Store. 238 virtual Node* Identity(PhaseGVN* phase); 239 240 // If the load is from Field memory and the pointer is non-null, it might be possible to 241 // zero out the control input. 242 // If the offset is constant and the base is an object allocation, 243 // try to hook me up to the exact initializing store. 244 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 245 246 // Split instance field load through Phi. 
247 Node* split_through_phi(PhaseGVN *phase); 248 249 // Recover original value from boxed values 250 Node *eliminate_autobox(PhaseGVN *phase); 251 252 // Compute a new Type for this node. Basically we just do the pre-check, 253 // then call the virtual add() to set the type. 254 virtual const Type* Value(PhaseGVN* phase) const; 255 256 // Common methods for LoadKlass and LoadNKlass nodes. 257 const Type* klass_value_common(PhaseGVN* phase) const; 258 Node* klass_identity_common(PhaseGVN* phase); 259 260 virtual uint ideal_reg() const; 261 virtual const Type *bottom_type() const; 262 // Following method is copied from TypeNode: set_type(const Type * t)263 void set_type(const Type* t) { 264 assert(t != NULL, "sanity"); 265 debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); 266 *(const Type**)&_type = t; // cast away const-ness 267 // If this node is in the hash table, make sure it doesn't need a rehash. 268 assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code"); 269 } type() const270 const Type* type() const { assert(_type != NULL, "sanity"); return _type; }; 271 272 // Do not match memory edge 273 virtual uint match_edge(uint idx) const; 274 275 // Map a load opcode to its corresponding store opcode. 276 virtual int store_Opcode() const = 0; 277 278 // Check if the load's memory input is a Phi node with the same control. 
279 bool is_instance_field_load_with_local_phi(Node* ctrl); 280 281 Node* convert_to_unsigned_load(PhaseGVN& gvn); 282 Node* convert_to_signed_load(PhaseGVN& gvn); 283 pin()284 void pin() { _control_dependency = Pinned; } has_unknown_control_dependency() const285 bool has_unknown_control_dependency() const { return _control_dependency == UnknownControl; } 286 287 #ifndef PRODUCT 288 virtual void dump_spec(outputStream *st) const; 289 #endif 290 #ifdef ASSERT 291 // Helper function to allow a raw load without control edge for some cases 292 static bool is_immutable_value(Node* adr); 293 #endif 294 protected: 295 const Type* load_array_final_field(const TypeKlassPtr *tkls, 296 ciKlass* klass) const; 297 298 Node* can_see_arraycopy_value(Node* st, PhaseGVN* phase) const; 299 300 // depends_only_on_test is almost always true, and needs to be almost always 301 // true to enable key hoisting & commoning optimizations. However, for the 302 // special case of RawPtr loads from TLS top & end, and other loads performed by 303 // GC barriers, the control edge carries the dependence preventing hoisting past 304 // a Safepoint instead of the memory edge. (An unfortunate consequence of having 305 // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes 306 // which produce results (new raw memory state) inside of loops preventing all 307 // manner of other optimizations). Basically, it's ugly but so is the alternative. 308 // See comment in macro.cpp, around line 125 expand_allocate_common(). 
depends_only_on_test() const309 virtual bool depends_only_on_test() const { 310 return adr_type() != TypeRawPtr::BOTTOM && _control_dependency == DependsOnlyOnTest; 311 } 312 }; 313 314 //------------------------------LoadBNode-------------------------------------- 315 // Load a byte (8bits signed) from memory 316 class LoadBNode : public LoadNode { 317 public: LoadBNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const TypeInt * ti,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)318 LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 319 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} 320 virtual int Opcode() const; ideal_reg() const321 virtual uint ideal_reg() const { return Op_RegI; } 322 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 323 virtual const Type* Value(PhaseGVN* phase) const; store_Opcode() const324 virtual int store_Opcode() const { return Op_StoreB; } memory_type() const325 virtual BasicType memory_type() const { return T_BYTE; } 326 }; 327 328 //------------------------------LoadUBNode------------------------------------- 329 // Load a unsigned byte (8bits unsigned) from memory 330 class LoadUBNode : public LoadNode { 331 public: LoadUBNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const TypeInt * ti,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)332 LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 333 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} 334 virtual int Opcode() const; ideal_reg() const335 virtual uint ideal_reg() const { return Op_RegI; } 336 virtual Node* Ideal(PhaseGVN *phase, bool can_reshape); 337 virtual const Type* Value(PhaseGVN* phase) const; store_Opcode() const338 virtual int store_Opcode() const { return Op_StoreB; } memory_type() const339 virtual 
BasicType memory_type() const { return T_BYTE; } 340 }; 341 342 //------------------------------LoadUSNode------------------------------------- 343 // Load an unsigned short/char (16bits unsigned) from memory 344 class LoadUSNode : public LoadNode { 345 public: LoadUSNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const TypeInt * ti,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)346 LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 347 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} 348 virtual int Opcode() const; ideal_reg() const349 virtual uint ideal_reg() const { return Op_RegI; } 350 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 351 virtual const Type* Value(PhaseGVN* phase) const; store_Opcode() const352 virtual int store_Opcode() const { return Op_StoreC; } memory_type() const353 virtual BasicType memory_type() const { return T_CHAR; } 354 }; 355 356 //------------------------------LoadSNode-------------------------------------- 357 // Load a short (16bits signed) from memory 358 class LoadSNode : public LoadNode { 359 public: LoadSNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const TypeInt * ti,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)360 LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 361 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} 362 virtual int Opcode() const; ideal_reg() const363 virtual uint ideal_reg() const { return Op_RegI; } 364 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 365 virtual const Type* Value(PhaseGVN* phase) const; store_Opcode() const366 virtual int store_Opcode() const { return Op_StoreC; } memory_type() const367 virtual BasicType memory_type() const { return T_SHORT; } 368 }; 369 370 
//------------------------------LoadINode-------------------------------------- 371 // Load an integer from memory 372 class LoadINode : public LoadNode { 373 public: LoadINode(Node * c,Node * mem,Node * adr,const TypePtr * at,const TypeInt * ti,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)374 LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 375 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} 376 virtual int Opcode() const; ideal_reg() const377 virtual uint ideal_reg() const { return Op_RegI; } store_Opcode() const378 virtual int store_Opcode() const { return Op_StoreI; } memory_type() const379 virtual BasicType memory_type() const { return T_INT; } 380 }; 381 382 //------------------------------LoadRangeNode---------------------------------- 383 // Load an array length from the array 384 class LoadRangeNode : public LoadINode { 385 public: LoadRangeNode(Node * c,Node * mem,Node * adr,const TypeInt * ti=TypeInt::POS)386 LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS) 387 : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {} 388 virtual int Opcode() const; 389 virtual const Type* Value(PhaseGVN* phase) const; 390 virtual Node* Identity(PhaseGVN* phase); 391 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 392 }; 393 394 //------------------------------LoadLNode-------------------------------------- 395 // Load a long from memory 396 class LoadLNode : public LoadNode { hash() const397 virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; } cmp(const Node & n) const398 virtual bool cmp( const Node &n ) const { 399 return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access 400 && LoadNode::cmp(n); 401 } size_of() const402 virtual uint size_of() const { return sizeof(*this); } 403 const bool _require_atomic_access; // is piecewise load forbidden? 
404 405 public: LoadLNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const TypeLong * tl,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest,bool require_atomic_access=false)406 LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl, 407 MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false) 408 : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {} 409 virtual int Opcode() const; ideal_reg() const410 virtual uint ideal_reg() const { return Op_RegL; } store_Opcode() const411 virtual int store_Opcode() const { return Op_StoreL; } memory_type() const412 virtual BasicType memory_type() const { return T_LONG; } require_atomic_access() const413 bool require_atomic_access() const { return _require_atomic_access; } 414 static LoadLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, 415 const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, 416 bool unaligned = false, bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0); 417 #ifndef PRODUCT dump_spec(outputStream * st) const418 virtual void dump_spec(outputStream *st) const { 419 LoadNode::dump_spec(st); 420 if (_require_atomic_access) st->print(" Atomic!"); 421 } 422 #endif 423 }; 424 425 //------------------------------LoadL_unalignedNode---------------------------- 426 // Load a long from unaligned memory 427 class LoadL_unalignedNode : public LoadLNode { 428 public: LoadL_unalignedNode(Node * c,Node * mem,Node * adr,const TypePtr * at,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)429 LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 430 : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {} 431 virtual int Opcode() const; 432 }; 433 434 
//------------------------------LoadFNode-------------------------------------- 435 // Load a float (64 bits) from memory 436 class LoadFNode : public LoadNode { 437 public: LoadFNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const Type * t,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)438 LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 439 : LoadNode(c, mem, adr, at, t, mo, control_dependency) {} 440 virtual int Opcode() const; ideal_reg() const441 virtual uint ideal_reg() const { return Op_RegF; } store_Opcode() const442 virtual int store_Opcode() const { return Op_StoreF; } memory_type() const443 virtual BasicType memory_type() const { return T_FLOAT; } 444 }; 445 446 //------------------------------LoadDNode-------------------------------------- 447 // Load a double (64 bits) from memory 448 class LoadDNode : public LoadNode { hash() const449 virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; } cmp(const Node & n) const450 virtual bool cmp( const Node &n ) const { 451 return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access 452 && LoadNode::cmp(n); 453 } size_of() const454 virtual uint size_of() const { return sizeof(*this); } 455 const bool _require_atomic_access; // is piecewise load forbidden? 
456 457 public: LoadDNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const Type * t,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest,bool require_atomic_access=false)458 LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, 459 MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false) 460 : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {} 461 virtual int Opcode() const; ideal_reg() const462 virtual uint ideal_reg() const { return Op_RegD; } store_Opcode() const463 virtual int store_Opcode() const { return Op_StoreD; } memory_type() const464 virtual BasicType memory_type() const { return T_DOUBLE; } require_atomic_access() const465 bool require_atomic_access() const { return _require_atomic_access; } 466 static LoadDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, 467 const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, 468 bool unaligned = false, bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0); 469 #ifndef PRODUCT dump_spec(outputStream * st) const470 virtual void dump_spec(outputStream *st) const { 471 LoadNode::dump_spec(st); 472 if (_require_atomic_access) st->print(" Atomic!"); 473 } 474 #endif 475 }; 476 477 //------------------------------LoadD_unalignedNode---------------------------- 478 // Load a double from unaligned memory 479 class LoadD_unalignedNode : public LoadDNode { 480 public: LoadD_unalignedNode(Node * c,Node * mem,Node * adr,const TypePtr * at,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)481 LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 482 : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {} 483 virtual int Opcode() const; 484 }; 485 486 
//------------------------------LoadPNode-------------------------------------- 487 // Load a pointer from memory (either object or array) 488 class LoadPNode : public LoadNode { 489 public: LoadPNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const TypePtr * t,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)490 LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 491 : LoadNode(c, mem, adr, at, t, mo, control_dependency) {} 492 virtual int Opcode() const; ideal_reg() const493 virtual uint ideal_reg() const { return Op_RegP; } store_Opcode() const494 virtual int store_Opcode() const { return Op_StoreP; } memory_type() const495 virtual BasicType memory_type() const { return T_ADDRESS; } 496 }; 497 498 499 //------------------------------LoadNNode-------------------------------------- 500 // Load a narrow oop from memory (either object or array) 501 class LoadNNode : public LoadNode { 502 public: LoadNNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const Type * t,MemOrd mo,ControlDependency control_dependency=DependsOnlyOnTest)503 LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 504 : LoadNode(c, mem, adr, at, t, mo, control_dependency) {} 505 virtual int Opcode() const; ideal_reg() const506 virtual uint ideal_reg() const { return Op_RegN; } store_Opcode() const507 virtual int store_Opcode() const { return Op_StoreN; } memory_type() const508 virtual BasicType memory_type() const { return T_NARROWOOP; } 509 }; 510 511 //------------------------------LoadKlassNode---------------------------------- 512 // Load a Klass from an object 513 class LoadKlassNode : public LoadPNode { 514 protected: 515 // In most cases, LoadKlassNode does not have the control input set. If the control 516 // input is set, it must not be removed (by LoadNode::Ideal()). 
517 virtual bool can_remove_control() const; 518 public: LoadKlassNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const TypeKlassPtr * tk,MemOrd mo)519 LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo) 520 : LoadPNode(c, mem, adr, at, tk, mo) {} 521 virtual int Opcode() const; 522 virtual const Type* Value(PhaseGVN* phase) const; 523 virtual Node* Identity(PhaseGVN* phase); depends_only_on_test() const524 virtual bool depends_only_on_test() const { return true; } 525 526 // Polymorphic factory method: 527 static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at, 528 const TypeKlassPtr* tk = TypeKlassPtr::OBJECT); 529 }; 530 531 //------------------------------LoadNKlassNode--------------------------------- 532 // Load a narrow Klass from an object. 533 class LoadNKlassNode : public LoadNNode { 534 public: LoadNKlassNode(Node * c,Node * mem,Node * adr,const TypePtr * at,const TypeNarrowKlass * tk,MemOrd mo)535 LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo) 536 : LoadNNode(c, mem, adr, at, tk, mo) {} 537 virtual int Opcode() const; ideal_reg() const538 virtual uint ideal_reg() const { return Op_RegN; } store_Opcode() const539 virtual int store_Opcode() const { return Op_StoreNKlass; } memory_type() const540 virtual BasicType memory_type() const { return T_NARROWKLASS; } 541 542 virtual const Type* Value(PhaseGVN* phase) const; 543 virtual Node* Identity(PhaseGVN* phase); depends_only_on_test() const544 virtual bool depends_only_on_test() const { return true; } 545 }; 546 547 548 //------------------------------StoreNode-------------------------------------- 549 // Store value; requires Store, Address and Value 550 class StoreNode : public MemNode { 551 private: 552 // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish 553 // stores that can be reordered, and such requiring release semantics to 554 // adhere to 
// the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual bool cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  // Variant taking an extra OopStore edge (used by StoreCMNode below).
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
#ifdef AARCH64
    // AArch64 doesn't need a release store here because object
    // initialization contains the necessary barriers.
    return unordered;
#else
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
#endif
  }

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                         const TypePtr* at, Node *val, BasicType bt, MemOrd mo);

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node* Identity(PhaseGVN* phase);

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;

  MemBarNode* trailing_membar() const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  // _require_atomic_access participates in hash()/cmp() so GVN does not
  // commit an atomic store with a non-atomic one.
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  // See StoreLNode: atomicity requirement is part of the GVN identity.
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif

};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWKLASS; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
 private:
  // The oop alias index participates in hash()/cmp() so GVN never merges
  // card marks guarding stores to different oop alias categories.
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual bool cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;   // The alias_idx of OopStore

public:
  // Always built with MemNode::release ordering; the OopStore edge records
  // the oop store this card mark covers.
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c, mem, adr, at, val, oop_store, MemNode::release),
    _oop_alias_idx(oop_alias_idx) {
    // AliasIdxBot is only acceptable when alias analysis is disabled (AliasLevel 0).
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo)
    : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {}
  virtual int Opcode() const;
  // The matching conditional-store opcode for this load-locked form.
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool      is_CFG() const  { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  // Delegates to the memory input of the projected store-conditional node.
  virtual const TypePtr *adr_type() const {
    Node* ctrl = in(0);
    if (ctrl == NULL)  return NULL; // node is dead
    return ctrl->in(MemNode::Memory)->adr_type();
  }
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type* Value(PhaseGVN* phase) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
// Common base of atomic read-modify-write nodes (CAS, GetAndAdd/Set, ...).
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  uint8_t _barrier;             // Bit field with barrier information
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  // Only the address and value inputs participate in matching.
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address

  bool result_not_used() const;
  MemBarNode* trailing_membar() const;

  uint8_t barrier_data() { return _barrier; }
  void set_barrier_data(uint8_t barrier_data) { _barrier = barrier_data; }
};

// LoadStore variant carrying an extra "expected value" input (edge ExpectedIn).
class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreConditionalNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreConditionalNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreConditionalNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

// Base class of the strong and weak compare-and-swap node variants below;
// records the memory ordering required by the operation.
class CompareAndSwapNode : public LoadStoreConditionalNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {}
  MemNode::MemOrd order() const {
    return _mem_ord;
  }
  virtual uint size_of() const { return sizeof(*this); }
};

// Base class of the compare-and-exchange node variants below; carries the
// expected value on edge ExpectedIn and the required memory ordering.
class CompareAndExchangeNode : public LoadStoreNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) :
    LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) {
     init_req(ExpectedIn, ex );
  }

  MemNode::MemOrd order() const {
    return _mem_ord;
  }
  virtual uint size_of() const { return sizeof(*this); }
};

//------------------------------CompareAndSwapBNode---------------------------
class CompareAndSwapBNode : public CompareAndSwapNode {
public:
  CompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapSNode---------------------------
class CompareAndSwapSNode : public CompareAndSwapNode {
public:
  CompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public CompareAndSwapNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public CompareAndSwapNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public CompareAndSwapNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public CompareAndSwapNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapBNode---------------------------
class WeakCompareAndSwapBNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapSNode---------------------------
class WeakCompareAndSwapSNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapINode---------------------------
class WeakCompareAndSwapINode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapLNode---------------------------
class WeakCompareAndSwapLNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapPNode---------------------------
class WeakCompareAndSwapPNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapNNode---------------------------
class WeakCompareAndSwapNNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeBNode---------------------------
class CompareAndExchangeBNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::BYTE) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeSNode---------------------------
class CompareAndExchangeSNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::SHORT) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeLNode---------------------------
class CompareAndExchangeLNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeINode---------------------------
class CompareAndExchangeINode : public CompareAndExchangeNode {
public:
  CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangePNode---------------------------
class CompareAndExchangePNode : public CompareAndExchangeNode {
public:
  CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeNNode---------------------------
class CompareAndExchangeNNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddBNode---------------------------
class GetAndAddBNode : public LoadStoreNode {
public:
  GetAndAddBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddSNode---------------------------
class GetAndAddSNode : public LoadStoreNode {
public:
  GetAndAddSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetBNode---------------------------
class GetAndSetBNode : public LoadStoreNode {
public:
  GetAndSetBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetSNode---------------------------
class GetAndSetSNode : public LoadStoreNode {
public:
  GetAndSetSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large)
    : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) {
    init_class_id(Class_ClearArray);
  }
  virtual int         Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

  // How is this membar related to a nearby memory access?
  enum {
    Standalone,
    TrailingLoad,
    TrailingStore,
    LeadingStore,
    TrailingLoadStore,
    LeadingLoadStore
  } _kind;

#ifdef ASSERT
  uint _pair_idx;
#endif

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);

  MemBarNode* trailing_membar() const;
  MemBarNode* leading_membar() const;

  void set_trailing_load() { _kind = TrailingLoad; }
  bool trailing_load() const { return _kind == TrailingLoad; }
  bool trailing_store() const { return _kind == TrailingStore; }
  bool leading_store() const { return _kind == LeadingStore; }
  bool trailing_load_store() const { return _kind == TrailingLoadStore; }
  bool leading_load_store() const { return _kind == LeadingLoadStore; }
  bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
  bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
  bool standalone() const { return _kind == Standalone; }

  static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
  static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing);

  void remove(PhaseIterGVN *igvn);
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted independent of any load, as required
// for intrinsic Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.
Inserted independent of any store, as required 1264 // for intrinsic Unsafe.storeFence(). 1265 class StoreFenceNode: public MemBarNode { 1266 public: StoreFenceNode(Compile * C,int alias_idx,Node * precedent)1267 StoreFenceNode(Compile* C, int alias_idx, Node* precedent) 1268 : MemBarNode(C, alias_idx, precedent) {} 1269 virtual int Opcode() const; 1270 }; 1271 1272 // "Acquire" - no following ref can move before (but earlier refs can 1273 // follow, like an early Load stalled in cache). Requires multi-cpu 1274 // visibility. Inserted after a FastLock. 1275 class MemBarAcquireLockNode: public MemBarNode { 1276 public: MemBarAcquireLockNode(Compile * C,int alias_idx,Node * precedent)1277 MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent) 1278 : MemBarNode(C, alias_idx, precedent) {} 1279 virtual int Opcode() const; 1280 }; 1281 1282 // "Release" - no earlier ref can move after (but later refs can move 1283 // up, like a speculative pipelined cache-hitting Load). Requires 1284 // multi-cpu visibility. Inserted before a FastUnLock. 1285 class MemBarReleaseLockNode: public MemBarNode { 1286 public: MemBarReleaseLockNode(Compile * C,int alias_idx,Node * precedent)1287 MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent) 1288 : MemBarNode(C, alias_idx, precedent) {} 1289 virtual int Opcode() const; 1290 }; 1291 1292 class MemBarStoreStoreNode: public MemBarNode { 1293 public: MemBarStoreStoreNode(Compile * C,int alias_idx,Node * precedent)1294 MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent) 1295 : MemBarNode(C, alias_idx, precedent) { 1296 init_class_id(Class_MemBarStoreStore); 1297 } 1298 virtual int Opcode() const; 1299 }; 1300 1301 // Ordering between a volatile store and a following volatile load. 1302 // Requires multi-CPU visibility? 
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

// Spin-wait hint barrier.  NOTE(review): presumably backs the
// Thread.onSpinWait() intrinsic (maps to a CPU pause/yield hint on
// platforms that have one) -- confirm against the .ad files.
class OnSpinWaitNode: public MemBarNode {
public:
  OnSpinWaitNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  // Completion state, stored as a bit set in _is_complete.
  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseGVN* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseGVN* phase, bool can_reshape);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseIterGVN* phase);

 private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* value, PhaseGVN* phase);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Node* base_memory);

  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

// Stack-allocated iterator over the memory slices of one MergeMemNode, or
// of two MergeMemNodes walked in parallel (see the "expected usages" below).
class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;       // optional second guy, contributes non-empty iterations
  Node*               _mm_base;   // loop-invariant base memory of _mm
  int                 _idx;       // current alias index (slice) of the walk
  int                 _cnt;       // req() of _mm: one past the last slice
  Node*               _mem;       // current slice of _mm
  Node*               _mem2;      // current slice of _mm2 (parallel walk only)
  int                 _cnt2;      // req() of _mm2

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  // Recompute what _mem should be, to validate the cached value.
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;   // is_empty() == true
      }
    }
    return false;
  }
};

// cachewb node for guaranteeing writeback of the cache line at a
// given address to (non-volatile) RAM
class CacheWBNode : public Node {
public:
  CacheWBNode(Node *ctrl, Node *mem, Node *addr) : Node(ctrl, mem, addr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return (idx == 2); }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};

// cachewb pre sync node for ensuring that writebacks are serialised
// relative to preceding stores
class CacheWBPreSyncNode : public Node {
public:
  CacheWBPreSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return false; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};

// cachewb post sync node for ensuring that writebacks are serialised
// relative to following stores
class CacheWBPostSyncNode : public Node {
public:
  CacheWBPostSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return false; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};

//------------------------------Prefetch---------------------------------------

// Allocation prefetch which may fault, TLAB size have to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_OPTO_MEMNODE_HPP