//==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MachineMemOperand class, which is a
// description of a memory reference. It is used to help track dependencies
// in the backend.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
#define LLVM_CODEGEN_MACHINEMEMOPERAND_H

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Value.h" // PointerLikeTypeTraits<Value*>
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/DataTypes.h"

namespace llvm {

class MDNode;
class raw_ostream;
class MachineFunction;
class ModuleSlotTracker;
class TargetInstrInfo;

/// This class contains a discriminated union of information about pointers in
/// memory operands, relating them back to LLVM IR or to virtual locations (such
/// as frame indices) that are exposed during codegen.
struct MachinePointerInfo {
  /// This is the IR pointer value for the access, or it is null if unknown.
  PointerUnion<const Value *, const PseudoSourceValue *> V;

  /// Offset - This is an offset from the base Value*.
  int64_t Offset;

  /// The LLVM IR address space number this pointer points into. Derived from
  /// \c V in the constructors when a base value is available; otherwise it is
  /// supplied explicitly (defaulting to 0).
  unsigned AddrSpace = 0;

  /// Identifier for the stack this access refers to, for stack-relative
  /// references; 0 denotes the default stack.
  /// NOTE(review): the set of valid IDs is not defined in this header —
  /// presumably target stack IDs managed by frame lowering; confirm.
  uint8_t StackID;

  /// Construct from an IR pointer value. The address space is taken from
  /// \p v's pointer type, or 0 when \p v is null (unknown base).
  explicit MachinePointerInfo(const Value *v, int64_t offset = 0,
                              uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    AddrSpace = v ? v->getType()->getPointerAddressSpace() : 0;
  }

  /// Construct from a PseudoSourceValue (a codegen-only memory location such
  /// as a fixed stack slot). The address space is taken from \p v.
  explicit MachinePointerInfo(const PseudoSourceValue *v, int64_t offset = 0,
                              uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    AddrSpace = v ? v->getAddressSpace() : 0;
  }

  /// Construct with an unknown base value, recording only the address space
  /// and offset. StackID is fixed to the default stack (0).
  explicit MachinePointerInfo(unsigned AddressSpace = 0, int64_t offset = 0)
      : V((const Value *)nullptr), Offset(offset), AddrSpace(AddressSpace),
        StackID(0) {}

  /// Construct directly from a discriminated union of the two base kinds.
  /// The address space is derived from whichever alternative is active;
  /// it stays 0 when \p v is null.
  explicit MachinePointerInfo(
      PointerUnion<const Value *, const PseudoSourceValue *> v,
      int64_t offset = 0,
      uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    if (V) {
      if (const auto *ValPtr = dyn_cast_if_present<const Value *>(V))
        AddrSpace = ValPtr->getType()->getPointerAddressSpace();
      else
        AddrSpace = cast<const PseudoSourceValue *>(V)->getAddressSpace();
    }
  }

  /// Return a copy of this MachinePointerInfo with \p O added to the offset,
  /// preserving the base value (or address space, if the base is unknown) and
  /// the stack ID.
  MachinePointerInfo getWithOffset(int64_t O) const {
    if (V.isNull())
      return MachinePointerInfo(AddrSpace, Offset + O);
    if (isa<const Value *>(V))
      return MachinePointerInfo(cast<const Value *>(V), Offset + O, StackID);
    return MachinePointerInfo(cast<const PseudoSourceValue *>(V), Offset + O,
                              StackID);
  }

  /// Return true if memory region [V, V+Offset+Size) is known to be
  /// dereferenceable.
  bool isDereferenceable(unsigned Size, LLVMContext &C,
                         const DataLayout &DL) const;

  /// Return the LLVM IR address space number that this pointer points into.
  unsigned getAddrSpace() const;

  /// Return a MachinePointerInfo record that refers to the constant pool.
  static MachinePointerInfo getConstantPool(MachineFunction &MF);

  /// Return a MachinePointerInfo record that refers to the specified
  /// FrameIndex.
  static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI,
                                          int64_t Offset = 0);

  /// Return a MachinePointerInfo record that refers to a jump table entry.
  static MachinePointerInfo getJumpTable(MachineFunction &MF);

  /// Return a MachinePointerInfo record that refers to a GOT entry.
  static MachinePointerInfo getGOT(MachineFunction &MF);

  /// Stack pointer relative access.
  static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset,
                                     uint8_t ID = 0);

  /// Stack memory without other information.
  static MachinePointerInfo getUnknownStack(MachineFunction &MF);
};


//===----------------------------------------------------------------------===//
/// A description of a memory reference used in the backend.
/// Instead of holding a StoreInst or LoadInst, this class holds the address
/// Value of the reference along with a byte size and offset. This allows it
/// to describe lowered loads and stores. Also, the special PseudoSourceValue
/// objects can be used to represent loads and stores to memory locations
/// that aren't explicit in the regular LLVM IR.
///
class MachineMemOperand {
public:
  /// Flags values. These may be or'd together.
  enum Flags : uint16_t {
    // No flags set.
    MONone = 0,
    /// The memory access reads data.
    MOLoad = 1u << 0,
    /// The memory access writes data.
    MOStore = 1u << 1,
    /// The memory access is volatile.
    MOVolatile = 1u << 2,
    /// The memory access is non-temporal.
    MONonTemporal = 1u << 3,
    /// The memory access is dereferenceable (i.e., doesn't trap).
    MODereferenceable = 1u << 4,
    /// The memory access always returns the same value (or traps).
    MOInvariant = 1u << 5,

    // Reserved for use by target-specific passes.
    // Targets may override getSerializableMachineMemOperandTargetFlags() to
    // enable MIR serialization/parsing of these flags. If more of these flags
    // are added, the MIR printing/parsing code will need to be updated as well.
    MOTargetFlag1 = 1u << 6,
    MOTargetFlag2 = 1u << 7,
    MOTargetFlag3 = 1u << 8,

    LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ MOTargetFlag3)
  };

private:
  /// Atomic information for this memory operation.
  struct MachineAtomicInfo {
    /// Synchronization scope ID for this memory operation.
    unsigned SSID : 8;            // SyncScope::ID
    /// Atomic ordering requirements for this memory operation. For cmpxchg
    /// atomic operations, atomic ordering requirements when store occurs.
    unsigned Ordering : 4;        // enum AtomicOrdering
    /// For cmpxchg atomic operations, atomic ordering requirements when store
    /// does not occur.
    unsigned FailureOrdering : 4; // enum AtomicOrdering
  };

  /// Base pointer, offset, address space, and stack ID of the access.
  MachinePointerInfo PtrInfo;

  /// Track the memory type of the access. An access size which is unknown or
  /// too large to be represented by LLT should use the invalid LLT.
  LLT MemoryType;

  /// Or'd combination of the Flags enum values above.
  Flags FlagVals;

  /// Known alignment of the base address, excluding the offset.
  Align BaseAlign;

  /// Packed sync-scope ID and success/failure atomic orderings.
  MachineAtomicInfo AtomicInfo;

  /// Alias-analysis metadata tags for this access.
  AAMDNodes AAInfo;

  /// Range metadata node for this memory reference, or null if none.
  const MDNode *Ranges;

public:
  /// Construct a MachineMemOperand object with the specified PtrInfo, flags,
  /// size, and base alignment. For atomic operations the synchronization scope
  /// and atomic ordering requirements must also be specified. For cmpxchg
  /// atomic operations the atomic ordering requirements when store does not
  /// occur must also be specified.
  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, uint64_t s,
                    Align a, const AAMDNodes &AAInfo = AAMDNodes(),
                    const MDNode *Ranges = nullptr,
                    SyncScope::ID SSID = SyncScope::System,
                    AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
                    AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
  /// As above, but tracking the access with an explicit memory type (LLT)
  /// instead of a raw byte size.
  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, LLT type, Align a,
                    const AAMDNodes &AAInfo = AAMDNodes(),
                    const MDNode *Ranges = nullptr,
                    SyncScope::ID SSID = SyncScope::System,
                    AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
                    AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);

  const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }

  /// Return the base address of the memory access. This may either be a normal
  /// LLVM IR Value, or one of the special values used in CodeGen.
  /// Special values are those obtained via
  /// PseudoSourceValue::getFixedStack(int), PseudoSourceValue::getStack, and
  /// other PseudoSourceValue member functions which return objects which stand
  /// for frame/stack pointer relative references and other special references
  /// which are not representable in the high-level IR.
  const Value *getValue() const {
    return dyn_cast_if_present<const Value *>(PtrInfo.V);
  }

  /// Return the base as a PseudoSourceValue, or null if the base is an IR
  /// Value (or unknown).
  const PseudoSourceValue *getPseudoValue() const {
    return dyn_cast_if_present<const PseudoSourceValue *>(PtrInfo.V);
  }

  /// Return the base pointer union as an opaque pointer, e.g. for hashing.
  const void *getOpaqueValue() const { return PtrInfo.V.getOpaqueValue(); }

  /// Return the raw flags of the source value, \see Flags.
  Flags getFlags() const { return FlagVals; }

  /// Bitwise OR the current flags with the given flags.
  void setFlags(Flags f) { FlagVals |= f; }

  /// For normal values, this is a byte offset added to the base address.
  /// For PseudoSourceValue::FPRel values, this is the FrameIndex number.
  int64_t getOffset() const { return PtrInfo.Offset; }

  unsigned getAddrSpace() const { return PtrInfo.getAddrSpace(); }

  /// Return the memory type of the memory reference. This should only be relied
  /// on for GlobalISel G_* operation legalization.
  LLT getMemoryType() const { return MemoryType; }

  /// Return the size in bytes of the memory reference, or the all-ones
  /// sentinel value when the size is unknown (invalid LLT).
  uint64_t getSize() const {
    return MemoryType.isValid() ? MemoryType.getSizeInBytes() : ~UINT64_C(0);
  }

  /// Return the size in bits of the memory reference, or the all-ones
  /// sentinel value when the size is unknown (invalid LLT).
  uint64_t getSizeInBits() const {
    return MemoryType.isValid() ? MemoryType.getSizeInBits() : ~UINT64_C(0);
  }

  LLT getType() const {
    return MemoryType;
  }

  /// Return the minimum known alignment in bytes of the actual memory
  /// reference.
  Align getAlign() const;

  /// Return the minimum known alignment in bytes of the base address, without
  /// the offset.
  Align getBaseAlign() const { return BaseAlign; }

  /// Return the AA tags for the memory reference.
  AAMDNodes getAAInfo() const { return AAInfo; }

  /// Return the range tag for the memory reference.
  const MDNode *getRanges() const { return Ranges; }

  /// Returns the synchronization scope ID for this memory operation.
  SyncScope::ID getSyncScopeID() const {
    return static_cast<SyncScope::ID>(AtomicInfo.SSID);
  }

  /// Return the atomic ordering requirements for this memory operation. For
  /// cmpxchg atomic operations, return the atomic ordering requirements when
  /// store occurs.
  AtomicOrdering getSuccessOrdering() const {
    return static_cast<AtomicOrdering>(AtomicInfo.Ordering);
  }

  /// For cmpxchg atomic operations, return the atomic ordering requirements
  /// when store does not occur.
  AtomicOrdering getFailureOrdering() const {
    return static_cast<AtomicOrdering>(AtomicInfo.FailureOrdering);
  }

  /// Return a single atomic ordering that is at least as strong as both the
  /// success and failure orderings for an atomic operation. (For operations
  /// other than cmpxchg, this is equivalent to getSuccessOrdering().)
  AtomicOrdering getMergedOrdering() const {
    return getMergedAtomicOrdering(getSuccessOrdering(), getFailureOrdering());
  }

  bool isLoad() const { return FlagVals & MOLoad; }
  bool isStore() const { return FlagVals & MOStore; }
  bool isVolatile() const { return FlagVals & MOVolatile; }
  bool isNonTemporal() const { return FlagVals & MONonTemporal; }
  bool isDereferenceable() const { return FlagVals & MODereferenceable; }
  bool isInvariant() const { return FlagVals & MOInvariant; }

  /// Returns true if this operation has an atomic ordering requirement of
  /// unordered or higher, false otherwise.
  bool isAtomic() const {
    return getSuccessOrdering() != AtomicOrdering::NotAtomic;
  }

  /// Returns true if this memory operation doesn't have any ordering
  /// constraints other than normal aliasing. Volatile and (ordered) atomic
  /// memory operations can't be reordered.
  bool isUnordered() const {
    return (getSuccessOrdering() == AtomicOrdering::NotAtomic ||
            getSuccessOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  /// Update this MachineMemOperand to reflect the alignment of MMO, if it has a
  /// greater alignment. This must only be used when the new alignment applies
  /// to all users of this MachineMemOperand.
  void refineAlignment(const MachineMemOperand *MMO);

  /// Change the SourceValue for this MachineMemOperand. This should only be
  /// used when an object is being relocated and all references to it are being
  /// updated.
  void setValue(const Value *NewSV) { PtrInfo.V = NewSV; }
  void setValue(const PseudoSourceValue *NewSV) { PtrInfo.V = NewSV; }
  void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; }

  /// Reset the tracked memory type.
  void setType(LLT NewTy) {
    MemoryType = NewTy;
  }

  /// Support for operator<<.
  /// @{
  void print(raw_ostream &OS, ModuleSlotTracker &MST,
             SmallVectorImpl<StringRef> &SSNs, const LLVMContext &Context,
             const MachineFrameInfo *MFI, const TargetInstrInfo *TII) const;
  /// @}

  /// Two operands compare equal only when every tracked property matches:
  /// both base-pointer alternatives, size, offset, flags, AA metadata,
  /// range metadata, alignment, and address space.
  friend bool operator==(const MachineMemOperand &LHS,
                         const MachineMemOperand &RHS) {
    return LHS.getValue() == RHS.getValue() &&
           LHS.getPseudoValue() == RHS.getPseudoValue() &&
           LHS.getSize() == RHS.getSize() &&
           LHS.getOffset() == RHS.getOffset() &&
           LHS.getFlags() == RHS.getFlags() &&
           LHS.getAAInfo() == RHS.getAAInfo() &&
           LHS.getRanges() == RHS.getRanges() &&
           LHS.getAlign() == RHS.getAlign() &&
           LHS.getAddrSpace() == RHS.getAddrSpace();
  }

  friend bool operator!=(const MachineMemOperand &LHS,
                         const MachineMemOperand &RHS) {
    return !(LHS == RHS);
  }
};

} // End llvm namespace

#endif