//==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MachineMemOperand class, which is a
// description of a memory reference. It is used to help track dependencies
// in the backend.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
#define LLVM_CODEGEN_MACHINEMEMOPERAND_H

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Value.h" // PointerLikeTypeTraits<Value*>
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/DataTypes.h"

namespace llvm {

class FoldingSetNodeID;
class MDNode;
class raw_ostream;
class MachineFunction;
class ModuleSlotTracker;

/// This class contains a discriminated union of information about pointers in
/// memory operands, relating them back to LLVM IR or to virtual locations (such
/// as frame indices) that are exposed during codegen.
struct MachinePointerInfo {
  /// This is the IR pointer value for the access, or it is null if unknown.
  /// If this is null, the access is to an unknown location within the
  /// address space recorded in AddrSpace.
  PointerUnion<const Value *, const PseudoSourceValue *> V;

  /// This is an offset from the base Value*.
  int64_t Offset;

  /// The stack ID for stack accesses; 0 is the default stack.
  uint8_t StackID;

  /// The LLVM IR address space of the memory being referenced.
  unsigned AddrSpace = 0;

  explicit MachinePointerInfo(const Value *v, int64_t offset = 0,
                              uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    AddrSpace = v ? v->getType()->getPointerAddressSpace() : 0;
  }

  explicit MachinePointerInfo(const PseudoSourceValue *v, int64_t offset = 0,
                              uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    AddrSpace = v ? v->getAddressSpace() : 0;
  }

  explicit MachinePointerInfo(unsigned AddressSpace = 0, int64_t offset = 0)
      : V((const Value *)nullptr), Offset(offset), StackID(0),
        AddrSpace(AddressSpace) {}

  explicit MachinePointerInfo(
    PointerUnion<const Value *, const PseudoSourceValue *> v,
    int64_t offset = 0,
    uint8_t ID = 0)
    : V(v), Offset(offset), StackID(ID) {
    if (V) {
      if (const auto *ValPtr = V.dyn_cast<const Value*>())
        AddrSpace = ValPtr->getType()->getPointerAddressSpace();
      else
        AddrSpace = V.get<const PseudoSourceValue*>()->getAddressSpace();
    }
  }

  /// Return a copy of this MachinePointerInfo with the offset increased by
  /// \p O.
  MachinePointerInfo getWithOffset(int64_t O) const {
    if (V.isNull())
      return MachinePointerInfo(AddrSpace, Offset + O);
    if (V.is<const Value*>())
      return MachinePointerInfo(V.get<const Value*>(), Offset + O, StackID);
    return MachinePointerInfo(V.get<const PseudoSourceValue*>(), Offset + O,
                              StackID);
  }

  /// Return true if memory region [V, V+Offset+Size) is known to be
  /// dereferenceable.
  bool isDereferenceable(unsigned Size, LLVMContext &C,
                         const DataLayout &DL) const;

  /// Return the LLVM IR address space number that this pointer points into.
  unsigned getAddrSpace() const;

  /// Return a MachinePointerInfo record that refers to the constant pool.
  static MachinePointerInfo getConstantPool(MachineFunction &MF);

  /// Return a MachinePointerInfo record that refers to the specified
  /// FrameIndex.
  static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI,
                                          int64_t Offset = 0);

  /// Return a MachinePointerInfo record that refers to a jump table entry.
  static MachinePointerInfo getJumpTable(MachineFunction &MF);

  /// Return a MachinePointerInfo record that refers to a GOT entry.
  static MachinePointerInfo getGOT(MachineFunction &MF);

  /// Stack pointer relative access.
  static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset,
                                     uint8_t ID = 0);

  /// Stack memory without other information.
  static MachinePointerInfo getUnknownStack(MachineFunction &MF);
};
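
// Example (illustrative sketch, not part of the API): build pointer info for
// a fixed stack slot and derive a second record describing the word 4 bytes
// further in. `MF` and `FI` stand for a MachineFunction and a frame index
// assumed to be supplied by the surrounding code.
//
//   MachinePointerInfo Base = MachinePointerInfo::getFixedStack(MF, FI);
//   MachinePointerInfo High = Base.getWithOffset(4);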

//===----------------------------------------------------------------------===//
/// A description of a memory reference used in the backend.
/// Instead of holding a StoreInst or LoadInst, this class holds the address
/// Value of the reference along with a byte size and offset. This allows it
/// to describe lowered loads and stores. Also, the special PseudoSourceValue
/// objects can be used to represent loads and stores to memory locations
/// that aren't explicit in the regular LLVM IR.
///
class MachineMemOperand {
public:
  /// Flags values. These may be or'd together.
  enum Flags : uint16_t {
    // No flags set.
    MONone = 0,
    /// The memory access reads data.
    MOLoad = 1u << 0,
    /// The memory access writes data.
    MOStore = 1u << 1,
    /// The memory access is volatile.
    MOVolatile = 1u << 2,
    /// The memory access is non-temporal.
    MONonTemporal = 1u << 3,
    /// The memory access is dereferenceable (i.e., doesn't trap).
    MODereferenceable = 1u << 4,
    /// The memory access always returns the same value (or traps).
    MOInvariant = 1u << 5,

    // Reserved for use by target-specific passes.
    // Targets may override getSerializableMachineMemOperandTargetFlags() to
    // enable MIR serialization/parsing of these flags. If more of these flags
    // are added, the MIR printing/parsing code will need to be updated as well.
    MOTargetFlag1 = 1u << 6,
    MOTargetFlag2 = 1u << 7,
    MOTargetFlag3 = 1u << 8,

    LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ MOTargetFlag3)
  };
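
  // Example (illustrative): Flags form a bitmask, so a volatile load is
  // described by OR-ing the individual bits together:
  //
  //   MachineMemOperand::Flags F =
  //       MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;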

private:
  /// Atomic information for this memory operation.
  struct MachineAtomicInfo {
    /// Synchronization scope ID for this memory operation.
    unsigned SSID : 8;            // SyncScope::ID
    /// Atomic ordering requirements for this memory operation. For cmpxchg
    /// atomic operations, these are the ordering requirements when the store
    /// occurs (i.e. on success).
    unsigned Ordering : 4;        // enum AtomicOrdering
    /// For cmpxchg atomic operations, the ordering requirements when the
    /// store does not occur (i.e. on failure).
    unsigned FailureOrdering : 4; // enum AtomicOrdering
  };

  MachinePointerInfo PtrInfo;
  uint64_t Size;
  Flags FlagVals;
  Align BaseAlign;
  MachineAtomicInfo AtomicInfo;
  AAMDNodes AAInfo;
  const MDNode *Ranges;

public:
  /// Construct a MachineMemOperand object with the specified PtrInfo, flags,
  /// size, and base alignment. For atomic operations the synchronization scope
  /// and atomic ordering requirements must also be specified. For cmpxchg
  /// atomic operations the ordering requirements when the store does not
  /// occur must also be specified.
  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, uint64_t s,
                    Align a, const AAMDNodes &AAInfo = AAMDNodes(),
                    const MDNode *Ranges = nullptr,
                    SyncScope::ID SSID = SyncScope::System,
                    AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
                    AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
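
  // Note: in-tree code does not usually call this constructor directly;
  // MachineMemOperands are normally allocated through
  // MachineFunction::getMachineMemOperand. Illustrative sketch, assuming a
  // MachineFunction `MF` and frame index `FI` from the surrounding code:
  //
  //   MachineMemOperand *MMO = MF.getMachineMemOperand(
  //       MachinePointerInfo::getFixedStack(MF, FI),
  //       MachineMemOperand::MOLoad, /*Size=*/4, Align(4));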

  const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }

  /// Return the base address of the memory access. This may either be a normal
  /// LLVM IR Value, or one of the special values used in CodeGen.
  /// Special values are those obtained via
  /// PseudoSourceValue::getFixedStack(int), PseudoSourceValue::getStack, and
  /// other PseudoSourceValue member functions that return objects standing for
  /// frame/stack-pointer-relative references and other special references that
  /// are not representable in the high-level IR.
  const Value *getValue() const { return PtrInfo.V.dyn_cast<const Value*>(); }

  const PseudoSourceValue *getPseudoValue() const {
    return PtrInfo.V.dyn_cast<const PseudoSourceValue*>();
  }

  const void *getOpaqueValue() const { return PtrInfo.V.getOpaqueValue(); }

  /// Return the raw flags of the source value, \see Flags.
  Flags getFlags() const { return FlagVals; }

  /// Bitwise OR the current flags with the given flags.
  void setFlags(Flags f) { FlagVals |= f; }

  /// For normal values, this is a byte offset added to the base address.
  /// For PseudoSourceValue::FPRel values, this is the FrameIndex number.
  int64_t getOffset() const { return PtrInfo.Offset; }

  unsigned getAddrSpace() const { return PtrInfo.getAddrSpace(); }

  /// Return the size in bytes of the memory reference.
  uint64_t getSize() const { return Size; }

  /// Return the size in bits of the memory reference.
  uint64_t getSizeInBits() const { return Size * 8; }

  LLVM_ATTRIBUTE_DEPRECATED(uint64_t getAlignment() const,
                            "Use getAlign instead");

  /// Return the minimum known alignment in bytes of the actual memory
  /// reference.
  Align getAlign() const;

  LLVM_ATTRIBUTE_DEPRECATED(uint64_t getBaseAlignment() const,
                            "Use getBaseAlign instead") {
    return BaseAlign.value();
  }

  /// Return the minimum known alignment in bytes of the base address, without
  /// the offset.
  Align getBaseAlign() const { return BaseAlign; }
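
  // Worked example: an access whose base is known to be 8-byte aligned but
  // whose offset is 4 has getBaseAlign() == 8, while getAlign() is only 4:
  // the alignment of the actual reference must account for the offset.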

  /// Return the AA tags for the memory reference.
  AAMDNodes getAAInfo() const { return AAInfo; }

  /// Return the range tag for the memory reference.
  const MDNode *getRanges() const { return Ranges; }

  /// Returns the synchronization scope ID for this memory operation.
  SyncScope::ID getSyncScopeID() const {
    return static_cast<SyncScope::ID>(AtomicInfo.SSID);
  }

  /// Return the atomic ordering requirements for this memory operation. For
  /// cmpxchg atomic operations, return the ordering requirements when the
  /// store occurs (i.e. on success).
  AtomicOrdering getOrdering() const {
    return static_cast<AtomicOrdering>(AtomicInfo.Ordering);
  }

  /// For cmpxchg atomic operations, return the ordering requirements when the
  /// store does not occur (i.e. on failure).
  AtomicOrdering getFailureOrdering() const {
    return static_cast<AtomicOrdering>(AtomicInfo.FailureOrdering);
  }

  bool isLoad() const { return FlagVals & MOLoad; }
  bool isStore() const { return FlagVals & MOStore; }
  bool isVolatile() const { return FlagVals & MOVolatile; }
  bool isNonTemporal() const { return FlagVals & MONonTemporal; }
  bool isDereferenceable() const { return FlagVals & MODereferenceable; }
  bool isInvariant() const { return FlagVals & MOInvariant; }

  /// Returns true if this operation has an atomic ordering requirement of
  /// unordered or higher, false otherwise.
  bool isAtomic() const { return getOrdering() != AtomicOrdering::NotAtomic; }

  /// Returns true if this memory operation doesn't have any ordering
  /// constraints other than normal aliasing. Volatile and (ordered) atomic
  /// memory operations can't be reordered.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }
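
  // Example (illustrative): a pass deciding whether a load may be moved past
  // other memory operations (subject to the usual alias checks) would
  // typically require something like:
  //
  //   bool CanReorder = MMO->isLoad() && MMO->isUnordered();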

  /// Update this MachineMemOperand to reflect the alignment of MMO, if it has a
  /// greater alignment. This must only be used when the new alignment applies
  /// to all users of this MachineMemOperand.
  void refineAlignment(const MachineMemOperand *MMO);

  /// Change the SourceValue for this MachineMemOperand. This should only be
  /// used when an object is being relocated and all references to it are being
  /// updated.
  void setValue(const Value *NewSV) { PtrInfo.V = NewSV; }
  void setValue(const PseudoSourceValue *NewSV) { PtrInfo.V = NewSV; }
  void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; }

  /// Gather unique data for the object.
  void Profile(FoldingSetNodeID &ID) const;

  /// Support for operator<<.
  /// @{
  void print(raw_ostream &OS, ModuleSlotTracker &MST,
             SmallVectorImpl<StringRef> &SSNs, const LLVMContext &Context,
             const MachineFrameInfo *MFI, const TargetInstrInfo *TII) const;
  /// @}

  friend bool operator==(const MachineMemOperand &LHS,
                         const MachineMemOperand &RHS) {
    return LHS.getValue() == RHS.getValue() &&
           LHS.getPseudoValue() == RHS.getPseudoValue() &&
           LHS.getSize() == RHS.getSize() &&
           LHS.getOffset() == RHS.getOffset() &&
           LHS.getFlags() == RHS.getFlags() &&
           LHS.getAAInfo() == RHS.getAAInfo() &&
           LHS.getRanges() == RHS.getRanges() &&
           LHS.getAlign() == RHS.getAlign() &&
           LHS.getAddrSpace() == RHS.getAddrSpace();
  }

  friend bool operator!=(const MachineMemOperand &LHS,
                         const MachineMemOperand &RHS) {
    return !(LHS == RHS);
  }
};

} // End llvm namespace

#endif