//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"

using namespace llvm;

static bool isAligned(const Value *Base, const APInt &Offset, Align Alignment,
                      const DataLayout &DL) {
  Align BA = Base->getPointerAlignment(DL);
  const APInt APAlign(Offset.getBitWidth(), Alignment.value());
  assert(APAlign.isPowerOf2() && "must be a power of 2!");
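  // A power-of-two alignment divides Offset exactly when the bits of Offset
  // below the alignment are all zero, hence the mask test below.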
  return BA >= Alignment && !(Offset & (APAlign - 1));
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, const DominatorTree *DT,
    SmallPtrSetImpl<const Value *> &Visited, unsigned MaxDepth) {
  assert(V->getType()->isPointerTy() && "Base must be pointer");

  // Recursion limit.
  if (MaxDepth-- == 0)
    return false;

  // Already visited?  Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
    if (BC->getSrcTy()->isPointerTy())
      return isDereferenceableAndAlignedPointer(
          BC->getOperand(0), Alignment, Size, DL, CtxI, DT, Visited, MaxDepth);
  }

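  // If the pointer itself carries dereferenceability information (e.g. a
  // dereferenceable(N) or dereferenceable_or_null(N) attribute), we may be
  // able to answer directly.  In the _or_null case the bytes are only
  // guaranteed when the pointer is also known to be non-null, which is what
  // CheckForNonNull reports back.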
  bool CheckForNonNull = false;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull));
  if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size))
    if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT)) {
      // As we recursed through GEPs to get here, we've incrementally checked
      // that each step advanced by a multiple of the alignment. If our base is
      // properly aligned, then the original offset accessed must also be.
      Type *Ty = V->getType();
      assert(Ty->isSized() && "must be sized");
      APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
      return isAligned(V, Offset, Alignment, DL);
    }

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
             .isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes.  If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.
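    // For example, with Alignment == 8, an 8-byte-aligned base plus a
    // constant offset of 24 (== 3 * 8) is again 8-byte aligned; the urem
    // check above rejects offsets such as 20 that would break this.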

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
        CtxI, DT, Visited, MaxDepth);
  }

  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Alignment, Size, DL, CtxI, DT,
                                              Visited, MaxDepth);

  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
                                              Size, DL, CtxI, DT, Visited,
                                              MaxDepth);

  if (const auto *Call = dyn_cast<CallBase>(V))
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                DT, Visited, MaxDepth);

  // If we don't know, assume the worst.
  return false;
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Align Alignment,
                                              const APInt &Size,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  // Note: At the moment, Size can be zero.  This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do).  It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, DT,
                                              Visited, 16);
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
                                              MaybeAlign MA,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  // For unsized types or scalable vectors we don't know exactly how many bytes
  // are dereferenced, so bail out.
  if (!Ty->isSized() || isa<ScalableVectorType>(Ty))
    return false;

  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  // Require ABI alignment for loads without alignment specification
  const Align Alignment = DL.getValueOrABITypeAlignment(MA, Ty);
  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                            DT);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    const DominatorTree *DT) {
  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, DT);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

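// Example (illustrative): for a loop such as
//   for (i = 0; i < 16; i++) sum += a[i];
// where 'a' is a [16 x i32] global, the per-iteration loads collectively
// cover the 64 bytes starting at 'a'; proving that whole range is
// dereferenceable and that 'a' is suitably aligned lets a transform (e.g.
// vectorization with a predicated tail) speculate each load.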
bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
                                             ScalarEvolution &SE,
                                             DominatorTree &DT) {
  auto &DL = LI->getModule()->getDataLayout();
  Value *Ptr = LI->getPointerOperand();

  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()));
  const Align Alignment = LI->getAlign();

  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(Ptr, Alignment, EltSize, DL,
                                              HeaderFirstNonPHI, &DT);

  // Otherwise, check to see if we have a repeating access pattern where we can
  // prove that all accesses are well aligned and dereferenceable.
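  // For example, a load of an i32 from "p + 4*i" for i = 0 .. TC-1 is an
  // affine add-recurrence {p,+,4} whose step matches the element size, so
  // showing [p, p + 4*TC) dereferenceable covers every iteration's access.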
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;
  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;
  // TODO: generalize to access patterns which have gaps
  if (Step->getAPInt() != EltSize)
    return false;

  // TODO: If the symbolic trip count has a small bound (max count), we might
  // be able to prove safety.
  auto TC = SE.getSmallConstantTripCount(L);
  if (!TC)
    return false;

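  // With a unit stride (Step == EltSize, checked above), TC iterations touch
  // exactly TC * EltSize contiguous bytes starting at the base.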
  const APInt AccessSize = TC * EltSize;

  auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart());
  if (!StartS)
    return false;
  assert(SE.isLoopInvariant(StartS, L) && "implied by addrec definition");
  Value *Base = StartS->getValue();

  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case is found which warrants it
  if (EltSize.urem(Alignment.value()) != 0)
    return false;
  return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            HeaderFirstNonPHI, &DT);
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment, APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  // If DT is not specified we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, DT))
    return true;

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const uint64_t LoadSize = Size.getZExtValue();

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to.  If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free), the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    Type *AccessedTy;
    Align AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedTy = LI->getType();
      AccessedAlign = LI->getAlign();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedTy = SI->getValueOperand()->getType();
      AccessedAlign = SI->getAlign();
    } else
      continue;

    if (AccessedAlign < Alignment)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;
  }
  return false;
}

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), DL.getTypeStoreSize(Ty));
  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, DT);
}

/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));

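// Illustrative use (caller-side names hypothetical): a pass visiting a load
// can try to reuse an earlier value from the same block roughly like so:
//   BasicBlock::iterator ScanIt = Load->getIterator();
//   if (Value *Avail = FindAvailableLoadedValue(Load, Load->getParent(),
//                                               ScanIt, DefMaxInstsToScan, AA))
//     Load->replaceAllUsesWith(Avail);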
Value *llvm::FindAvailableLoadedValue(LoadInst *Load,
                                      BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      AAResults *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE a load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  return FindAvailablePtrLoadStore(
      Load->getPointerOperand(), Load->getType(), Load->isAtomic(), ScanBB,
      ScanFrom, MaxInstsToScan, AA, IsLoad, NumScanedInst);
}

// Check if the load and the store have the same base, constant offsets and
// non-overlapping access ranges.
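// For example, a 4-byte load of (p + 0) and a 4-byte store to (p + 8) share
// the base p but cover the disjoint byte ranges [0, 4) and [8, 12), so the
// store cannot clobber the loaded bytes.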
static bool AreNonOverlapSameBaseLoadAndStore(
    Value *LoadPtr, Type *LoadTy, Value *StorePtr, Type *StoreTy,
    const DataLayout &DL) {
  APInt LoadOffset(DL.getTypeSizeInBits(LoadPtr->getType()), 0);
  APInt StoreOffset(DL.getTypeSizeInBits(StorePtr->getType()), 0);
  Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
      DL, LoadOffset, /* AllowNonInbounds */ false);
  Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
      DL, StoreOffset, /* AllowNonInbounds */ false);
  if (LoadBase != StoreBase)
    return false;
  auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
  auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
  ConstantRange LoadRange(LoadOffset,
                          LoadOffset + LoadAccessSize.toRaw());
  ConstantRange StoreRange(StoreOffset,
                           StoreOffset + StoreAccessSize.toRaw());
  return LoadRange.intersectWith(StoreRange).isEmptySet();
}

Value *llvm::FindAvailablePtrLoadStore(Value *Ptr, Type *AccessTy,
                                       bool AtLeastAtomic, BasicBlock *ScanBB,
                                       BasicBlock::iterator &ScanFrom,
                                       unsigned MaxInstsToScan,
                                       AAResults *AA, bool *IsLoadCSE,
                                       unsigned *NumScanedInst) {
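  // A MaxInstsToScan value of zero means "no limit": scan the whole block.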
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getModule()->getDataLayout();
  Value *StrippedPtr = Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (isa<DbgInfoIntrinsic>(Inst))
      continue;

    // Restore ScanFrom to its expected value in case the next test succeeds.
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

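    // Re-point ScanFrom at Inst for the checks below; if we returned above
    // (scan limit hit), it was left pointing just past the last instruction
    // examined.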
    --ScanFrom;
    // If this is a load of Ptr, the loaded value is available.
    // (This is true even if the load is volatile or atomic, although
    // those cases are unlikely.)
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      if (AreEquivalentAddressValues(
              LI->getPointerOperand()->stripPointerCasts(), StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (LI->isAtomic() < AtLeastAtomic)
          return nullptr;

        if (IsLoadCSE)
          *IsLoadCSE = true;
        return LI;
      }

    // Try to get the store size for the type.
    auto AccessSize = LocationSize::precise(DL.getTypeStoreSize(AccessTy));

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
      // If this is a store through Ptr, the value is available!
      // (This is true even if the store is volatile or atomic, although
      // those cases are unlikely.)
      if (AreEquivalentAddressValues(StorePtr, StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(SI->getValueOperand()->getType(),
                                               AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (SI->isAtomic() < AtLeastAtomic)
          return nullptr;

        if (IsLoadCSE)
          *IsLoadCSE = false;
        return SI->getOperand(0);
      }

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial form
      // of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      if (!AA) {
        // When AA isn't available, if the load and the store have the same
        // base, constant offsets, and non-overlapping access ranges, ignore
        // the store. This is a simple form of alias analysis that is used by
        // the inliner. FIXME: use BasicAA if possible.
        if (AreNonOverlapSameBaseLoadAndStore(
                Ptr, AccessTy, SI->getPointerOperand(),
                SI->getValueOperand()->getType(), DL))
          continue;
      } else {
        // If we have alias analysis and it says the store won't modify the
        // loaded value, ignore the store.
        if (!isModSet(AA->getModRefInfo(SI, StrippedPtr, AccessSize)))
          continue;
      }

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, StrippedPtr, AccessSize)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // We reached the start of the block without finding the value; we're done
  // for this block.
  return nullptr;
}