//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"

using namespace llvm;

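/// Return true if the address formed by (Base + Offset) is known to be aligned
/// to Alignment bytes: Base itself must carry at least that alignment, and
/// Offset must be a multiple of it.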
static bool isAligned(const Value *Base, const APInt &Offset, Align Alignment,
                      const DataLayout &DL) {
  Align BA = Base->getPointerAlignment(DL);
  const APInt APAlign(Offset.getBitWidth(), Alignment.value());
  assert(APAlign.isPowerOf2() && "must be a power of 2!");
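  // Alignment is a power of two, so Offset is a multiple of it exactly when
  // the low log2(Alignment) bits of Offset are zero; the mask tests that.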
  return BA >= Alignment && !(Offset & (APAlign - 1));
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, const DominatorTree *DT,
    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
    unsigned MaxDepth) {
  assert(V->getType()->isPointerTy() && "Base must be pointer");

  // Recursion limit.
  if (MaxDepth-- == 0)
    return false;

  // Already visited?  Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // Recurse into both arms of a select.
  if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
    return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
                                              Size, DL, CtxI, DT, TLI, Visited,
                                              MaxDepth) &&
           isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
                                              Size, DL, CtxI, DT, TLI, Visited,
                                              MaxDepth);
  }

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
    if (BC->getSrcTy()->isPointerTy())
      return isDereferenceableAndAlignedPointer(
          BC->getOperand(0), Alignment, Size, DL, CtxI, DT, TLI,
          Visited, MaxDepth);
  }

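  // If the pointer itself carries dereferenceable or dereferenceable_or_null
  // information (e.g. from attributes or metadata), try to use it directly.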
  bool CheckForNonNull, CheckForFreed;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull,
                                                          CheckForFreed));
  if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
      !CheckForFreed)
    if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT)) {
      // As we recursed through GEPs to get here, we've incrementally checked
      // that each step advanced by a multiple of the alignment. If our base is
      // properly aligned, then the original offset accessed must also be.
      Type *Ty = V->getType();
      assert(Ty->isSized() && "must be sized");
      APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
      return isAligned(V, Offset, Alignment, DL);
    }

  if (CtxI) {
    // Look through assumes to see if both dereferenceability and alignment
    // can be proven by an assume.
    RetainedKnowledge AlignRK;
    RetainedKnowledge DerefRK;
    if (getKnowledgeForValue(
            V, {Attribute::Dereferenceable, Attribute::Alignment}, nullptr,
            [&](RetainedKnowledge RK, Instruction *Assume, auto) {
              if (!isValidAssumeForContext(Assume, CtxI))
                return false;
              if (RK.AttrKind == Attribute::Alignment)
                AlignRK = std::max(AlignRK, RK);
              if (RK.AttrKind == Attribute::Dereferenceable)
                DerefRK = std::max(DerefRK, RK);
              if (AlignRK && DerefRK && AlignRK.ArgValue >= Alignment.value() &&
                  DerefRK.ArgValue >= Size.getZExtValue())
                return true; // We have found what we needed so we stop looking.
              return false;  // Other assumes may have better information, so
                             // keep looking.
            }))
      return true;
  }
  // TODO: Refactor this function to be able to search independently for
  // dereferenceability and alignment requirements.

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
             .isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes.  If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align,
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
        CtxI, DT, TLI, Visited, MaxDepth);
  }

  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Alignment, Size, DL, CtxI, DT,
                                              TLI, Visited, MaxDepth);

  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
                                              Size, DL, CtxI, DT, TLI,
                                              Visited, MaxDepth);

  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                DT, TLI, Visited, MaxDepth);

    // If we have a call we can't recurse through, check to see if this is an
    // allocation function for which we can establish a minimum object size.
    // Such a minimum object size is analogous to a deref_or_null attribute in
    // that we still need to prove the result non-null at the point of use.
    // NOTE: We can only use the object size as a base fact as we a) need to
    // prove alignment too, and b) don't want the compile-time impact of a
    // separate recursive walk.
    ObjectSizeOpts Opts;
    // TODO: It may be okay to round to align, but that would imply that
    // accessing slightly out of bounds was legal, and we're currently
    // inconsistent about that.  For the moment, be conservative.
    Opts.RoundToAlign = false;
    Opts.NullIsUnknownSize = true;
    uint64_t ObjSize;
    if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
      APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
      if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
          isKnownNonZero(V, DL, 0, nullptr, CtxI, DT) && !V->canBeFreed()) {
        // As we recursed through GEPs to get here, we've incrementally
        // checked that each step advanced by a multiple of the alignment. If
        // our base is properly aligned, then the original offset accessed
        // must also be.
        Type *Ty = V->getType();
        assert(Ty->isSized() && "must be sized");
        APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
        return isAligned(V, Offset, Alignment, DL);
      }
    }
  }

  // If we don't know, assume the worst.
  return false;
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Align Alignment,
                                              const APInt &Size,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT,
                                              const TargetLibraryInfo *TLI) {
  // Note: At the moment, Size can be zero.  This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do).  It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, DT,
                                              TLI, Visited, 16);
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
                                              MaybeAlign MA,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT,
                                              const TargetLibraryInfo *TLI) {
  // For unsized types or scalable vectors we don't know exactly how many bytes
  // are dereferenced, so bail out.
  if (!Ty->isSized() || isa<ScalableVectorType>(Ty))
    return false;

  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  // Require ABI alignment for loads without an alignment specification.
  const Align Alignment = DL.getValueOrABITypeAlignment(MA, Ty);
  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                            DT, TLI);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    const DominatorTree *DT,
                                    const TargetLibraryInfo *TLI) {
  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, DT, TLI);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

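// Proving dereferenceability within a loop takes one of two forms: either the
// address is loop-invariant and a single check at the loop header suffices,
// or the address is an affine AddRec whose stride matches the access size, in
// which case the whole range the loop touches must be dereferenceable.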
bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
                                             ScalarEvolution &SE,
                                             DominatorTree &DT) {
  auto &DL = LI->getModule()->getDataLayout();
  Value *Ptr = LI->getPointerOperand();

  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()).getFixedSize());
  const Align Alignment = LI->getAlign();

  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(Ptr, Alignment, EltSize, DL,
                                              HeaderFirstNonPHI, &DT);

  // Otherwise, check to see if we have a repeating access pattern where we can
  // prove that all accesses are well aligned and dereferenceable.
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;
  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;
  // TODO: generalize to access patterns which have gaps
  if (Step->getAPInt() != EltSize)
    return false;

  auto TC = SE.getSmallConstantMaxTripCount(L);
  if (!TC)
    return false;

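  // With stride equal to EltSize and no gaps, the TC iterations together
  // touch the contiguous range [Base, Base + TC * EltSize).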
  const APInt AccessSize = TC * EltSize;

  auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart());
  if (!StartS)
    return false;
  assert(SE.isLoopInvariant(StartS, L) && "implied by addrec definition");
  Value *Base = StartS->getValue();

  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case is found which warrants it
  if (EltSize.urem(Alignment.value()) != 0)
    return false;
  return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            HeaderFirstNonPHI, &DT);
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment, APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  // If DT is not specified we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, DT, TLI))
    return true;

  if (!ScanFrom)
    return false;


  if (Size.getBitWidth() > 64)
    return false;
  const uint64_t LoadSize = Size.getZExtValue();

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to.  If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free), the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    Type *AccessedTy;
    Align AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedTy = LI->getType();
      AccessedAlign = LI->getAlign();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedTy = SI->getValueOperand()->getType();
      AccessedAlign = SI->getAlign();
    } else
      continue;

    if (AccessedAlign < Alignment)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;
  }
  return false;
}

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), DL.getTypeStoreSize(Ty));
  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, DT, TLI);
}

/// DefMaxInstsToScan - the default maximum number of instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));

Value *llvm::FindAvailableLoadedValue(LoadInst *Load,
                                      BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      AAResults *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE a load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  MemoryLocation Loc = MemoryLocation::get(Load);
  return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
                                   ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoad,
                                   NumScanedInst);
}

// Check if the load and the store have the same base, constant offsets and
// non-overlapping access ranges.
static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
                                              Type *LoadTy,
                                              const Value *StorePtr,
                                              Type *StoreTy,
                                              const DataLayout &DL) {
  APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
  APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
  const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
      DL, LoadOffset, /* AllowNonInbounds */ false);
  const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
      DL, StoreOffset, /* AllowNonInbounds */ false);
  if (LoadBase != StoreBase)
    return false;
  auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
  auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
  ConstantRange LoadRange(LoadOffset,
                          LoadOffset + LoadAccessSize.toRaw());
  ConstantRange StoreRange(StoreOffset,
                           StoreOffset + StoreAccessSize.toRaw());
  return LoadRange.intersectWith(StoreRange).isEmptySet();
}

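/// If Inst (a load or store) provides the value that a load of type AccessTy
/// from Ptr would observe, return that value; otherwise return nullptr. When
/// the value comes from a load, *IsLoadCSE (if non-null) is set to true; when
/// it comes from a store, to false.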
static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
                                    Type *AccessTy, bool AtLeastAtomic,
                                    const DataLayout &DL, bool *IsLoadCSE) {
  // If this is a load of Ptr, the loaded value is available.
  // (This is true even if the load is volatile or atomic, although
  // those cases are unlikely.)
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (LI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(LoadPtr, Ptr))
      return nullptr;

    if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
      if (IsLoadCSE)
        *IsLoadCSE = true;
      return LI;
    }
  }

  // If this is a store through Ptr, the value is available!
  // (This is true even if the store is volatile or atomic, although
  // those cases are unlikely.)
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (SI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(StorePtr, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    Value *Val = SI->getValueOperand();
    if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
      return Val;

    if (auto *C = dyn_cast<Constant>(Val))
      return ConstantFoldLoadThroughBitcast(C, AccessTy, DL);
  }

  return nullptr;
}

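// Scan backwards from ScanFrom (exclusive) through ScanBB looking for a load
// or store that makes the value at Loc available, giving up after
// MaxInstsToScan instructions or at the first potential clobber.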
Value *llvm::findAvailablePtrLoadStore(
    const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
    BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
    AAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getModule()->getDataLayout();
  const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (Inst->isDebugOrPseudoInst())
      continue;

    // Restore ScanFrom to the expected value in case the next test succeeds.
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    --ScanFrom;

    if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
                                                 AtLeastAtomic, DL, IsLoadCSE))
      return Available;

    // Try to get the store size for the type.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial
      // form of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      if (!AA) {
        // When AA isn't available, ignore the store if the load and the store
        // have the same base, constant offsets, and non-overlapping access
        // ranges. This is a simple form of alias analysis that is used by the
        // inliner. FIXME: use BasicAA if possible.
        if (areNonOverlapSameBaseLoadAndStore(
                Loc.Ptr, AccessTy, SI->getPointerOperand(),
                SI->getValueOperand()->getType(), DL))
          continue;
      } else {
        // If we have alias analysis and it says the store won't modify the
        // loaded value, ignore the store.
        if (!isModSet(AA->getModRefInfo(SI, Loc)))
          continue;
      }

      // Otherwise, the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // We scanned to the start of the block without finding an available value;
  // we're done with this block.
  return nullptr;
}

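// This overload batches the alias-analysis queries: it first scans for an
// available value while only recording potential clobbers, and issues the
// more expensive mod/ref queries afterwards, and only if a candidate value
// was actually found.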
Value *llvm::FindAvailableLoadedValue(LoadInst *Load, AAResults &AA,
                                      bool *IsLoadCSE,
                                      unsigned MaxInstsToScan) {
  const DataLayout &DL = Load->getModule()->getDataLayout();
  Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
  BasicBlock *ScanBB = Load->getParent();
  Type *AccessTy = Load->getType();
  bool AtLeastAtomic = Load->isAtomic();

  if (!Load->isUnordered())
    return nullptr;

  // Try to find an available value first, and delay expensive alias analysis
  // queries until later.
  Value *Available = nullptr;
  SmallVector<Instruction *> MustNotAliasInsts;
  for (Instruction &Inst : make_range(++Load->getReverseIterator(),
                                      ScanBB->rend())) {
    if (Inst.isDebugOrPseudoInst())
      continue;

    if (MaxInstsToScan-- == 0)
      return nullptr;

    Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
                                      AtLeastAtomic, DL, IsLoadCSE);
    if (Available)
      break;

    if (Inst.mayWriteToMemory())
      MustNotAliasInsts.push_back(&Inst);
  }

  // If we found an available value, ensure that the instructions in between
  // did not modify the memory location.
  if (Available) {
    MemoryLocation Loc = MemoryLocation::get(Load);
    for (Instruction *Inst : MustNotAliasInsts)
      if (isModSet(AA.getModRefInfo(Inst, Loc)))
        return nullptr;
  }

  return Available;
}

bool llvm::canReplacePointersIfEqual(Value *A, Value *B, const DataLayout &DL,
                                     Instruction *CtxI) {
  Type *Ty = A->getType();
  assert(Ty == B->getType() && Ty->isPointerTy() &&
         "values must have matching pointer types");

  // NOTE: The checks in this function are incomplete and currently miss
  // illegal cases! The current implementation is a starting point and the
  // implementation should be made stricter over time.
  if (auto *C = dyn_cast<Constant>(B)) {
    // Do not allow replacing a pointer with a constant pointer, unless it is
    // either null or at least one byte is dereferenceable.
    APInt OneByte(DL.getPointerTypeSizeInBits(Ty), 1);
    return C->isNullValue() ||
           isDereferenceableAndAlignedPointer(B, Align(1), OneByte, DL, CtxI);
  }

  return true;
}