//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"

using namespace llvm;
static bool isAligned(const Value *Base, const APInt &Offset, Align Alignment,
                      const DataLayout &DL) {
  Align BA = Base->getPointerAlignment(DL);
  const APInt APAlign(Offset.getBitWidth(), Alignment.value());
  assert(APAlign.isPowerOf2() && "must be a power of 2!");
  return BA >= Alignment && !(Offset & (APAlign - 1));
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
    unsigned MaxDepth) {
  assert(V->getType()->isPointerTy() && "Base must be pointer");

  // Recursion limit.
  if (MaxDepth-- == 0)
    return false;

  // Already visited? Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
             .isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.
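    //
    // Illustrative sketch (hypothetical IR, assuming a suitably attributed
    // argument):
    //   define void @f(ptr dereferenceable(16) align 8 %base) {
    //     %p = getelementptr i8, ptr %base, i64 8
    //     ; %p is dereferenceable for 8 bytes and aligned to 8 bytes.
    //   }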

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
        CtxI, AC, DT, TLI, Visited, MaxDepth);
  }

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
    if (BC->getSrcTy()->isPointerTy())
      return isDereferenceableAndAlignedPointer(
          BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI,
          Visited, MaxDepth);
  }

  // Recurse into both hands of select.
  if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
    return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth) &&
           isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);
  }

  bool CheckForNonNull, CheckForFreed;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull,
                                                          CheckForFreed));
  if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
      !CheckForFreed)
    if (!CheckForNonNull || isKnownNonZero(V, DL, 0, AC, CtxI, DT)) {
      // As we recursed through GEPs to get here, we've incrementally checked
      // that each step advanced by a multiple of the alignment. If our base is
      // properly aligned, then the original offset accessed must also be.
      APInt Offset(DL.getTypeStoreSizeInBits(V->getType()), 0);
      return isAligned(V, Offset, Alignment, DL);
    }

  /// TODO refactor this function to be able to search independently for
  /// Dereferenceability and Alignment requirements.

  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                AC, DT, TLI, Visited, MaxDepth);

    // If we have a call we can't recurse through, check to see if this is an
    // allocation function for which we can establish a minimum object size.
    // Such a minimum object size is analogous to a deref_or_null attribute in
    // that we still need to prove the result non-null at the point of use.
    // NOTE: We can only use the object size as a base fact as we a) need to
    // prove alignment too, and b) don't want the compile time impact of a
    // separate recursive walk.
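    //
    // For instance (hypothetical IR), a direct allocation call such as
    //   %mem = call noalias ptr @malloc(i64 64)
    // has a known object size of 64 bytes, but we must still prove the
    // result non-null before speculating, since malloc may return null.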
    ObjectSizeOpts Opts;
    // TODO: It may be okay to round to align, but that would imply that
    // accessing slightly out of bounds was legal, and we're currently
    // inconsistent about that. For the moment, be conservative.
    Opts.RoundToAlign = false;
    Opts.NullIsUnknownSize = true;
    uint64_t ObjSize;
    if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
      APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
      if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
          isKnownNonZero(V, DL, 0, AC, CtxI, DT) && !V->canBeFreed()) {
        // As we recursed through GEPs to get here, we've incrementally
        // checked that each step advanced by a multiple of the alignment. If
        // our base is properly aligned, then the original offset accessed
        // must also be.
        APInt Offset(DL.getTypeStoreSizeInBits(V->getType()), 0);
        return isAligned(V, Offset, Alignment, DL);
      }
    }
  }

  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Alignment, Size, DL, CtxI, AC, DT,
                                              TLI, Visited, MaxDepth);

  if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);

  if (CtxI) {
    /// Look through assumes to see if both dereferenceability and alignment
    /// can be proven by an assume.
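    ///
    /// E.g. (hypothetical IR), via assume operand bundles:
    ///   call void @llvm.assume(i1 true)
    ///       ["align"(ptr %p, i64 16), "dereferenceable"(ptr %p, i64 32)]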
    RetainedKnowledge AlignRK;
    RetainedKnowledge DerefRK;
    if (getKnowledgeForValue(
            V, {Attribute::Dereferenceable, Attribute::Alignment}, AC,
            [&](RetainedKnowledge RK, Instruction *Assume, auto) {
              if (!isValidAssumeForContext(Assume, CtxI))
                return false;
              if (RK.AttrKind == Attribute::Alignment)
                AlignRK = std::max(AlignRK, RK);
              if (RK.AttrKind == Attribute::Dereferenceable)
                DerefRK = std::max(DerefRK, RK);
              if (AlignRK && DerefRK && AlignRK.ArgValue >= Alignment.value() &&
                  DerefRK.ArgValue >= Size.getZExtValue())
                return true; // We have found what we needed so we stop looking.
              return false; // Other assumes may have better information, so
                            // keep looking.
            }))
      return true;
  }

  // If we don't know, assume the worst.
  return false;
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // Note: At the moment, Size can be zero. This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do). It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, 16);
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // For unsized types or scalable vectors we don't know exactly how many bytes
  // are dereferenced, so bail out.
  if (!Ty->isSized() || isa<ScalableVectorType>(Ty))
    return false;

  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                            AC, DT, TLI);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    AssumptionCache *AC,
                                    const DominatorTree *DT,
                                    const TargetLibraryInfo *TLI) {
  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
                                            TLI);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

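// A sketch of the kind of pattern the striding case below handles
// (hypothetical IR): a loop reading i32 elements %a[0..15] with a constant
// step of 4 bytes, where %a is known dereferenceable for 64 bytes. The step
// equals the element size, so proving [%a, %a + 64) dereferenceable and
// aligned at the loop header covers every access in the loop.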
bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
                                             ScalarEvolution &SE,
                                             DominatorTree &DT,
                                             AssumptionCache *AC) {
  auto &DL = LI->getModule()->getDataLayout();
  Value *Ptr = LI->getPointerOperand();

  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()).getFixedValue());
  const Align Alignment = LI->getAlign();

  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(Ptr, Alignment, EltSize, DL,
                                              HeaderFirstNonPHI, AC, &DT);

  // Otherwise, check to see if we have a repeating access pattern where we can
  // prove that all accesses are well aligned and dereferenceable.
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;
  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;
  // TODO: generalize to access patterns which have gaps
  if (Step->getAPInt() != EltSize)
    return false;

  auto TC = SE.getSmallConstantMaxTripCount(L);
  if (!TC)
    return false;

  const APInt AccessSize = TC * EltSize;

  auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart());
  if (!StartS)
    return false;
  assert(SE.isLoopInvariant(StartS, L) && "implied by addrec definition");
  Value *Base = StartS->getValue();

  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case is found which warrants it
  if (EltSize.urem(Alignment.value()) != 0)
    return false;
  return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            HeaderFirstNonPHI, AC, &DT);
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified, this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment, APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  // If DT is not specified we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
                                         TLI))
    return true;

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const uint64_t LoadSize = Size.getZExtValue();

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to. If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
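  //
  // For example (hypothetical IR):
  //   store i32 1, ptr %p
  //   ...
  //   %v = load i32, ptr %p   ; ScanFrom
  // The earlier non-volatile store to %p shows the load cannot trap.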
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free), the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<LifetimeIntrinsic>(BBI) && !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    Type *AccessedTy;
    Align AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedTy = LI->getType();
      AccessedAlign = LI->getAlign();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedTy = SI->getValueOperand()->getType();
      AccessedAlign = SI->getAlign();
    } else
      continue;

    if (AccessedAlign < Alignment)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;
  }
  return false;
}

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  if (TySize.isScalable())
    return false;
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                     TLI);
}

/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned> llvm::DefMaxInstsToScan(
    "available-load-scan-limit", cl::init(6), cl::Hidden,
    cl::desc("Use this to specify the default maximum number of instructions "
             "to scan backward from a given instruction, when searching for "
             "available loaded value"));

Value *llvm::FindAvailableLoadedValue(LoadInst *Load,
                                      BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      AAResults *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE a load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  MemoryLocation Loc = MemoryLocation::get(Load);
  return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
                                   ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoad,
                                   NumScanedInst);
}

// Check if the load and the store have the same base, constant offsets and
// non-overlapping access ranges.
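//
// E.g. (hypothetical IR) a load of a[0] against a store to a[1]:
//   %lp = getelementptr inbounds i32, ptr %a, i64 0
//   %sp = getelementptr inbounds i32, ptr %a, i64 1
// Both strip to the base %a with constant offsets 0 and 4, and the 4-byte
// ranges [0, 4) and [4, 8) do not overlap, so the store can be ignored.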
static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
                                              Type *LoadTy,
                                              const Value *StorePtr,
                                              Type *StoreTy,
                                              const DataLayout &DL) {
  APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
  APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
  const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
      DL, LoadOffset, /* AllowNonInbounds */ false);
  const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
      DL, StoreOffset, /* AllowNonInbounds */ false);
  if (LoadBase != StoreBase)
    return false;
  auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
  auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
  ConstantRange LoadRange(LoadOffset,
                          LoadOffset + LoadAccessSize.toRaw());
  ConstantRange StoreRange(StoreOffset,
                           StoreOffset + StoreAccessSize.toRaw());
  return LoadRange.intersectWith(StoreRange).isEmptySet();
}

static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
                                    Type *AccessTy, bool AtLeastAtomic,
                                    const DataLayout &DL, bool *IsLoadCSE) {
  // If this is a load of Ptr, the loaded value is available.
  // (This is true even if the load is volatile or atomic, although
  // those cases are unlikely.)
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (LI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(LoadPtr, Ptr))
      return nullptr;

    if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
      if (IsLoadCSE)
        *IsLoadCSE = true;
      return LI;
    }
  }

  // If this is a store through Ptr, the value is available!
  // (This is true even if the store is volatile or atomic, although
  // those cases are unlikely.)
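  //
  // E.g. (hypothetical IR):
  //   store i32 %x, ptr %p
  //   %v = load i32, ptr %p   ; %v can be replaced by %x.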
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (SI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(StorePtr, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    Value *Val = SI->getValueOperand();
    if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
      return Val;

    TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
    TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
    if (TypeSize::isKnownLE(LoadSize, StoreSize))
      if (auto *C = dyn_cast<Constant>(Val))
        return ConstantFoldLoadFromConst(C, AccessTy, DL);
  }

  if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
    // Don't forward from (non-atomic) memset to atomic load.
    if (AtLeastAtomic)
      return nullptr;

    // Only handle constant memsets.
    auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
    auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
    if (!Val || !Len)
      return nullptr;

    // TODO: Handle offsets.
    Value *Dst = MSI->getDest();
    if (!AreEquivalentAddressValues(Dst, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
    if (LoadTypeSize.isScalable())
      return nullptr;

    // Make sure the read bytes are contained in the memset.
    uint64_t LoadSize = LoadTypeSize.getFixedValue();
    if ((Len->getValue() * 8).ult(LoadSize))
      return nullptr;

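    // Forward the memset's fill byte as a splat constant. E.g.
    // (hypothetically) a memset of 8 bytes with value 0xAB followed by a
    // load of i32 yields the constant 0xABABABAB.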
    APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
                                : Val->getValue().trunc(LoadSize);
    ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
    if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
      return SplatC;

    return nullptr;
  }

  return nullptr;
}

Value *llvm::findAvailablePtrLoadStore(
    const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
    BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
    AAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getModule()->getDataLayout();
  const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (Inst->isDebugOrPseudoInst())
      continue;

    // Restore ScanFrom to expected value in case next test succeeds
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    --ScanFrom;

    if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
                                                 AtLeastAtomic, DL, IsLoadCSE))
      return Available;

    // Try to get the store size for the type.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial
      // form of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      if (!AA) {
        // When AA isn't available, but if the load and the store have the
        // same base, constant offsets and non-overlapping access ranges,
        // ignore the store. This is a simple form of alias analysis that is
        // used by the inliner. FIXME: use BasicAA if possible.
        if (areNonOverlapSameBaseLoadAndStore(
                Loc.Ptr, AccessTy, SI->getPointerOperand(),
                SI->getValueOperand()->getType(), DL))
          continue;
      } else {
        // If we have alias analysis and it says the store won't modify the
        // loaded value, ignore the store.
        if (!isModSet(AA->getModRefInfo(SI, Loc)))
          continue;
      }

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block without finding the value; we're done for
  // this block.
  return nullptr;
}

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, AAResults &AA,
                                      bool *IsLoadCSE,
                                      unsigned MaxInstsToScan) {
  const DataLayout &DL = Load->getModule()->getDataLayout();
  Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
  BasicBlock *ScanBB = Load->getParent();
  Type *AccessTy = Load->getType();
  bool AtLeastAtomic = Load->isAtomic();

  if (!Load->isUnordered())
    return nullptr;

  // Try to find an available value first, and delay expensive alias analysis
  // queries until later.
  Value *Available = nullptr;
  SmallVector<Instruction *> MustNotAliasInsts;
  for (Instruction &Inst : make_range(++Load->getReverseIterator(),
                                      ScanBB->rend())) {
    if (Inst.isDebugOrPseudoInst())
      continue;

    if (MaxInstsToScan-- == 0)
      return nullptr;

    Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
                                      AtLeastAtomic, DL, IsLoadCSE);
    if (Available)
      break;

    if (Inst.mayWriteToMemory())
      MustNotAliasInsts.push_back(&Inst);
  }

  // If we found an available value, ensure that the instructions in between
  // did not modify the memory location.
  if (Available) {
    MemoryLocation Loc = MemoryLocation::get(Load);
    for (Instruction *Inst : MustNotAliasInsts)
      if (isModSet(AA.getModRefInfo(Inst, Loc)))
        return nullptr;
  }

  return Available;
}

bool llvm::canReplacePointersIfEqual(Value *A, Value *B, const DataLayout &DL,
                                     Instruction *CtxI) {
  Type *Ty = A->getType();
  assert(Ty == B->getType() && Ty->isPointerTy() &&
         "values must have matching pointer types");

  // NOTE: The checks in the function are incomplete and currently miss illegal
  // cases! The current implementation is a starting point and the
  // implementation should be made stricter over time.
  if (auto *C = dyn_cast<Constant>(B)) {
    // Do not allow replacing a pointer with a constant pointer, unless it is
    // either null or at least one byte is dereferenceable.
    APInt OneByte(DL.getPointerTypeSizeInBits(Ty), 1);
    return C->isNullValue() ||
           isDereferenceableAndAlignedPointer(B, Align(1), OneByte, DL, CtxI);
  }

  return true;
}