//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <optional>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
                                                   cl::Hidden, cl::init(true));

/// SearchLimitReached / SearchTimes shows how often the limit to decompose
/// GEPs is reached. Reaching it reduces the precision of basic alias
/// analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject().
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT_ && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns the size of the object specified by V or UnknownSize if unknown.
static std::optional<TypeSize> getObjectSize(const Value *V,
                                             const DataLayout &DL,
                                             const TargetLibraryInfo &TLI,
                                             bool NullIsValidLoc,
                                             bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return TypeSize::getFixed(Size);
  return std::nullopt;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, TypeSize Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meaning of "object" is slightly different in the following
  // contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the
  // middle of the "object". If q is passed to isObjectSmallerThan() as the
  // 1st parameter, before llvm::getObjectSize() is called to get the size of
  // the entire object, we should:
  //   - either rewind the pointer q to the base address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  std::optional<TypeSize> ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                                     /*RoundToAlign*/ true);

  return ObjectSize && TypeSize::isKnownLT(*ObjectSize, Size);
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
static TypeSize getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer. We can ignore frees, as an
  // access after free would be undefined behavior.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size to
  // be accessed, thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue().getKnownMinValue());
  return TypeSize::getFixed(DerefBytes);
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  std::optional<TypeSize> ObjectSize =
      getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize && *ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// CaptureInfo implementations
//===----------------------------------------------------------------------===//

CaptureInfo::~CaptureInfo() = default;

bool SimpleCaptureInfo::isNotCapturedBefore(const Value *Object,
                                            const Instruction *I, bool OrAt) {
  return isNonEscapingLocalObject(Object, &IsCapturedCache);
}

static bool isNotInCycle(const Instruction *I, const DominatorTree *DT,
                         const LoopInfo *LI) {
  BasicBlock *BB = const_cast<BasicBlock *>(I->getParent());
  SmallVector<BasicBlock *> Succs(successors(BB));
  return Succs.empty() ||
         !isPotentiallyReachableFromMany(Succs, BB, nullptr, DT, LI);
}

bool EarliestEscapeInfo::isNotCapturedBefore(const Value *Object,
                                             const Instruction *I, bool OrAt) {
  if (!isIdentifiedFunctionLocal(Object))
    return false;

  auto Iter = EarliestEscapes.insert({Object, nullptr});
  if (Iter.second) {
    Instruction *EarliestCapture = FindEarliestCapture(
        Object, *const_cast<Function *>(DT.getRoot()->getParent()),
        /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
    if (EarliestCapture) {
      auto Ins = Inst2Obj.insert({EarliestCapture, {}});
      Ins.first->second.push_back(Object);
    }
    Iter.first->second = EarliestCapture;
  }

  // No capturing instruction.
  if (!Iter.first->second)
    return true;

  // No context instruction means any use is capturing.
  if (!I)
    return false;

  if (I == Iter.first->second) {
    if (OrAt)
      return false;
    return isNotInCycle(I, &DT, LI);
  }

  return !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, LI);
}

void EarliestEscapeInfo::removeInstruction(Instruction *I) {
  auto Iter = Inst2Obj.find(I);
  if (Iter != Inst2Obj.end()) {
    for (const Value *Obj : Iter->second)
      EarliestEscapes.erase(Obj);
    Inst2Obj.erase(I);
  }
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
/// Represents zext(sext(trunc(V))).
struct CastedValue {
  const Value *V;
  unsigned ZExtBits = 0;
  unsigned SExtBits = 0;
  unsigned TruncBits = 0;

  explicit CastedValue(const Value *V) : V(V) {}
  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
                       unsigned TruncBits)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}

  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
           SExtBits;
  }

  CastedValue withValue(const Value *NewV) const {
    return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
  }

  /// Replace V with zext(NewV)
  CastedValue withZExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);

    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
  }

  /// Replace V with sext(NewV)
  CastedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);

    // zext(sext(sext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
  }

  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }
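
  // Illustrative example of the cast order: with TruncBits = 8, SExtBits = 8
  // and ZExtBits = 8, an i32 value N evaluates as
  // zext(sext(trunc(N to i24) to i32) to i40), matching
  // getBitWidth() == 32 - 8 + 8 + 8 == 40.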

  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
    return N;
  }

  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    // trunc(x op y) == trunc(x) op trunc(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }

  bool hasSameCastsAs(const CastedValue &Other) const {
    return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
           TruncBits == Other.TruncBits;
  }
};

/// Represents zext(sext(trunc(V))) * Scale + Offset.
struct LinearExpression {
  CastedValue Val;
  APInt Scale;
  APInt Offset;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const CastedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}

  LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }

  LinearExpression mul(const APInt &Other, bool MulIsNSW) const {
    // The check for zero offset is necessary, because generally
    // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
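    // E.g. (illustrative), in i8: (100 +nsw -100) *nsw 4 == 0 does not wrap,
    // but the distributed term 100 *nsw 4 would overflow, so nsw can only be
    // preserved when the offset is zero (or the multiplier is one).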
    bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
    return LinearExpression(Val, Scale * Other, Offset * Other, NSW);
  }
};
}

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
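/// For instance (illustrative), "shl nsw i32 %v, 1" decomposes to Scale = 2,
/// Offset = 0 with Val = %v, and "add nsw i32 %v, 4" decomposes to Scale = 1,
/// Offset = 4.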
static LinearExpression GetLinearExpression(
    const CastedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()), true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we deal with is or, and only limited to the
      // case where it is both nuw and nsw.
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      // While we can distribute over trunc, we cannot preserve nowrap flags
      // in that case.
      if (Val.TruncBits)
        NUW = NSW = false;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if it is disjoint. Otherwise we can't analyze it.
        if (!cast<PossiblyDisjointInst>(BOp)->isDisjoint())
          return Val;

        [[fallthrough]];
      case Instruction::Add: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul:
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT)
                .mul(RHS, NSW);
        break;
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  if (isa<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  return Val;
}

/// Ensure that a pointer offset fits in an integer of size IndexSize
/// (in bits) when that size is smaller than the maximum index size. This is
/// an issue, in particular, for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information where the
/// maximum index size is 64b.
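/// E.g. (illustrative), with a 64-bit offset and IndexSize == 32, the value
/// 0x00000000ffffffff is shifted left by 32 bits and arithmetically shifted
/// back, yielding -1, the two's complement interpretation of the 32-bit index.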
static void adjustToIndexSize(APInt &Offset, unsigned IndexSize) {
  assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
  unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
  if (ShiftBits != 0) {
    Offset <<= ShiftBits;
    Offset.ashrInPlace(ShiftBits);
  }
}

namespace {
// A linear transformation of a Value; this class represents
// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
struct VariableGEPIndex {
  CastedValue Val;
  APInt Scale;

  // Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  /// True if the index should be subtracted rather than added. We don't simply
  /// negate the Scale, to avoid losing the NSW flag: X - INT_MIN*1 may be
  /// non-wrapping, while X + INT_MIN*(-1) wraps.
  bool IsNegated;

  bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
    if (IsNegated == Other.IsNegated)
      return Scale == -Other.Scale;
    return Scale == Other.Scale;
  }

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale
       << ", nsw=" << IsNSW
       << ", negated=" << IsNegated << ")";
  }
};
}

// Represents the internal structure of a GEP, decomposed into a base pointer,
// constant offsets, and variable scaled indices.
struct BasicAAResult::DecomposedGEP {
  // Base pointer of the GEP
  const Value *Base;
  // Total constant offset from base.
  APInt Offset;
  // Scaled variable (non-constant) indices.
  SmallVector<VariableGEPIndex, 4> VarIndices;
  // Are all operations inbounds GEPs or non-indexing operations?
  // (std::nullopt iff expression doesn't involve any geps)
  std::optional<bool> InBounds;

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(DecomposedGEP Base=" << Base->getName()
       << ", Offset=" << Offset
       << ", VarIndices=[";
    for (size_t i = 0; i < VarIndices.size(); i++) {
      if (i != 0)
        OS << ", ";
      VarIndices[i].print(OS);
    }
    OS << "])";
  }
};


/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
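/// For example (illustrative, assuming a 64-bit index size),
/// "getelementptr inbounds i32, ptr %p, i64 %i" decomposes into Base = %p,
/// Offset = 0, and a single variable index with V = %i and Scale = 4.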
BasicAAResult::DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(MaxIndexSize, 0);
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group that can't be expressed with
        // attributes but have properties like returning an aliasing pointer.
        // Because some analyses may assume that a nocapture pointer is not
        // returned from a special intrinsic (the function would have to be
        // marked with the "returned" attribute otherwise), it is crucial to
        // use this function, which stays in sync with CaptureTracking. Not
        // using it may cause weird miscompilations where two aliasing
        // pointers are assumed to be noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return Decomposed;
    }

    // Track whether we've seen at least one in bounds gep, and if so, whether
    // all geps parsed were in bounds.
    if (Decomposed.InBounds == std::nullopt)
      Decomposed.InBounds = GEPOp->isInBounds();
    else if (!GEPOp->isInBounds())
      Decomposed.InBounds = false;

    assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into
    // Decomposed.Offset/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned IndexSize = DL.getIndexSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;

        // Don't attempt to analyze GEPs if the scalable index is not zero.
        TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
        if (AllocTypeSize.isScalable()) {
          Decomposed.Base = V;
          return Decomposed;
        }

        Decomposed.Offset += AllocTypeSize.getFixedValue() *
                             CIdx->getValue().sextOrTrunc(MaxIndexSize);
        continue;
      }

      TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
      if (AllocTypeSize.isScalable()) {
        Decomposed.Base = V;
        return Decomposed;
      }

      GepHasConstantOffset = false;

      // If the integer type is smaller than the index size, it is implicitly
      // sign extended or truncated to index size.
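      // E.g. (illustrative), an i32 index with a 64-bit index size gets
      // SExtBits = 32, while an i128 index gets TruncBits = 64.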
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
      unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
      LinearExpression LE = GetLinearExpression(
          CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);

      // Scale by the type size.
      unsigned TypeSize = AllocTypeSize.getFixedValue();
      LE = LE.mul(APInt(IndexSize, TypeSize), GEPOp->isInBounds());
      Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
      APInt Scale = LE.Scale.sext(MaxIndexSize);

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].Val.V == LE.Val.V &&
            Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
          Scale += Decomposed.VarIndices[i].Scale;
          LE.IsNSW = false; // We cannot guarantee nsw for the merge.
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // index size.
      adjustToIndexSize(Scale, IndexSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW,
                                  /* IsNegated */ false};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds
    if (GepHasConstantOffset)
      adjustToIndexSize(Decomposed.Offset, IndexSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}

ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
                                            AAQueryInfo &AAQI,
                                            bool IgnoreLocals) {
  assert(Visited.empty() && "Visited must be cleared after use!");
  auto _ = make_scope_exit([&] { Visited.clear(); });

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  ModRefInfo Result = ModRefInfo::NoModRef;

  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second)
      continue;

    // Ignore allocas if we were instructed to do so.
    if (IgnoreLocals && isa<AllocaInst>(V))
      continue;

    // If the location points to memory that is known to be invariant for
    // the life of the underlying SSA value, then we can exclude Mod from
    // the set of valid memory effects.
    //
    // An argument that is marked readonly and noalias is known to be
    // invariant while that function is executing.
    if (const Argument *Arg = dyn_cast<Argument>(V)) {
      if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
        Result |= ModRefInfo::Ref;
        continue;
      }
    }

    // A global constant can't be mutated.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant())
        return ModRefInfo::ModRef;
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup)
        return ModRefInfo::ModRef;
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    return ModRefInfo::ModRef;
  } while (!Worklist.empty() && --MaxLookup);

  // If we hit the maximum number of instructions to examine, be conservative.
  if (!Worklist.empty())
    return ModRefInfo::ModRef;

  return Result;
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

/// Returns the behavior when calling the given call site.
MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
                                              AAQueryInfo &AAQI) {
  MemoryEffects Min = Call->getAttributes().getMemoryEffects();

  if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
    MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
    // Operand bundles on the call may also read or write memory, in addition
    // to the behavior of the called function.
    if (Call->hasReadingOperandBundles())
      FuncME |= MemoryEffects::readOnly();
    if (Call->hasClobberingOperandBundles())
      FuncME |= MemoryEffects::writeOnly();
    Min &= FuncME;
  }

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::experimental_guard:
  case Intrinsic::experimental_deoptimize:
    // These intrinsics can read arbitrary memory, and additionally modref
    // inaccessible memory to model control dependence.
    return MemoryEffects::readOnly() |
           MemoryEffects::inaccessibleMemOnly(ModRefInfo::ModRef);
  }

  return F->getMemoryEffects();
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return ModRefInfo::ModRef;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB, AAQueryInfo &AAQI,
                                 const Instruction *CtxI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI, CtxI);
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // A call can access a locally allocated object either because it is passed as
  // an argument to the call, or because it has escaped prior to the call.
  //
  // Make sure the object has not escaped here, and then check that none of the
  // call arguments alias the object below.
  if (!isa<Constant>(Object) && Call != Object &&
      AAQI.CI->isNotCapturedBefore(Object, Call, /*OrAt*/ false)) {

    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      if (!(*CI)->getType()->isPointerTy())
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(*CI),
                         MemoryLocation::getBeforeOrAfter(Object), AAQI);
      // Operand doesn't alias 'Object', continue looking for other aliases.
      if (AR == AliasResult::NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result |= ModRefInfo::Ref;
        continue;
      }
      // Operand aliases 'Object' but the call only writes into it.
      if (Call->onlyWritesMemory(OperandNo)) {
        Result |= ModRefInfo::Mod;
        continue;
      }
      // This operand aliases 'Object' and the call reads and writes into it.
      // Setting ModRef will not yield an early return below; MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // Early return if we improved mod ref information
    if (!isModAndRefSet(Result))
      return Result;
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(Call), Loc, AAQI) ==
        AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // Be conservative.
  return ModRefInfo::ModRef;
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(getMemoryEffects(Call2, AAQI).getModRef())
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(getMemoryEffects(Call1, AAQI).getModRef())
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // Be conservative.
  return ModRefInfo::ModRef;
}

/// Return true if we know V to be the base address of the corresponding
/// memory object. This implies that any address less than V must be out of
/// bounds for the underlying object. Note that just being isIdentifiedObject()
/// is not enough - for example, a negative offset from a noalias argument or
/// call can be inbounds w.r.t the actual underlying object.
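/// E.g. (illustrative), for "void f(ptr noalias %p)" the caller may pass
/// "&buf[4]" as %p, so "getelementptr i8, ptr %p, i64 -4" can still be in
/// bounds of the underlying object even though %p is an identified object.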
static bool isBaseOfObject(const Value *V) {
  // TODO: We can handle other cases here
  // 1) For GC languages, arguments to functions are often required to be
  //    base pointers.
  // 2) Result of allocation routines are often base pointers. Leverage TLI.
  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    AliasResult BaseAlias =
        AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(UnderlyingV1),
                       MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  }

  DominatorTree *DT = getDT(AAQI);
  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Bail if we were not able to decompose anything.
  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
    return AliasResult::MayAlias;

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  // TODO: Remove !isScalable() once BasicAA fully supports scalable location
  // sizes.
  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && !V2Size.isScalable() &&
      DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  if (isa<GEPOperator>(V2)) {
    // Symmetric case to above.
    if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
        V1Size.hasValue() && !V1Size.isScalable() &&
        DecompGEP1.Offset.sle(-V1Size.getValue()) &&
        isBaseOfObject(DecompGEP1.Base))
      return AliasResult::NoAlias;
  }

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return AAQI.AAR.alias(MemoryLocation(DecompGEP1.Base, V1Size),
                          MemoryLocation(DecompGEP2.Base, V2Size), AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias =
      AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
                     MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);

  // If we get a No or May, then return it immediately; no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||
           BaseAlias == AliasResult::MayAlias);
    return BaseAlias;
  }

  // Bail on analysing scalable LocationSize
  if (V1Size.isScalable() || V2Size.isScalable())
    return AliasResult::MayAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    const Value *LeftPtr = V2;
    const Value *RightPtr = GEP1;
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      std::swap(LeftPtr, RightPtr);
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (!VLeftSize.hasValue())
      return AliasResult::MayAlias;

    const uint64_t LSize = VLeftSize.getValue();
    if (Off.ult(LSize)) {
      // Conservatively drop processing if a phi was visited and/or offset is
      // too big.
      AliasResult AR = AliasResult::PartialAlias;
      if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
          (Off + VRightSize.getValue()).ule(LSize)) {
        // Memory referenced by right pointer is nested. Save the offset in
        // cache. Note that the offset was originally estimated as GEP1-V2,
        // but AliasResult contains the shift that represents GEP1+Offset=V2.
        AR.setOffset(-Off.getSExtValue());
        AR.swap(Swapped);
      }
      return AR;
    }
    return AliasResult::NoAlias;
  }

  // We need to know both access sizes for all the following heuristics.
  if (!V1Size.hasValue() || !V2Size.hasValue())
    return AliasResult::MayAlias;

  APInt GCD;
  ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
    const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
    const APInt &Scale = Index.Scale;
    APInt ScaleForGCD = Scale;
    if (!Index.IsNSW)
      ScaleForGCD =
          APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());

    if (i == 0)
      GCD = ScaleForGCD.abs();
    else
      GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());

    ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false,
                                            true, &AC, Index.CxtI);
    KnownBits Known =
        computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
    CR = CR.intersectWith(
        ConstantRange::fromKnownBits(Known, /* Signed */ true),
        ConstantRange::Signed);
    CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());

    assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
           "Bit widths are normalized to MaxIndexSize");
    if (Index.IsNSW)
      CR = CR.smul_sat(ConstantRange(Scale));
    else
      CR = CR.smul_fast(ConstantRange(Scale));

    if (Index.IsNegated)
      OffsetRange = OffsetRange.sub(CR);
    else
      OffsetRange = OffsetRange.add(CR);
  }

  // We now have accesses at two offsets from the same base:
  //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
  //  2. 0 with size V2Size
  // Using arithmetic modulo GCD, the accesses are at
  // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
  // into the range [V2Size..GCD), then we know they cannot overlap.
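  // E.g. (illustrative), with GCD = 8, ModOffset = 4 and V1Size = V2Size = 4,
  // the accesses lie in [4..8) and [0..4) modulo 8 and thus cannot overlap.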
  APInt ModOffset = DecompGEP1.Offset.srem(GCD);
  if (ModOffset.isNegative())
    ModOffset += GCD; // We want mod, not rem.
  if (ModOffset.uge(V2Size.getValue()) &&
      (GCD - ModOffset).uge(V1Size.getValue()))
    return AliasResult::NoAlias;

  // Compute ranges of potentially accessed bytes for both accesses. If the
  // intersection is empty, there can be no overlap.
  unsigned BW = OffsetRange.getBitWidth();
  ConstantRange Range1 = OffsetRange.add(
      ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
  ConstantRange Range2 =
      ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
  if (Range1.intersectWith(Range2).isEmptySet())
    return AliasResult::NoAlias;

  // Try to determine the range of values for VarIndex such that
  // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
  std::optional<APInt> MinAbsVarIndex;
  if (DecompGEP1.VarIndices.size() == 1) {
    // VarIndex = Scale*V.
    const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
    if (Var.Val.TruncBits == 0 &&
        isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
      // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
      // potentially wrapping math.
      auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
        if (Var.IsNSW)
          return true;

        int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
        // If Scale is small enough, abs(V*Scale) >= abs(Scale) holds: the
        // max value of abs(V) is 2^ValOrigBW - 1, so multiplying with a
        // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
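        // E.g. (illustrative), an i8 value extended into a 64-bit index has
        // MaxScaleValueBW = 56, and any scale up to 2^56 - 1 keeps
        // abs(V*Scale) <= (2^8 - 1) * (2^56 - 1) < 2^64 from wrapping.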
        int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
        if (MaxScaleValueBW <= 0)
          return false;
        return Var.Scale.ule(
            APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
      };
      // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
      // presence of potentially wrapping math.
      if (MultiplyByScaleNoWrap(Var)) {
        // If V != 0 then abs(VarIndex) >= abs(Scale).
        MinAbsVarIndex = Var.Scale.abs();
      }
    }
  } else if (DecompGEP1.VarIndices.size() == 2) {
    // VarIndex = Scale*V0 + (-Scale)*V1.
    // If V0 != V1 then abs(VarIndex) >= abs(Scale).
    // Check that MayBeCrossIteration is false, to avoid reasoning about
    // inequality of values across loop iterations.
    const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
    const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
    if (Var0.hasNegatedScaleOf(Var1) && Var0.Val.TruncBits == 0 &&
        Var0.Val.hasSameCastsAs(Var1.Val) && !AAQI.MayBeCrossIteration &&
        isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
                        DT))
      MinAbsVarIndex = Var0.Scale.abs();
  }

  if (MinAbsVarIndex) {
    // The constant offset will have added at least +/-MinAbsVarIndex to it.
    APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
    APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
    // We know that Offset <= OffsetLo || Offset >= OffsetHi
    if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
        OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
      return AliasResult::NoAlias;
  }

  if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT, AAQI))
    return AliasResult::NoAlias;

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return AliasResult::MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
      (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
    return AliasResult::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasResult::MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const Value *V2, LocationSize V2Size,
                           AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (isValueEqualInPotentialCycles(SI->getCondition(), SI2->getCondition(),
                                      AAQI)) {
      AliasResult Alias =
          AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
                         MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
      if (Alias == AliasResult::MayAlias)
        return AliasResult::MayAlias;
      AliasResult ThisAlias =
          AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
                         MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then we return
  // NoAlias / MustAlias. Otherwise, we return MayAlias.
  AliasResult Alias = AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
                                     MemoryLocation(V2, V2Size), AAQI);
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;

  AliasResult ThisAlias =
      AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
                     MemoryLocation(V2, V2Size), AAQI);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const Value *V2, LocationSize V2Size,
                                    AAQueryInfo &AAQI) {
  if (!PN->getNumIncomingValues())
    return AliasResult::NoAlias;
  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      std::optional<AliasResult> Alias;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias = AAQI.AAR.alias(
            MemoryLocation(PN->getIncomingValue(i), PNSize),
            MemoryLocation(
                PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
            AAQI);
        if (Alias)
          *Alias = MergeAliasResults(*Alias, ThisAlias);
        else
          Alias = ThisAlias;
        if (*Alias == AliasResult::MayAlias)
          break;
      }
      return *Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  // If a phi operand recurses back to the phi, we can still determine NoAlias
  // if we don't alias the underlying objects of the other phi operands, as we
  // know that the recursive phi needs to be based on them in some way.
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };

  SmallPtrSet<Value *, 4> UniqueSrc;
  Value *OnePhi = nullptr;
  for (Value *PV1 : PN->incoming_values()) {
    // Skip the phi itself being the incoming value.
    if (PV1 == PN)
      continue;

    if (isa<PHINode>(PV1)) {
      if (OnePhi && OnePhi != PV1) {
        // To control potential compile time explosion, we choose to be
        // conservative when we have more than one Phi input. It is important
        // that we handle the single phi case as that lets us handle LCSSA
        // phi nodes and (combined with the recursive phi handling) simple
        // pointer induction variable patterns.
        return AliasResult::MayAlias;
      }
      OnePhi = PV1;
    }

    if (CheckForRecPhi(PV1))
      continue;

    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  if (OnePhi && UniqueSrc.size() > 1)
    // Out of an abundance of caution, allow only the trivial lcssa and
    // recursive phi cases.
    return AliasResult::MayAlias;
1416
1417 // If V1Srcs is empty then that means that the phi has no underlying non-phi
1418 // value. This should only be possible in blocks unreachable from the entry
1419 // block, but return MayAlias just in case.
1420 if (V1Srcs.empty())
1421 return AliasResult::MayAlias;

  // If this PHI node is recursive, indicate that the pointer may be moved
  // across iterations. We can only prove NoAlias if different underlying
  // objects are involved.
  if (isRecursive)
    PNSize = LocationSize::beforeOrAfterPointer();

  // In the recursive alias queries below, we may compare values from two
  // different loop iterations.
  SaveAndRestore SavedMayBeCrossIteration(AAQI.MayBeCrossIteration, true);

  AliasResult Alias = AAQI.AAR.alias(MemoryLocation(V1Srcs[0], PNSize),
                                     MemoryLocation(V2, V2Size), AAQI);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid for all elements, so we must conservatively return MayAlias.
  if (isRecursive && Alias != AliasResult::NoAlias)
    return AliasResult::MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = AAQI.AAR.alias(
        MemoryLocation(V, PNSize), MemoryLocation(V2, V2Size), AAQI);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == AliasResult::MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const Value *V2, LocationSize V2Size,
                                      AAQueryInfo &AAQI,
                                      const Instruction *CtxI) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return AliasResult::NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsForAliasAnalysis();
  V2 = V2->stripPointerCastsForAliasAnalysis();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return AliasResult::NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
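  // A sketch of the hazard (hypothetical IR):
  //   loop:
  //     %p = phi ptr [ %a, %entry ], [ %q, %loop ]
  //     %q = getelementptr i8, ptr %p, i64 1
  // The SSA value %q denotes a different address on each iteration, so a
  // cross-iteration comparison of %q with itself must not yield MustAlias.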
  if (isValueEqualInPotentialCycles(V1, V2, AAQI))
    return AliasResult::MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return AliasResult::NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
  const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);

  // Null values in address spaces where null is not a valid location don't
  // point to any object, so they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return AliasResult::NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return AliasResult::NoAlias;
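    // For example (illustrative): a pointer argument cannot alias an alloca
    // of the same function, because the alloca does not yet exist when the
    // argument's value is fixed at the call site.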

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
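    // A sketch of the case this catches (hypothetical IR):
    //   %obj = alloca i32        ; local object, not captured before the call
    //   %p = call ptr @opaque()  ; escape source: can only return escaped ptrs
    // %obj has not escaped at the call site, so %p cannot point into it.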
    if (isEscapeSource(O1) && AAQI.CI->isNotCapturedBefore(
                                  O2, dyn_cast<Instruction>(O1), /*OrAt*/ true))
      return AliasResult::NoAlias;
    if (isEscapeSource(O2) && AAQI.CI->isNotCapturedBefore(
                                  O1, dyn_cast<Instruction>(O2), /*OrAt*/ true))
      return AliasResult::NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
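  // For instance (illustrative), an 8-byte access based on a 4-byte global
  // could only overlap the global by running past its end, which would be
  // undefined behavior, so the two locations may be treated as disjoint.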
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return AliasResult::NoAlias;

  if (EnableSeparateStorageAnalysis) {
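    // Look for "separate_storage" assumptions, which in IR take the form of
    // an operand bundle on llvm.assume (pointer names here are hypothetical):
    //   call void @llvm.assume(i1 true) ["separate_storage"(ptr %p, ptr %q)]
    // asserting that %p and %q point into disjoint storage.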
    for (AssumptionCache::ResultElem &Elem : AC.assumptionsFor(O1)) {
      if (!Elem || Elem.Index == AssumptionCache::ExprResultIdx)
        continue;

      AssumeInst *Assume = cast<AssumeInst>(Elem);
      OperandBundleUse OBU = Assume->getOperandBundleAt(Elem.Index);
      if (OBU.getTagName() == "separate_storage") {
        assert(OBU.Inputs.size() == 2);
        const Value *Hint1 = OBU.Inputs[0].get();
        const Value *Hint2 = OBU.Inputs[1].get();
        // This is often a no-op; instcombine rewrites this for us. No-op
        // getUnderlyingObject calls are fast, though.
        const Value *HintO1 = getUnderlyingObject(Hint1);
        const Value *HintO2 = getUnderlyingObject(Hint2);

        DominatorTree *DT = getDT(AAQI);
        auto ValidAssumeForPtrContext = [&](const Value *Ptr) {
          if (const Instruction *PtrI = dyn_cast<Instruction>(Ptr)) {
            return isValidAssumeForContext(Assume, PtrI, DT,
                                           /* AllowEphemerals */ true);
          }
          if (const Argument *PtrA = dyn_cast<Argument>(Ptr)) {
            const Instruction *FirstI =
                &*PtrA->getParent()->getEntryBlock().begin();
            return isValidAssumeForContext(Assume, FirstI, DT,
                                           /* AllowEphemerals */ true);
          }
          return false;
        };

        if ((O1 == HintO1 && O2 == HintO2) || (O1 == HintO2 && O2 == HintO1)) {
          // Note that we go back to V1 and V2 for the
          // ValidAssumeForPtrContext checks; they're dominated by O1 and O2,
          // so strictly more assumptions are valid for them.
          if ((CtxI && isValidAssumeForContext(Assume, CtxI, DT,
                                               /* AllowEphemerals */ true)) ||
              ValidAssumeForPtrContext(V1) || ValidAssumeForPtrContext(V2)) {
            return AliasResult::NoAlias;
          }
        }
      }
    }
  }

  // If one of the accesses may be before the accessed pointer, canonicalize
  // this by using unknown after-pointer sizes for both accesses. This is
  // equivalent, because regardless of which pointer is lower, one of them
  // will always come after the other, as long as the underlying objects aren't
  // disjoint. We do this so that the rest of BasicAA does not have to deal
  // with accesses before the base pointer, and to improve cache utilization by
  // merging equivalent states.
  if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
    V1Size = LocationSize::afterPointer();
    V2Size = LocationSize::afterPointer();
  }

  // FIXME: If this depth limit is hit, then we may cache sub-optimal results
  // for recursive queries. For this reason, this limit is chosen to be large
  // enough to be very rarely hit, while still being small enough to avoid
  // stack overflows.
  if (AAQI.Depth >= 512)
    return AliasResult::MayAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries. Include MayBeCrossIteration in the
  // cache key, because a query that returns MustAlias or NoAlias when
  // MayBeCrossIteration==false may become MayAlias when it is true.
  AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
                            {V2, V2Size, AAQI.MayBeCrossIteration});
  const bool Swapped = V1 > V2;
  if (Swapped)
    std::swap(Locs.first, Locs.second);
  const auto &Pair = AAQI.AliasCache.try_emplace(
      Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
  if (!Pair.second) {
    auto &Entry = Pair.first->second;
    if (!Entry.isDefinitive()) {
      // Remember that we used an assumption.
      ++Entry.NumAssumptionUses;
      ++AAQI.NumAssumptionUses;
    }
    // Cache contains sorted {V1,V2} pairs but we should return original order.
    auto Result = Entry.Result;
    Result.swap(Swapped);
    return Result;
  }

  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
  AliasResult Result =
      aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);

  auto It = AAQI.AliasCache.find(Locs);
  assert(It != AAQI.AliasCache.end() && "Must be in cache");
  auto &Entry = It->second;

  // Check whether a NoAlias assumption has been used, but disproven.
  bool AssumptionDisproven =
      Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
  if (AssumptionDisproven)
    Result = AliasResult::MayAlias;

  // This is a definitive result now, when considered as a root query.
  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
  Entry.Result = Result;
  // Cache contains sorted {V1,V2} pairs.
  Entry.Result.swap(Swapped);
  Entry.NumAssumptionUses = -1;

  // If the assumption has been disproven, remove any results that may have
  // been based on this assumption. Do this after the Entry updates above to
  // avoid iterator invalidation.
  if (AssumptionDisproven)
    while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
      AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());

  // The result may still be based on assumptions higher up in the chain.
  // Remember it, so it can be purged from the cache later.
  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
      Result != AliasResult::MayAlias)
    AAQI.AssumptionBasedResults.push_back(Locs);
  return Result;
}

AliasResult BasicAAResult::aliasCheckRecursive(
    const Value *V1, LocationSize V1Size, const Value *V2, LocationSize V2Size,
    AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
    AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
    AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
    AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
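  // Illustrative (hypothetical IR) instance:
  //   %a = alloca [8 x i8]
  //   %p = getelementptr inbounds [8 x i8], ptr %a, i64 0, i64 4
  // A precise 8-byte access of %a spans the whole object, so it necessarily
  // overlaps any access through %p into the same alloca.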
  if (O1 == O2) {
    bool NullIsValidLocation = NullPointerIsDefined(&F);
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
      return AliasResult::PartialAlias;
  }

  return AliasResult::MayAlias;
}

/// Check whether two Values can be considered equivalent.
///
/// If the values may come from different cycle iterations, this will also
/// check that the values are not part of a cycle. We have to do this because
/// we are looking through phi nodes, that is, we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2,
                                                  const AAQueryInfo &AAQI) {
  if (V != V2)
    return false;

  if (!AAQI.MayBeCrossIteration)
    return true;

  // Non-instructions and instructions in the entry block cannot be part of
  // a loop.
  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst || Inst->getParent()->isEntryBlock())
    return true;

  return isNotInCycle(Inst, getDT(AAQI), /*LI*/ nullptr);
}

/// Computes the symbolic difference between two de-composed GEPs.
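///
/// For instance (hypothetical values), subtracting SrcGEP = {4 * %i + 8} from
/// DestGEP = {4 * %i + 20} cancels the common variable index and leaves
/// DestGEP with Offset == 12 and no remaining VarIndices.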
void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
                                           const DecomposedGEP &SrcGEP,
                                           const AAQueryInfo &AAQI) {
  DestGEP.Offset -= SrcGEP.Offset;
  for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
    // Find V in Dest. This is N^2, but GEPs almost never have more than a
    // few variable indices.
    bool Found = false;
    for (auto I : enumerate(DestGEP.VarIndices)) {
      VariableGEPIndex &Dest = I.value();
      if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) ||
          !Dest.Val.hasSameCastsAs(Src.Val))
        continue;

      // Normalize IsNegated if we're going to lose the NSW flag anyway.
      if (Dest.IsNegated) {
        Dest.Scale = -Dest.Scale;
        Dest.IsNegated = false;
        Dest.IsNSW = false;
      }

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest.Scale != Src.Scale) {
        Dest.Scale -= Src.Scale;
        Dest.IsNSW = false;
      } else {
        DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
      }
      Found = true;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (!Found) {
      VariableGEPIndex Entry = {Src.Val, Src.Scale, Src.CxtI, Src.IsNSW,
                                /* IsNegated */ true};
      DestGEP.VarIndices.push_back(Entry);
    }
  }
}

bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
                                            LocationSize MaybeV1Size,
                                            LocationSize MaybeV2Size,
                                            AssumptionCache *AC,
                                            DominatorTree *DT,
                                            const AAQueryInfo &AAQI) {
  if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
      !MaybeV2Size.hasValue())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];

  if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
      !Var0.hasNegatedScaleOf(Var1) ||
      Var0.Val.V->getType() != Var1.Val.V->getType())
    return false;
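
  // Illustrative shape of the two indices this handles (hypothetical values):
  //   Var0 = {V = zext(%x + 1), Scale =  4}
  //   Var1 = {V = zext(%x),     Scale = -4}
  // i.e. the scales cancel and the underlying variable is the same, so the
  // variable parts differ only by a constant wrapped inside the extension.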

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. In the example above, if Var0
  // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.

  LinearExpression E0 =
      GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
  LinearExpression E1 =
      GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
  if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
      !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd, the maximum difference between Var0
  // and Var1 could be calculated, but we're only interested in the absolute
  // minimum difference between the two. The minimum distance may occur due
  // to wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4,
  // and so the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2, GEP1 < V2, and for other
  // values, GEP1 > V2). We'll therefore only declare NoAlias if both V1Size
  // and V2Size fit in the MinDiffBytes gap.
  return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
         MinDiffBytes.uge(V2Size + GEP.Offset.abs());
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT);
}

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree()));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}