//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

/// SearchLimitReached / SearchTimes shows how often the limit on GEP
/// decomposition depth is reached. Hitting this limit affects the precision
/// of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject(). Both functions must use the same search depth,
// otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallBase>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  // The inttoptr case works because isNonEscapingLocalObject considers all
  // means of converting or equating a pointer to an int (ptrtoint, ptr store
  // which could be followed by an integer load, ptr<->int compare) as
  // escaping, and objects located at well-known addresses via
  // platform-specific means cannot be considered non-escaping local objects.
  if (isa<IntToPtrInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
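///
/// For example (an illustrative case, assuming the usual ObjectSizeOpts
/// semantics): for an "alloca [10 x i32]" aligned to 16 bytes this returns
/// 40, or 48 when RoundToAlign is set, since the size is then rounded up to
/// a multiple of the alignment.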
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meaning of "object" is slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points into the
  // middle of the "object". If q is passed to isObjectSmallerThan() as the 1st
  // parameter, before llvm::getObjectSize() is called to get the size of the
  // entire object, we should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go with the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
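///
/// For instance (an illustrative case derived from the logic below): a
/// pointer known to be dereferenceable(16) queried with an imprecise
/// location size of 8 yields 16, while a precise location size of 24 yields
/// 24, since a precise size is assumed to actually be accessed.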
static uint64_t getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer. We can ignore frees, as an
  // access after free would be undefined behavior.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that size is actually
  // accessed, and thus must be valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue());
  return DerefBytes;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// CaptureInfo implementations
//===----------------------------------------------------------------------===//

CaptureInfo::~CaptureInfo() = default;

bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object,
                                                const Instruction *I) {
  return isNonEscapingLocalObject(Object, &IsCapturedCache);
}

bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object,
                                                 const Instruction *I) {
  if (!isIdentifiedFunctionLocal(Object))
    return false;

  auto Iter = EarliestEscapes.insert({Object, nullptr});
  if (Iter.second) {
    Instruction *EarliestCapture = FindEarliestCapture(
        Object, *const_cast<Function *>(I->getFunction()),
        /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
    if (EarliestCapture) {
      auto Ins = Inst2Obj.insert({EarliestCapture, {}});
      Ins.first->second.push_back(Object);
    }
    Iter.first->second = EarliestCapture;
  }

  // No capturing instruction.
  if (!Iter.first->second)
    return true;

  return I != Iter.first->second &&
         !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI);
}

void EarliestEscapeInfo::removeInstruction(Instruction *I) {
  auto Iter = Inst2Obj.find(I);
  if (Iter != Inst2Obj.end()) {
    for (const Value *Obj : Iter->second)
      EarliestEscapes.erase(Obj);
    Inst2Obj.erase(I);
  }
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
/// Represents zext(sext(V)).
struct ExtendedValue {
  const Value *V;
  unsigned ZExtBits;
  unsigned SExtBits;

  explicit ExtendedValue(const Value *V, unsigned ZExtBits = 0,
                         unsigned SExtBits = 0)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits) {}

  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() + ZExtBits + SExtBits;
  }

  ExtendedValue withValue(const Value *NewV) const {
    return ExtendedValue(NewV, ZExtBits, SExtBits);
  }

  ExtendedValue withZExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    return ExtendedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0);
  }

  ExtendedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    // zext(sext(sext(NewV)))
    return ExtendedValue(NewV, ZExtBits, SExtBits + ExtendBy);
  }

  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }

  KnownBits evaluateWith(KnownBits N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }

  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
    return N;
  }

  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }

  bool hasSameExtensionsAs(const ExtendedValue &Other) const {
    return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits;
  }
};

/// Represents zext(sext(V)) * Scale + Offset.
struct LinearExpression {
  ExtendedValue Val;
  APInt Scale;
  APInt Offset;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const ExtendedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}

  LinearExpression(const ExtendedValue &Val) : Val(Val), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }
};
}

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
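///
/// As an illustrative example (assumed IR, not from this file): for
///   %a = shl nsw i32 %x, 2
///   %b = add nsw i32 %a, 8
/// analyzing %b yields A = 4 and B = 8 with IsNSW set, because both
/// operations carry the nsw flag.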
static LinearExpression GetLinearExpression(
    const ExtendedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()), true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we deal with is or, and that is limited to the
      // case where the or behaves as both nuw and nsw.
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT))
          return Val;

        LLVM_FALLTHROUGH;
      case Instruction::Add: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset *= RHS;
        E.Scale *= RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  if (isa<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  return Val;
}

/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than the maximum pointer size. This is
/// an issue in particular for 32b pointers with negative indices that rely on
/// two's complement wrap-arounds for precise alias information where the
/// maximum pointer size is 64b.
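///
/// A worked example (illustrative): with a 64-bit Offset of 0xFFFFFFFF and
/// PointerSize == 32, ShiftBits is 32; shifting left gives 0xFFFFFFFF00000000
/// and the arithmetic shift back yields -1, i.e. the offset reinterpreted in
/// two's complement within the 32-bit address space.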
static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
  assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}

namespace {
// A linear transformation of a Value; this class represents
// ZExt(SExt(V, SExtBits), ZExtBits) * Scale.
struct VariableGEPIndex {
  ExtendedValue Val;
  APInt Scale;

  // Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", scale=" << Scale << ")";
  }
};
}

// Represents the internal structure of a GEP, decomposed into a base pointer,
// constant offsets, and variable scaled indices.
struct BasicAAResult::DecomposedGEP {
  // Base pointer of the GEP.
  const Value *Base;
  // Total constant offset from base.
  APInt Offset;
  // Scaled variable (non-constant) indices.
  SmallVector<VariableGEPIndex, 4> VarIndices;
  // Is GEP index scale compile-time constant.
  bool HasCompileTimeConstantScale;
  // Are all operations inbounds GEPs or non-indexing operations?
  // (None iff expression doesn't involve any geps)
  Optional<bool> InBounds;

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(DecomposedGEP Base=" << Base->getName()
       << ", Offset=" << Offset
       << ", VarIndices=[";
    for (size_t i = 0; i < VarIndices.size(); i++) {
      if (i != 0)
        OS << ", ";
      VarIndices[i].print(OS);
    }
    OS << "], HasCompileTimeConstantScale=" << HasCompileTimeConstantScale
       << ")";
  }
};

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// This function is capable of analyzing everything that getUnderlyingObject
/// can look through. To be able to do that getUnderlyingObject and
/// DecomposeGEPExpression must use the same search depth
/// (MaxLookupSearchDepth).
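///
/// An illustrative example (assumed IR, not from this file): for
///   %p = getelementptr inbounds i32, i32* %base, i64 %i
/// the decomposition is roughly Base = %base, Offset = 0, and a single
/// variable index (V = %i, Scale = 4), since each i32 element occupies
/// 4 bytes.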
BasicAAResult::DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(MaxPointerSize, 0);
  Decomposed.HasCompileTimeConstantScale = true;
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group that can't be expressed
        // with the attributes, but have properties like returning an aliasing
        // pointer. Because some analyses may assume that a nocapture pointer
        // is not returned from some special intrinsic (because the function
        // would have to be marked with the returns attribute), it is crucial
        // to use this function, as it should stay in sync with
        // CaptureTracking. Not using it may cause weird miscompilations where
        // 2 aliasing pointers are assumed not to alias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return Decomposed;
    }

    // Track whether we've seen at least one in bounds gep, and if so, whether
    // all geps parsed were in bounds.
    if (Decomposed.InBounds == None)
      Decomposed.InBounds = GEPOp->isInBounds();
    else if (!GEPOp->isInBounds())
      Decomposed.InBounds = false;

    assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");

    // Don't attempt to analyze GEPs if the index scale is not a compile-time
    // constant.
    if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
      Decomposed.Base = V;
      Decomposed.HasCompileTimeConstantScale = false;
      return Decomposed;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.Offset +=
            DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
            CIdx->getValue().sextOrTrunc(MaxPointerSize);
        continue;
      }

      GepHasConstantOffset = false;

      APInt Scale(MaxPointerSize,
                  DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0;
      LinearExpression LE = GetLinearExpression(
          ExtendedValue(Index, 0, SExtBits), DL, 0, AC, DT);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.

      // It can be the case that, even though C1*V+C2 does not overflow for
      // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
      // decompose the expression in this way.
      //
      // FIXME: C1*Scale and the other operations in the decomposed
      // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
      // possibility.
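      //
      // A small worked example (illustrative): if the index decomposes to
      // 2*%v + 3 and Scale is 8, the merged form is 16*%v with a constant
      // contribution of 24 added to the offset, provided 3*8 does not
      // overflow in MaxPointerSize bits.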
      bool Overflow;
      APInt ScaledOffset = LE.Offset.sextOrTrunc(MaxPointerSize)
                               .smul_ov(Scale, Overflow);
      if (Overflow) {
        LE = LinearExpression(ExtendedValue(Index, 0, SExtBits));
      } else {
        Decomposed.Offset += ScaledOffset;
        Scale *= LE.Scale.sextOrTrunc(MaxPointerSize);
      }

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x]  -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].Val.V == LE.Val.V &&
            Decomposed.VarIndices[i].Val.hasSameExtensionsAs(LE.Val)) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset)
      Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           AAQueryInfo &AAQI, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
  if (Call->doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (Call->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (Call->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (Call->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (Call->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (Call->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If the call has operand bundles then aliasing attributes from the function
  // it calls do not directly apply to the call. This can be made more precise
  // in the future.
  if (!Call->hasOperandBundles())
    if (const Function *F = Call->getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e., Mod only) parameter.
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME: Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (Call->getCalledFunction() &&
      TLI.getLibFunc(*Call->getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(Call, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB,
                                 AAQueryInfo &AAQI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI);
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) {

    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) && OperandNo < Call->arg_size() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // The call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR = getBestAAResults().alias(
          MemoryLocation::getBeforeOrAfter(*CI),
          MemoryLocation::getBeforeOrAfter(Object), AAQI);
      if (AR != AliasResult::MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object'; continue looking for other aliases.
      if (AR == AliasResult::NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but the call only writes into it.
      if (Call->doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and the call reads and writes into it.
      // Setting ModRef will not yield an early return below; MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset the Must bit. It is added below if at least
    // one operand aliases and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information.
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation;
    // fall back to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc,
                                 AAQI) == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics either exactly overlap or do not
  // overlap, i.e., source and destination of any given memcpy are either
  // no-alias or must-alias.
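  //
  // For example (illustrative): if Loc aliases only the memcpy destination,
  // the result is Mod; if it aliases only the source, the result is Ref; if
  // it aliases both, ModRef.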
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
    AliasResult SrcAA =
        getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
    AliasResult DestAA =
        getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != AliasResult::NoAlias)
      rv = setRef(rv);
    if (DestAA != AliasResult::NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained, but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;
  // The same applies to deoptimize which is essentially a guard(false).
  if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation would cause the second store to be ignored (based on
  // the rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained, but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}

/// Return true if we know V to be the base address of the corresponding memory
/// object. This implies that any address less than V must be out of bounds
/// for the underlying object. Note that just being isIdentifiedObject() is
/// not enough; for example, a negative offset from a noalias argument or call
/// can be inbounds w.r.t. the actual underlying object.
static bool isBaseOfObject(const Value *V) {
  // TODO: We can handle other cases here:
  //   1) For GC languages, arguments to functions are often required to be
  //      base pointers.
  //   2) Results of allocation routines are often base pointers. Leverage TLI.
  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    AliasResult BaseAlias = getBestAAResults().alias(
        MemoryLocation::getBeforeOrAfter(UnderlyingV1),
        MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  }

  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Don't attempt to analyze the decomposed GEP if the index scale is not a
  // compile-time constant.
  if (!DecompGEP1.HasCompileTimeConstantScale ||
      !DecompGEP2.HasCompileTimeConstantScale)
    return AliasResult::MayAlias;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "getUnderlyingObject");

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  subtractDecomposedGEPs(DecompGEP1, DecompGEP2);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  if (isa<GEPOperator>(V2)) {
    // Symmetric case to above.
    if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
        V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
        isBaseOfObject(DecompGEP1.Base))
      return AliasResult::NoAlias;
  }

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return getBestAAResults().alias(MemoryLocation(DecompGEP1.Base, V1Size),
                                    MemoryLocation(DecompGEP2.Base, V2Size),
                                    AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias = getBestAAResults().alias(
      MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
      MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);

  // If we get a No or May, then return it immediately; no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||
           BaseAlias == AliasResult::MayAlias);
    return BaseAlias;
  }

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for the Off >= 0 (V2 <= GEP1) case.
    const Value *LeftPtr = V2;
    const Value *RightPtr = GEP1;
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      //   +                +
      //   | BaseOffset     |
      //   ---------------->|
      //   |-->V1Size       |-------> V2Size
      //   GEP1             V2
      std::swap(LeftPtr, RightPtr);
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (VLeftSize.hasValue()) {
      const uint64_t LSize = VLeftSize.getValue();
      if (Off.ult(LSize)) {
        // Conservatively drop processing if a phi was visited and/or offset is
        // too big.
        AliasResult AR = AliasResult::PartialAlias;
        if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
            (Off + VRightSize.getValue()).ule(LSize)) {
          // Memory referenced by the right pointer is nested. Save the offset
          // in the cache. Note that the offset was originally estimated as
          // GEP1-V2, but AliasResult contains the shift that represents
          // GEP1+Offset=V2.
          AR.setOffset(-Off.getSExtValue());
          AR.swap(Swapped);
        }
        return AR;
      }
      return AliasResult::NoAlias;
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    APInt GCD;
    bool AllNonNegative = DecompGEP1.Offset.isNonNegative();
    bool AllNonPositive = DecompGEP1.Offset.isNonPositive();
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
      const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
      const APInt &Scale = Index.Scale;
      APInt ScaleForGCD = Scale;
      if (!Index.IsNSW)
        ScaleForGCD = APInt::getOneBitSet(Scale.getBitWidth(),
                                          Scale.countTrailingZeros());

      if (i == 0)
        GCD = ScaleForGCD.abs();
      else
        GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());

      if (AllNonNegative || AllNonPositive) {
        KnownBits Known = Index.Val.evaluateWith(
            computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT));
        // TODO: Account for implicit trunc.
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();
        AllNonNegative &= (SignKnownZero && Scale.isNonNegative()) ||
                          (SignKnownOne && Scale.isNonPositive());
        AllNonPositive &= (SignKnownZero && Scale.isNonPositive()) ||
                          (SignKnownOne && Scale.isNonNegative());
      }
    }

    // We now have accesses at two offsets from the same base:
    //   1. (...)*GCD + DecompGEP1.Offset with size V1Size
    //   2. 0 with size V2Size
    // Using arithmetic modulo GCD, the accesses are at
    // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
    // into the range [V2Size..GCD), then we know they cannot overlap.
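    //
    // A small worked example (illustrative): with GCD == 16, Offset == 4, and
    // both sizes equal to 4, ModOffset is 4, so the accesses occupy [4..8)
    // and [0..4) modulo 16; since 4 >= V2Size and 16-4 >= V1Size, the ranges
    // cannot overlap and we conclude NoAlias.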
    APInt ModOffset = DecompGEP1.Offset.srem(GCD);
    if (ModOffset.isNegative())
      ModOffset += GCD; // We want mod, not rem.
    if (V1Size.hasValue() && V2Size.hasValue() &&
        ModOffset.uge(V2Size.getValue()) &&
        (GCD - ModOffset).uge(V1Size.getValue()))
      return AliasResult::NoAlias;

    // If we know all the variables are non-negative, then the total offset is
    // also non-negative and >= DecompGEP1.Offset. We have the following layout:
    //   [0, V2Size) ... [TotalOffset, TotalOffset+V1Size)
    // If DecompGEP1.Offset >= V2Size, the accesses don't alias.
    if (AllNonNegative && V2Size.hasValue() &&
        DecompGEP1.Offset.uge(V2Size.getValue()))
      return AliasResult::NoAlias;
    // Similarly, if the variables are non-positive, then the total offset is
    // also non-positive and <= DecompGEP1.Offset. We have the following layout:
    //   [TotalOffset, TotalOffset+V1Size) ... [0, V2Size)
    // If -DecompGEP1.Offset >= V1Size, the accesses don't alias.
    if (AllNonPositive && V1Size.hasValue() &&
        (-DecompGEP1.Offset).uge(V1Size.getValue()))
      return AliasResult::NoAlias;

    if (V1Size.hasValue() && V2Size.hasValue()) {
      // Try to determine the range of values for VarIndex.
      // VarIndexRange is such that:
      //   (VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex) &&
      //   VarIndexRange.contains(VarIndex)
      Optional<APInt> MinAbsVarIndex;
      Optional<ConstantRange> VarIndexRange;
      if (DecompGEP1.VarIndices.size() == 1) {
        // VarIndex = Scale*V.
        const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
        if (isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
          // If V != 0 then abs(VarIndex) >= abs(Scale).
          MinAbsVarIndex = Var.Scale.abs();
        }
        ConstantRange R = Var.Val.evaluateWith(
            computeConstantRange(Var.Val.V, true, &AC, Var.CxtI));
        if (!R.isFullSet() && !R.isEmptySet())
          VarIndexRange = R.sextOrTrunc(Var.Scale.getBitWidth())
                              .multiply(ConstantRange(Var.Scale));
      } else if (DecompGEP1.VarIndices.size() == 2) {
        // VarIndex = Scale*V0 + (-Scale)*V1.
        // If V0 != V1 then abs(VarIndex) >= abs(Scale).
        // Check that VisitedPhiBBs is empty, to avoid reasoning about
        // inequality of values across loop iterations.
        const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
        const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
        if (Var0.Scale == -Var1.Scale &&
            Var0.Val.hasSameExtensionsAs(Var1.Val) && VisitedPhiBBs.empty() &&
            isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
                            DT))
          MinAbsVarIndex = Var0.Scale.abs();
      }

      if (MinAbsVarIndex) {
        // The constant offset will have added at least +/-MinAbsVarIndex to it.
        APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
        APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
        // We know that Offset <= OffsetLo || Offset >= OffsetHi.
        if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
            OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
          return AliasResult::NoAlias;
      }

      if (VarIndexRange) {
        ConstantRange OffsetRange =
            VarIndexRange->add(ConstantRange(DecompGEP1.Offset));

        // We know that Offset >= MinOffset.
        // (MinOffset >= V2Size) => (Offset >= V2Size) => NoAlias.
        if (OffsetRange.getSignedMin().sge(V2Size.getValue()))
          return AliasResult::NoAlias;

        // We know that Offset <= MaxOffset.
        // (MaxOffset <= -V1Size) => (Offset <= -V1Size) => NoAlias.
        if (OffsetRange.getSignedMax().sle(-V1Size.getValue()))
          return AliasResult::NoAlias;
      }
    }

    if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT))
      return AliasResult::NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return AliasResult::MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
      (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
    return AliasResult::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasResult::MayAlias;
}

1383 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1384 /// against another.
1385 AliasResult
aliasSelect(const SelectInst * SI,LocationSize SISize,const Value * V2,LocationSize V2Size,AAQueryInfo & AAQI)1386 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1387 const Value *V2, LocationSize V2Size,
1388 AAQueryInfo &AAQI) {
1389 // If the values are Selects with the same condition, we can do a more precise
1390 // check: just check for aliases between the values on corresponding arms.
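  // For example, given
  //   %s1 = select i1 %c, i8* %a, i8* %b
  //   %s2 = select i1 %c, i8* %x, i8* %y
  // only the (%a, %x) and (%b, %y) pairs need to be compared.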
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = getBestAAResults().alias(
          MemoryLocation(SI->getTrueValue(), SISize),
          MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
      if (Alias == AliasResult::MayAlias)
        return AliasResult::MayAlias;
      AliasResult ThisAlias = getBestAAResults().alias(
          MemoryLocation(SI->getFalseValue(), SISize),
          MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  AliasResult Alias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size),
      MemoryLocation(SI->getTrueValue(), SISize), AAQI);
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;

  AliasResult ThisAlias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size),
      MemoryLocation(SI->getFalseValue(), SISize), AAQI);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provides a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const Value *V2, LocationSize V2Size,
                                    AAQueryInfo &AAQI) {
  if (!PN->getNumIncomingValues())
    return AliasResult::NoAlias;
  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
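  // For example, given
  //   %p1 = phi i8* [ %a, %bb1 ], [ %b, %bb2 ]
  //   %p2 = phi i8* [ %x, %bb1 ], [ %y, %bb2 ]
  // only the (%a, %x) and (%b, %y) pairs need to be compared.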
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      Optional<AliasResult> Alias;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias = getBestAAResults().alias(
            MemoryLocation(PN->getIncomingValue(i), PNSize),
            MemoryLocation(
                PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
            AAQI);
        if (Alias)
          *Alias = MergeAliasResults(*Alias, ThisAlias);
        else
          Alias = ThisAlias;
        if (*Alias == AliasResult::MayAlias)
          break;
      }
      return *Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  // If a phi operand recurses back to the phi, we can still determine NoAlias
  // if we don't alias the underlying objects of the other phi operands, as we
  // know that the recursive phi needs to be based on them in some way.
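  // A typical example is a pointer induction variable:
  //   %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
  //   %p.next = getelementptr inbounds i8, i8* %p, i64 4
  // Here %p is always derived from %base, so it suffices to compare against
  // %base (with an unknown offset).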
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };

  if (PV) {
    // If we have PhiValues then use it to get the underlying phi values.
    const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
    // If we have more phi values than the search depth then return MayAlias
    // conservatively to avoid compile time explosion. The worst possible case
    // is if both sides are PHI nodes, in which case this is O(m x n) time
    // where 'm' and 'n' are the number of PHI sources.
    if (PhiValueSet.size() > MaxLookupSearchDepth)
      return AliasResult::MayAlias;
    // Add the values to V1Srcs.
    for (Value *PV1 : PhiValueSet) {
      if (CheckForRecPhi(PV1))
        continue;
      V1Srcs.push_back(PV1);
    }
  } else {
    // If we don't have PhiValues then just look at the operands of the phi
    // itself.
    // FIXME: Remove this once we can guarantee that we always have PhiValues.
    SmallPtrSet<Value *, 4> UniqueSrc;
    Value *OnePhi = nullptr;
    for (Value *PV1 : PN->incoming_values()) {
      if (isa<PHINode>(PV1)) {
        if (OnePhi && OnePhi != PV1) {
          // To control potential compile time explosion, we choose to be
          // conservative when we have more than one Phi input. It is important
          // that we handle the single phi case as that lets us handle LCSSA
          // phi nodes and (combined with the recursive phi handling) simple
          // pointer induction variable patterns.
          return AliasResult::MayAlias;
        }
        OnePhi = PV1;
      }

      if (CheckForRecPhi(PV1))
        continue;

      if (UniqueSrc.insert(PV1).second)
        V1Srcs.push_back(PV1);
    }

    if (OnePhi && UniqueSrc.size() > 1)
      // Out of an abundance of caution, allow only the trivial lcssa and
      // recursive phi cases.
      return AliasResult::MayAlias;
  }

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return AliasResult::MayAlias;

  // If this PHI node is recursive, indicate that the pointer may be moved
  // across iterations. We can only prove NoAlias if different underlying
  // objects are involved.
  if (isRecursive)
    PNSize = LocationSize::beforeOrAfterPointer();

  // In the recursive alias queries below, we may compare values from two
  // different loop iterations. Keep track of visited phi blocks, which will
  // be used when determining value equivalence.
  bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
  auto _ = make_scope_exit([&]() {
    if (BlockInserted)
      VisitedPhiBBs.erase(PN->getParent());
  });

  // If we inserted a block into VisitedPhiBBs, alias analysis results that
  // have been cached earlier may no longer be valid. Perform recursive queries
  // with a new AAQueryInfo.
  AAQueryInfo NewAAQI = AAQI.withEmptyCache();
  AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;

  AliasResult Alias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size),
      MemoryLocation(V1Srcs[0], PNSize), *UseAAQI);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid for all elements, so we must conservatively return MayAlias.
  if (isRecursive && Alias != AliasResult::NoAlias)
    return AliasResult::MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = getBestAAResults().alias(
        MemoryLocation(V2, V2Size), MemoryLocation(V, PNSize), *UseAAQI);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == AliasResult::MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const Value *V2, LocationSize V2Size,
                                      AAQueryInfo &AAQI) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return AliasResult::NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsForAliasAnalysis();
  V2 = V2->stripPointerCastsForAliasAnalysis();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return AliasResult::NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return AliasResult::MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return AliasResult::NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
  const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return AliasResult::NoAlias;

    // Constant pointers can't alias non-constant isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return AliasResult::NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return AliasResult::NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
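    // For example, the return value of a call cannot alias a local alloca
    // whose address has not been captured before that call.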
    if (isEscapeSource(O1) &&
        AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1)))
      return AliasResult::NoAlias;
    if (isEscapeSource(O2) &&
        AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2)))
      return AliasResult::NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
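  // For example, an 8-byte access cannot lie entirely within a 4-byte alloca,
  // so a pointer used by such an access cannot point into that alloca.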
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return AliasResult::NoAlias;

  // If one of the accesses may be before the accessed pointer, canonicalize
  // this by using unknown after-pointer sizes for both accesses. This is
  // equivalent, because regardless of which pointer is lower, one of them
  // will always come after the other, as long as the underlying objects aren't
  // disjoint. We do this so that the rest of BasicAA does not have to deal
  // with accesses before the base pointer, and to improve cache utilization by
  // merging equivalent states.
  if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
    V1Size = LocationSize::afterPointer();
    V2Size = LocationSize::afterPointer();
  }

  // FIXME: If this depth limit is hit, then we may cache sub-optimal results
  // for recursive queries. For this reason, this limit is chosen to be large
  // enough to be very rarely hit, while still being small enough to avoid
  // stack overflows.
  if (AAQI.Depth >= 512)
    return AliasResult::MayAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size});
  const bool Swapped = V1 > V2;
  if (Swapped)
    std::swap(Locs.first, Locs.second);
  const auto &Pair = AAQI.AliasCache.try_emplace(
      Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
  if (!Pair.second) {
    auto &Entry = Pair.first->second;
    if (!Entry.isDefinitive()) {
      // Remember that we used an assumption.
      ++Entry.NumAssumptionUses;
      ++AAQI.NumAssumptionUses;
    }
    // Cache contains sorted {V1,V2} pairs but we should return original order.
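    // (For a PartialAlias result, swapping negates the stored offset so that
    // it is expressed relative to the original (V1, V2) order.)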
    auto Result = Entry.Result;
    Result.swap(Swapped);
    return Result;
  }

  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
  AliasResult Result =
      aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);

  auto It = AAQI.AliasCache.find(Locs);
  assert(It != AAQI.AliasCache.end() && "Must be in cache");
  auto &Entry = It->second;

  // Check whether a NoAlias assumption has been used, but disproven.
  bool AssumptionDisproven =
      Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
  if (AssumptionDisproven)
    Result = AliasResult::MayAlias;

  // This is a definitive result now, when considered as a root query.
  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
  Entry.Result = Result;
  // Cache contains sorted {V1,V2} pairs.
  Entry.Result.swap(Swapped);
  Entry.NumAssumptionUses = -1;

  // If the assumption has been disproven, remove any results that may have
  // been based on this assumption. Do this after the Entry updates above to
  // avoid iterator invalidation.
  if (AssumptionDisproven)
    while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
      AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());

  // The result may still be based on assumptions higher up in the chain.
  // Remember it, so it can be purged from the cache later.
  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
      Result != AliasResult::MayAlias)
    AAQI.AssumptionBasedResults.push_back(Locs);
  return Result;
}

AliasResult BasicAAResult::aliasCheckRecursive(
    const Value *V1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
    AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
    AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
    AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2) {
    bool NullIsValidLocation = NullPointerIsDefined(&F);
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
      return AliasResult::PartialAlias;
  }

  return AliasResult::MayAlias;
}

/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V and \p V2 this checks whether
/// they cannot be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value. We
/// have to do this because we are looking through phi nodes (that is, we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
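///
/// For example, given
///   %p = phi i8* [ %a, %entry ], [ %p.next, %loop ]
///   %p.next = getelementptr i8, i8* %p, i64 4
/// the value %p.next denotes a different pointer on every loop iteration, so
/// seeing the same SSA value on both sides does not by itself imply equality.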
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
      return false;

  return true;
}

/// Computes the symbolic difference between two de-composed GEPs.
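///
/// For example, subtracting (%x * 4 + %y * 2 + 4) from (%x * 4 + 8) cancels
/// the %x term and leaves {Offset = 4, VarIndices = [%y * -2]}.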
void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
                                           const DecomposedGEP &SrcGEP) {
  DestGEP.Offset -= SrcGEP.Offset;
  for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indices.
    bool Found = false;
    for (auto I : enumerate(DestGEP.VarIndices)) {
      VariableGEPIndex &Dest = I.value();
      if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V) ||
          !Dest.Val.hasSameExtensionsAs(Src.Val))
        continue;

      // If we found it, subtract off Src's scale from the entry in Dest. If
      // the scale goes to zero, remove the entry.
      if (Dest.Scale != Src.Scale) {
        Dest.Scale -= Src.Scale;
        Dest.IsNSW = false;
      } else {
        DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
      }
      Found = true;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (!Found) {
      VariableGEPIndex Entry = {Src.Val, -Src.Scale, Src.CxtI, Src.IsNSW};
      DestGEP.VarIndices.push_back(Entry);
    }
  }
}

bool BasicAAResult::constantOffsetHeuristic(
    const DecomposedGEP &GEP, LocationSize MaybeV1Size,
    LocationSize MaybeV2Size, AssumptionCache *AC, DominatorTree *DT) {
  if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
      !MaybeV2Size.hasValue())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];

  if (!Var0.Val.hasSameExtensionsAs(Var1.Val) || Var0.Scale != -Var1.Scale ||
      Var0.Val.V->getType() != Var1.Val.V->getType())
    return false;

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. For example, if Var0 is
  // zext(%x + 1), decomposition should yield E0.Val == %x and E0.Offset == 1.

  LinearExpression E0 =
      GetLinearExpression(ExtendedValue(Var0.Val.V), DL, 0, AC, DT);
  LinearExpression E1 =
      GetLinearExpression(ExtendedValue(Var1.Val.V), DL, 0, AC, DT);
  if (E0.Scale != E1.Scale || !E0.Val.hasSameExtensionsAs(E1.Val) ||
      !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // Even if the values have been sext'ed and then zext'ed, the maximum
  // difference between Var0 and Var1 could be computed, but we are only
  // interested in the absolute minimum difference between the two. The
  // minimum distance may occur due to wrapping; consider "add i3 %i, 5":
  // if %i == 7 then 7 + 5 mod 8 == 4, and so the minimum distance between
  // %i and %i + 5 is 3.
  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values GEP1 < V2, and for other values
  // GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
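  // For example, if MinDiffBytes == 12, GEP.Offset == 0 and both accesses are
  // 4 bytes wide, the two accesses start at least 12 bytes apart in either
  // direction and therefore cannot overlap.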
  return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
         MinDiffBytes.uge(V2Size + GEP.Offset.abs());
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F);
  return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV);
}

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
  auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree(),
                                 PVWP ? &PVWP->getResult() : nullptr));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<PhiValuesWrapperPass>();
}

BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(), F,
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}