//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

/// By default, even on 32-bit architectures we use 64-bit integers for
/// calculations. This will allow us to more-aggressively decompose indexing
/// expressions calculated using i64 values (e.g., long long in C) which is
/// common enough to worry about.
static cl::opt<bool> ForceAtLeast64Bits("basic-aa-force-at-least-64b",
                                        cl::Hidden, cl::init(true));
static cl::opt<bool> DoubleCalcBits("basic-aa-double-calc-bits",
                                    cl::Hidden, cl::init(false));

/// SearchLimitReached / SearchTimes shows how often the limit to
/// decompose GEPs is reached. It will affect the precision
/// of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject(). Both functions need to use the same search
// depth; otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;
bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallBase>(V))
    return true;

  if (isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  // The inttoptr case works because isNonEscapingLocalObject considers all
  // means of converting or equating a pointer to an int (ptrtoint, ptr store
  // which could be followed by an integer load, ptr<->int compare) as
  // escaping, and objects located at well-known addresses via platform-specific
  // means cannot be considered non-escaping local objects.
  if (isa<IntToPtrInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meaning of "object" is slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the middle
  // of the "object". In case q is passed to isObjectSmallerThan() as the 1st
  // parameter, before llvm::getObjectSize() is called to get the size of the
  // entire object, we should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
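/// For example (an illustrative sketch, not tied to any test case): an
/// argument attributed dereferenceable(16) that is known not to be null or
/// freed, queried with a precise 4-byte location, yields an extent of
/// max(16, 4) = 16 bytes.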
static uint64_t getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  DerefBytes = CanBeFreed ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that that location
  // size is accessed, and thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue());
  return DerefBytes;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
/// Represents zext(sext(V)).
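/// For example (illustrative), a value V of type i8 with SExtBits == 8 and
/// ZExtBits == 16 represents zext i32 (sext i16 (i8 V)): the sign extension
/// is applied first, then the zero extension, for a total bit width of 32.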
struct ExtendedValue {
  const Value *V;
  unsigned ZExtBits;
  unsigned SExtBits;

  explicit ExtendedValue(const Value *V, unsigned ZExtBits = 0,
                         unsigned SExtBits = 0)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits) {}

  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() + ZExtBits + SExtBits;
  }

  ExtendedValue withValue(const Value *NewV) const {
    return ExtendedValue(NewV, ZExtBits, SExtBits);
  }

  ExtendedValue withZExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    return ExtendedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0);
  }

  ExtendedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    // zext(sext(sext(NewV)))
    return ExtendedValue(NewV, ZExtBits, SExtBits + ExtendBy);
  }

  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }

  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }
};

/// Represents zext(sext(V)) * Scale + Offset.
struct LinearExpression {
  ExtendedValue Val;
  APInt Scale;
  APInt Offset;

  LinearExpression(const ExtendedValue &Val, const APInt &Scale,
                   const APInt &Offset)
      : Val(Val), Scale(Scale), Offset(Offset) {}

  LinearExpression(const ExtendedValue &Val) : Val(Val) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }
};
}

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
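/// For example (illustrative), 'add nsw i32 %x, 4' decomposes to %x * 1 + 4,
/// 'shl nuw i32 %x, 1' to %x * 2 + 0, and 'mul nsw i32 %x, 3' to %x * 3 + 0.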
static LinearExpression GetLinearExpression(
    const ExtendedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()));

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we deal with is or, and it is limited to the
      // case where the or behaves as if it were both nuw and nsw.
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT))
          return Val;

        LLVM_FALLTHROUGH;
      case Instruction::Add: {
        LinearExpression E = GetLinearExpression(
            Val.withValue(BOp->getOperand(0)), DL, Depth + 1, AC, DT);
        E.Offset += RHS;
        return E;
      }
      case Instruction::Sub: {
        LinearExpression E = GetLinearExpression(
            Val.withValue(BOp->getOperand(0)), DL, Depth + 1, AC, DT);
        E.Offset -= RHS;
        return E;
      }
      case Instruction::Mul: {
        LinearExpression E = GetLinearExpression(
            Val.withValue(BOp->getOperand(0)), DL, Depth + 1, AC, DT);
        E.Offset *= RHS;
        E.Scale *= RHS;
        return E;
      }
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        LinearExpression E = GetLinearExpression(
            Val.withValue(BOp->getOperand(0)), DL, Depth + 1, AC, DT);
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        return E;
      }
    }
  }

  if (isa<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  return Val;
}

/// To ensure a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than the maximum pointer size. This is
/// an issue, in particular, for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information where the
/// maximum pointer size is 64b.
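/// For example (illustrative): with PointerSize == 32 and a 64-bit Offset of
/// 0x00000000FFFFFFFF, shifting left by 32 and arithmetic-shifting back
/// sign-extends the low 32 bits, so the result is -1, matching the 32-bit
/// two's complement interpretation.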
static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
  assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}

static unsigned getMaxPointerSize(const DataLayout &DL) {
  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
  if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
  if (DoubleCalcBits) MaxPointerSize *= 2;

  return MaxPointerSize;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// This function is capable of analyzing everything that getUnderlyingObject
/// can look through. To be able to do that getUnderlyingObject and
/// DecomposeGEPExpression must use the same search depth
/// (MaxLookupSearchDepth).
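/// For example (an illustrative sketch, assuming a 64-bit DataLayout; %base
/// and %i are hypothetical names):
///   %p = getelementptr inbounds [10 x i32], [10 x i32]* %base, i64 0, i64 %i
/// decomposes into Base = %base, Offset = 0, and a single variable index
/// {%i, Scale = 4}, since each i32 element is 4 bytes.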
BasicAAResult::DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned MaxPointerSize = getMaxPointerSize(DL);
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(MaxPointerSize, 0);
  Decomposed.HasCompileTimeConstantScale = true;
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, which can't be expressed
        // with attributes but have properties like returning an aliasing
        // pointer. Because some analyses may assume that a nocapture pointer
        // is not returned from such special intrinsics (the function would
        // have to be marked with the 'returned' attribute), it is crucial to
        // use this function, which stays in sync with CaptureTracking. Not
        // using it may cause weird miscompilations where two aliasing
        // pointers are assumed not to alias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return Decomposed;
    }

    // Track whether we've seen at least one in bounds gep, and if so, whether
    // all geps parsed were in bounds.
    if (Decomposed.InBounds == None)
      Decomposed.InBounds = GEPOp->isInBounds();
    else if (!GEPOp->isInBounds())
      Decomposed.InBounds = false;

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return Decomposed;
    }

    // Don't attempt to analyze GEPs if the index scale is not a compile-time
    // constant.
    if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
      Decomposed.Base = V;
      Decomposed.HasCompileTimeConstantScale = false;
      return Decomposed;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into Offset/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.Offset +=
            DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
            CIdx->getValue().sextOrTrunc(MaxPointerSize);
        continue;
      }

      GepHasConstantOffset = false;

      APInt Scale(MaxPointerSize,
                  DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0;
      LinearExpression LE = GetLinearExpression(
          ExtendedValue(Index, 0, SExtBits), DL, 0, AC, DT);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.

      // It can be the case that, even though C1*V+C2 does not overflow for
      // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
      // decompose the expression in this way.
      //
      // FIXME: C1*Scale and the other operations in the decomposed
      // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
      // possibility.
      bool Overflow;
      APInt ScaledOffset = LE.Offset.sextOrTrunc(MaxPointerSize)
                               .smul_ov(Scale, Overflow);
      if (Overflow) {
        LE = LinearExpression(ExtendedValue(Index, 0, SExtBits));
      } else {
        Decomposed.Offset += ScaledOffset;
        Scale *= LE.Scale.sextOrTrunc(MaxPointerSize);
      }

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == LE.Val.V &&
            Decomposed.VarIndices[i].ZExtBits == LE.Val.ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == LE.Val.SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {LE.Val.V, LE.Val.ZExtBits, LE.Val.SExtBits,
                                  Scale, CxtI};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset)
      Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           AAQueryInfo &AAQI, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
  if (Call->doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (Call->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (Call->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (Call->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (Call->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (Call->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If the call has operand bundles then aliasing attributes from the function
  // it calls do not directly apply to the call. This can be made more precise
  // in the future.
  if (!Call->hasOperandBundles())
    if (const Function *F = Call->getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e., Mod only) parameter.
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME: Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (Call->getCalledFunction() &&
      TLI.getLibFunc(*Call->getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(Call, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB,
                                 AAQueryInfo &AAQI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI);
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      isNonEscapingLocalObject(Object, &AAQI.IsCapturedCache)) {

    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) &&
           OperandNo < Call->getNumArgOperands() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR = getBestAAResults().alias(
          MemoryLocation::getBeforeOrAfter(*CI),
          MemoryLocation::getBeforeOrAfter(Object), AAQI);
      if (AR != AliasResult::MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object', continue looking for other aliases.
      if (AR == AliasResult::NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but the call only writes into it.
      if (Call->doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and the call reads and writes into it.
      // Setting ModRef will not yield an early return below; MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // If no operand aliases, reset the Must bit. It is added back below if at
    // least one operand aliases and all aliasing operands are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information.
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fall back to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc,
                                 AAQI) == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics either exactly overlap or do not
  // overlap, i.e., source and destination of any given memcpy are either
  // no-alias or must-alias.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
    AliasResult SrcAA =
        getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
    AliasResult DestAA =
        getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != AliasResult::NoAlias)
      rv = setRef(rv);
    if (DestAA != AliasResult::NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;
  // The same applies to deoptimize which is essentially a guard(false).
  if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}

/// Return true if we know V to be the base address of the corresponding memory
/// object. This implies that any address less than V must be out of bounds
/// for the underlying object. Note that just being isIdentifiedObject() is
/// not enough - for example, a negative offset from a noalias argument or call
/// can be inbounds w.r.t the actual underlying object.
static bool isBaseOfObject(const Value *V) {
  // TODO: We can handle other cases here
  // 1) For GC languages, arguments to functions are often required to be
  //    base pointers.
  // 2) Result of allocation routines are often base pointers. Leverage TLI.
  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    AliasResult BaseAlias = getBestAAResults().alias(
        MemoryLocation::getBeforeOrAfter(UnderlyingV1),
        MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  }

  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Don't attempt to analyze the decomposed GEP if the index scale is not a
  // compile-time constant.
  if (!DecompGEP1.HasCompileTimeConstantScale ||
      !DecompGEP2.HasCompileTimeConstantScale)
    return AliasResult::MayAlias;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "getUnderlyingObject");

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  DecompGEP1.Offset -= DecompGEP2.Offset;
  GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  if (isa<GEPOperator>(V2)) {
    // Symmetric case to above.
    if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
        V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
        isBaseOfObject(DecompGEP1.Base))
      return AliasResult::NoAlias;
  }

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return getBestAAResults().alias(
        MemoryLocation(UnderlyingV1, V1Size),
        MemoryLocation(UnderlyingV2, V2Size), AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias = getBestAAResults().alias(
      MemoryLocation::getBeforeOrAfter(UnderlyingV1),
      MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);

  // If we get a No or May, then return it immediately, no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||
           BaseAlias == AliasResult::MayAlias);
    return BaseAlias;
  }

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    const Value *LeftPtr = V2;
    const Value *RightPtr = GEP1;
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      //   +                +
      //   | BaseOffset     |
      //   ---------------->|
      //   |-->V1Size       |-------> V2Size
      //   GEP1             V2
      std::swap(LeftPtr, RightPtr);
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (VLeftSize.hasValue()) {
      const uint64_t LSize = VLeftSize.getValue();
      if (Off.ult(LSize)) {
        // Conservatively drop processing if a phi was visited and/or offset is
        // too big.
        AliasResult AR = AliasResult::PartialAlias;
        if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
            (Off + VRightSize.getValue()).ule(LSize)) {
          // Memory referenced by the right pointer is nested. Save the offset
          // in the cache. Note that the offset was originally estimated as
          // GEP1-V2, but AliasResult contains the shift that represents
          // GEP1+Offset=V2.
          AR.setOffset(-Off.getSExtValue());
          AR.swap(Swapped);
        }
        return AR;
      }
      return AliasResult::NoAlias;
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    APInt GCD;
    bool AllNonNegative = DecompGEP1.Offset.isNonNegative();
    bool AllNonPositive = DecompGEP1.Offset.isNonPositive();
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
      const APInt &Scale = DecompGEP1.VarIndices[i].Scale;
      if (i == 0)
        GCD = Scale.abs();
      else
        GCD = APIntOps::GreatestCommonDivisor(GCD, Scale.abs());

      if (AllNonNegative || AllNonPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;
        const Instruction *CxtI = DecompGEP1.VarIndices[i].CxtI;

        KnownBits Known = computeKnownBits(V, DL, 0, &AC, CxtI, DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        AllNonNegative &= (SignKnownZero && Scale.isNonNegative()) ||
                          (SignKnownOne && Scale.isNonPositive());
        AllNonPositive &= (SignKnownZero && Scale.isNonPositive()) ||
                          (SignKnownOne && Scale.isNonNegative());
      }
    }

    // We now have accesses at two offsets from the same base:
    //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
    //  2. 0 with size V2Size
    // Using arithmetic modulo GCD, the accesses are at
    // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
    // into the range [V2Size..GCD), then we know they cannot overlap.
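    // For example (illustrative): with GCD = 4, DecompGEP1.Offset = 2, and
    // V1Size = V2Size = 2, ModOffset = 2, so the accesses are at [2..4) and
    // [0..2) modulo 4 and cannot overlap.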
    APInt ModOffset = DecompGEP1.Offset.srem(GCD);
    if (ModOffset.isNegative())
      ModOffset += GCD; // We want mod, not rem.
    if (V1Size.hasValue() && V2Size.hasValue() &&
        ModOffset.uge(V2Size.getValue()) &&
        (GCD - ModOffset).uge(V1Size.getValue()))
      return AliasResult::NoAlias;

    // If we know all the variables are non-negative, then the total offset is
    // also non-negative and >= DecompGEP1.Offset. We have the following layout:
    //   [0, V2Size) ... [TotalOffset, TotalOffset+V1Size)
    // If DecompGEP1.Offset >= V2Size, the accesses don't alias.
    if (AllNonNegative && V2Size.hasValue() &&
        DecompGEP1.Offset.uge(V2Size.getValue()))
      return AliasResult::NoAlias;
    // Similarly, if the variables are non-positive, then the total offset is
    // also non-positive and <= DecompGEP1.Offset. We have the following layout:
    //   [TotalOffset, TotalOffset+V1Size) ... [0, V2Size)
    // If -DecompGEP1.Offset >= V1Size, the accesses don't alias.
    if (AllNonPositive && V1Size.hasValue() &&
        (-DecompGEP1.Offset).uge(V1Size.getValue()))
      return AliasResult::NoAlias;

    if (V1Size.hasValue() && V2Size.hasValue()) {
      // Try to determine whether abs(VarIndex) > 0.
      Optional<APInt> MinAbsVarIndex;
      if (DecompGEP1.VarIndices.size() == 1) {
        // VarIndex = Scale*V. If V != 0 then abs(VarIndex) >= abs(Scale).
        const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
        if (isKnownNonZero(Var.V, DL, 0, &AC, Var.CxtI, DT))
          MinAbsVarIndex = Var.Scale.abs();
      } else if (DecompGEP1.VarIndices.size() == 2) {
        // VarIndex = Scale*V0 + (-Scale)*V1.
        // If V0 != V1 then abs(VarIndex) >= abs(Scale).
        // Check that VisitedPhiBBs is empty, to avoid reasoning about
        // inequality of values across loop iterations.
        const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
        const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
        if (Var0.Scale == -Var1.Scale && Var0.ZExtBits == Var1.ZExtBits &&
            Var0.SExtBits == Var1.SExtBits && VisitedPhiBBs.empty() &&
            isKnownNonEqual(Var0.V, Var1.V, DL, &AC, /* CxtI */ nullptr, DT))
          MinAbsVarIndex = Var0.Scale.abs();
      }

      if (MinAbsVarIndex) {
        // The constant offset will have added at least +/-MinAbsVarIndex to it.
        APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
        APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
        // Check that an access at OffsetLo or lower, and an access at OffsetHi
        // or higher both do not alias.
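        // For example (illustrative): with Offset = 0, MinAbsVarIndex = 8 and
        // V1Size = V2Size = 8, we get OffsetLo = -8 and OffsetHi = 8, so the
        // GEP1 access covers either [-8, 0) or [8, 16) and in both cases
        // misses V2's range [0, 8).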
        if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
            OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
          return AliasResult::NoAlias;
      }
    }

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                DecompGEP1.Offset, &AC, DT))
      return AliasResult::NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return AliasResult::MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
      (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
    return AliasResult::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasResult::MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const Value *V2, LocationSize V2Size,
                           AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = getBestAAResults().alias(
          MemoryLocation(SI->getTrueValue(), SISize),
          MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
      if (Alias == AliasResult::MayAlias)
        return AliasResult::MayAlias;
      AliasResult ThisAlias = getBestAAResults().alias(
          MemoryLocation(SI->getFalseValue(), SISize),
          MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then we can
  // return NoAlias / MustAlias. Otherwise, return MayAlias.
  AliasResult Alias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size),
      MemoryLocation(SI->getTrueValue(), SISize), AAQI);
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;

  AliasResult ThisAlias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size),
      MemoryLocation(SI->getFalseValue(), SISize), AAQI);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const Value *V2, LocationSize V2Size,
                                    AAQueryInfo &AAQI) {
  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      Optional<AliasResult> Alias;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias = getBestAAResults().alias(
            MemoryLocation(PN->getIncomingValue(i), PNSize),
            MemoryLocation(
                PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
            AAQI);
        if (Alias)
          *Alias = MergeAliasResults(*Alias, ThisAlias);
        else
          Alias = ThisAlias;
        if (*Alias == AliasResult::MayAlias)
          break;
      }
      return *Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  // If a phi operand recurses back to the phi, we can still determine NoAlias
  // if we don't alias the underlying objects of the other phi operands, as we
  // know that the recursive phi needs to be based on them in some way.
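  // A typical case (an illustrative sketch; %p, %base and %p.next are
  // hypothetical names) is a pointer induction variable:
  //   %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
  //   %p.next = getelementptr inbounds i8, i8* %p, i64 4
  // Here getUnderlyingObject(%p.next) is %p itself, so %base is the only
  // non-recursive underlying value to compare against.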
1322 bool isRecursive = false;
1323 auto CheckForRecPhi = [&](Value *PV) {
1324 if (!EnableRecPhiAnalysis)
1325 return false;
1326 if (getUnderlyingObject(PV) == PN) {
1327 isRecursive = true;
1328 return true;
1329 }
1330 return false;
1331 };

  if (PV) {
    // If we have PhiValues then use it to get the underlying phi values.
    const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
    // If we have more phi values than the search depth then return MayAlias
    // conservatively to avoid compile time explosion. The worst possible case
    // is if both sides are PHI nodes, in which case this is O(m x n) time
    // where 'm' and 'n' are the number of PHI sources.
    if (PhiValueSet.size() > MaxLookupSearchDepth)
      return AliasResult::MayAlias;
    // Add the values to V1Srcs.
    for (Value *PV1 : PhiValueSet) {
      if (CheckForRecPhi(PV1))
        continue;
      V1Srcs.push_back(PV1);
    }
  } else {
    // If we don't have PhiValues then just look at the operands of the phi
    // itself.
    // FIXME: Remove this once we can guarantee that we have PhiValues always.
    SmallPtrSet<Value *, 4> UniqueSrc;
    Value *OnePhi = nullptr;
    for (Value *PV1 : PN->incoming_values()) {
      if (isa<PHINode>(PV1)) {
        if (OnePhi && OnePhi != PV1) {
          // To control potential compile time explosion, we choose to be
          // conservative when we have more than one Phi input. It is important
          // that we handle the single phi case as that lets us handle LCSSA
          // phi nodes and (combined with the recursive phi handling) simple
          // pointer induction variable patterns.
          return AliasResult::MayAlias;
        }
        OnePhi = PV1;
      }

      if (CheckForRecPhi(PV1))
        continue;

      if (UniqueSrc.insert(PV1).second)
        V1Srcs.push_back(PV1);
    }

    if (OnePhi && UniqueSrc.size() > 1)
      // Out of an abundance of caution, allow only the trivial LCSSA and
      // recursive phi cases.
      return AliasResult::MayAlias;
  }

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return AliasResult::MayAlias;

  // If this PHI node is recursive, indicate that the pointer may be moved
  // across iterations. We can only prove NoAlias if different underlying
  // objects are involved.
  if (isRecursive)
    PNSize = LocationSize::beforeOrAfterPointer();

  // In the recursive alias queries below, we may compare values from two
  // different loop iterations. Keep track of visited phi blocks, which will
  // be used when determining value equivalence.
  bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
  auto _ = make_scope_exit([&]() {
    if (BlockInserted)
      VisitedPhiBBs.erase(PN->getParent());
  });

  // If we inserted a block into VisitedPhiBBs, alias analysis results that
  // have been cached earlier may no longer be valid. Perform recursive queries
  // with a new AAQueryInfo.
  AAQueryInfo NewAAQI = AAQI.withEmptyCache();
  AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;

  AliasResult Alias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size),
      MemoryLocation(V1Srcs[0], PNSize), *UseAAQI);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid for all elements, so we must conservatively return MayAlias.
  if (isRecursive && Alias != AliasResult::NoAlias)
    return AliasResult::MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then the result is
  // NoAlias or MustAlias. Otherwise, the result is MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = getBestAAResults().alias(
        MemoryLocation(V2, V2Size), MemoryLocation(V, PNSize), *UseAAQI);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == AliasResult::MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const Value *V2, LocationSize V2Size,
                                      AAQueryInfo &AAQI) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return AliasResult::NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsForAliasAnalysis();
  V2 = V2->stripPointerCastsForAliasAnalysis();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return AliasResult::NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return AliasResult::MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return AliasResult::NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
  const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
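    // E.g. (illustrative), two distinct allocas can never overlap:
    //   %a = alloca i32
    //   %b = alloca i32   ; alias(%a, %b) == NoAlias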
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return AliasResult::NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return AliasResult::NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return AliasResult::NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
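    // E.g. (an illustrative sketch): if %obj is an alloca whose address never
    // escapes, a pointer returned by an opaque call cannot point into %obj,
    // because the callee had no way to obtain its address.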
    if (isEscapeSource(O1) &&
        isNonEscapingLocalObject(O2, &AAQI.IsCapturedCache))
      return AliasResult::NoAlias;
    if (isEscapeSource(O2) &&
        isNonEscapingLocalObject(O1, &AAQI.IsCapturedCache))
      return AliasResult::NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
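  // E.g. (illustrative): an 8-byte access cannot lie entirely within a 4-byte
  // global, so a well-defined 8-byte access cannot alias a pointer into that
  // global.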
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return AliasResult::NoAlias;

  // If one of the accesses may be before the accessed pointer, canonicalize
  // this by using unknown after-pointer sizes for both accesses. This is
  // equivalent, because regardless of which pointer is lower, one of them
  // will always come after the other, as long as the underlying objects aren't
  // disjoint. We do this so that the rest of BasicAA does not have to deal
  // with accesses before the base pointer, and to improve cache utilization by
  // merging equivalent states.
  if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
    V1Size = LocationSize::afterPointer();
    V2Size = LocationSize::afterPointer();
  }

  // FIXME: If this depth limit is hit, then we may cache sub-optimal results
  // for recursive queries. For this reason, this limit is chosen to be large
  // enough to be very rarely hit, while still being small enough to avoid
  // stack overflows.
  if (AAQI.Depth >= 512)
    return AliasResult::MayAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size});
  const bool Swapped = V1 > V2;
  if (Swapped)
    std::swap(Locs.first, Locs.second);
  const auto &Pair = AAQI.AliasCache.try_emplace(
      Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
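  // Note: the freshly seeded NoAlias entry acts as an optimistic assumption
  // for any recursive query on the same pair (e.g. through a cyclic phi). If
  // the final result disproves the assumption, all results that depended on
  // it are purged from the cache below.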
  if (!Pair.second) {
    auto &Entry = Pair.first->second;
    if (!Entry.isDefinitive()) {
      // Remember that we used an assumption.
      ++Entry.NumAssumptionUses;
      ++AAQI.NumAssumptionUses;
    }
    // Cache contains sorted {V1,V2} pairs but we should return original order.
    auto Result = Entry.Result;
    Result.swap(Swapped);
    return Result;
  }

  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
  AliasResult Result =
      aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);

  auto It = AAQI.AliasCache.find(Locs);
  assert(It != AAQI.AliasCache.end() && "Must be in cache");
  auto &Entry = It->second;

  // Check whether a NoAlias assumption has been used, but disproven.
  bool AssumptionDisproven =
      Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
  if (AssumptionDisproven)
    Result = AliasResult::MayAlias;

  // This is a definitive result now, when considered as a root query.
  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
  Entry.Result = Result;
  // Cache contains sorted {V1,V2} pairs.
  Entry.Result.swap(Swapped);
  Entry.NumAssumptionUses = -1;

  // If the assumption has been disproven, remove any results that may have
  // been based on this assumption. Do this after the Entry updates above to
  // avoid iterator invalidation.
  if (AssumptionDisproven)
    while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
      AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());

  // The result may still be based on assumptions higher up in the chain.
  // Remember it, so it can be purged from the cache later.
  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
      Result != AliasResult::MayAlias)
    AAQI.AssumptionBasedResults.push_back(Locs);
  return Result;
}

AliasResult BasicAAResult::aliasCheckRecursive(
    const Value *V1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
    AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
    AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
    AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2) {
    bool NullIsValidLocation = NullPointerIsDefined(&F);
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
      return AliasResult::PartialAlias;
  }

  return AliasResult::MayAlias;
}

/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V and \p V2 this checks whether
/// they cannot be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value. We
/// have to do this because we are looking through phi nodes (that is, we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
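/// For example (illustrative): in a loop with
///   %p = phi i8* [ %a, %entry ], [ %p.next, %loop ]
/// two syntactically identical uses of %p.next may denote pointers from
/// different iterations once the phi has been looked through, so pointer
/// equality alone is not enough.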
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
      return false;

  return true;
}

/// Computes the symbolic difference between two de-composed GEPs.
///
/// Dest and Src are the variable indices from two decomposed GetElementPtr
/// instructions GEP1 and GEP2 which have common base pointers.
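/// For example (illustrative): with Dest = {%x * 4, %y * 2} and
/// Src = {%x * 4}, the %x entries cancel and Dest is left as {%y * 2};
/// Src entries with no match in Dest are appended with a negated scale.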
void BasicAAResult::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
    APInt Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (!!Scale) {
      VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale, Src[i].CxtI};
      Dest.push_back(Entry);
    }
  }
}

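/// Check whether two variable indices that cancel each other (equal scales of
/// opposite sign) differ only by a constant, and use that to prove NoAlias.
///
/// For illustration (a hypothetical example, not from the source): for
///   %gep1 = getelementptr i32, i32* %base, i64 zext(%x + 1)
///   %gep2 = getelementptr i32, i32* %base, i64 zext(%x)
/// the difference of variable indices is Var0 = {zext(%x + 1), scale 4} and
/// Var1 = {zext(%x), scale -4}. Stripping the extensions shows the two values
/// differ by the constant 1, i.e. by 4 bytes, so accesses of at most 4 bytes
/// starting at the two GEPs cannot overlap.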
bool BasicAAResult::constantOffsetHeuristic(
    const SmallVectorImpl<VariableGEPIndex> &VarIndices,
    LocationSize MaybeV1Size, LocationSize MaybeV2Size, const APInt &BaseOffset,
    AssumptionCache *AC, DominatorTree *DT) {
  if (VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
      !MaybeV2Size.hasValue())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale || Var0.V->getType() != Var1.V->getType())
    return false;

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. In the example above, if Var0
  // is zext(%x + 1) we should get E0.Val.V == %x and E0.Offset == 1.

  LinearExpression E0 =
      GetLinearExpression(ExtendedValue(Var0.V), DL, 0, AC, DT);
  LinearExpression E1 =
      GetLinearExpression(ExtendedValue(Var1.V), DL, 0, AC, DT);
  if (E0.Scale != E1.Scale || E0.Val.ZExtBits != E1.Val.ZExtBits ||
      E0.Val.SExtBits != E1.Val.SExtBits ||
      !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd, the maximum difference between Var0 and
  // Var1 is possible to calculate, but we're just interested in the absolute
  // minimum difference between the two. The minimum distance may occur due to
  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
  // the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
         MinDiffBytes.uge(V2Size + BaseOffset.abs());
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F);
  return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV);
}
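
// A usage sketch (an assumption-laden illustration, not part of this file's
// API): clients normally query the aggregated AAResults, to which BasicAA
// contributes, rather than BasicAAResult directly. Assuming a
// FunctionAnalysisManager FAM with the standard analyses registered, and
// hypothetical pointers P1/P2 with sizes Size1/Size2:
//   AAResults &AA = FAM.getResult<AAManager>(F);
//   AliasResult AR = AA.alias(MemoryLocation(P1, Size1),
//                             MemoryLocation(P2, Size2));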

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}
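
// For quick experimentation the analysis can be exercised via the aa-eval
// utility pass; e.g. (a sketch, exact flags vary by LLVM version, and test.ll
// is a placeholder input file):
//   opt -passes=aa-eval -aa-pipeline=basic-aa \
//       -print-all-alias-modref-info -disable-output test.ll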

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
  auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree(),
                                 PVWP ? &PVWP->getResult() : nullptr));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<PhiValuesWrapperPass>();
}

BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(), F,
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}