1 //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the primary stateless implementation of the
10 // Alias Analysis interface that implements identities (two different
11 // globals cannot alias, etc), but does no stateful analysis.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/BasicAliasAnalysis.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ScopeExit.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CFG.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/MemoryBuiltins.h"
26 #include "llvm/Analysis/MemoryLocation.h"
27 #include "llvm/Analysis/PhiValues.h"
28 #include "llvm/Analysis/TargetLibraryInfo.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 #include "llvm/IR/Argument.h"
31 #include "llvm/IR/Attributes.h"
32 #include "llvm/IR/Constant.h"
33 #include "llvm/IR/ConstantRange.h"
34 #include "llvm/IR/Constants.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/DerivedTypes.h"
37 #include "llvm/IR/Dominators.h"
38 #include "llvm/IR/Function.h"
39 #include "llvm/IR/GetElementPtrTypeIterator.h"
40 #include "llvm/IR/GlobalAlias.h"
41 #include "llvm/IR/GlobalVariable.h"
42 #include "llvm/IR/InstrTypes.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/IR/Intrinsics.h"
47 #include "llvm/IR/Operator.h"
48 #include "llvm/IR/Type.h"
49 #include "llvm/IR/User.h"
50 #include "llvm/IR/Value.h"
51 #include "llvm/InitializePasses.h"
52 #include "llvm/Pass.h"
53 #include "llvm/Support/Casting.h"
54 #include "llvm/Support/CommandLine.h"
55 #include "llvm/Support/Compiler.h"
56 #include "llvm/Support/KnownBits.h"
57 #include <cassert>
58 #include <cstdint>
59 #include <cstdlib>
60 #include <utility>
61 
62 #define DEBUG_TYPE "basicaa"
63 
64 using namespace llvm;
65 
66 /// Enable analysis of recursive PHI nodes.
67 static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
68                                           cl::init(true));
69 
/// SearchLimitReached / SearchTimes show how often the limit on the depth to
/// which GEPs are decomposed is reached. Hitting it affects the precision of
/// basic alias analysis.
73 STATISTIC(SearchLimitReached, "Number of times the limit to "
74                               "decompose GEPs is reached");
75 STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
76 
77 /// Cutoff after which to stop analysing a set of phi nodes potentially involved
78 /// in a cycle. Because we are analysing 'through' phi nodes, we need to be
79 /// careful with value equivalence. We use reachability to make sure a value
80 /// cannot be involved in a cycle.
static const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
82 
83 // The max limit of the search depth in DecomposeGEPExpression() and
84 // getUnderlyingObject().
85 static const unsigned MaxLookupSearchDepth = 6;
86 
87 bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
88                                FunctionAnalysisManager::Invalidator &Inv) {
89   // We don't care if this analysis itself is preserved, it has no state. But
90   // we need to check that the analyses it depends on have been. Note that we
91   // may be created without handles to some analyses and in that case don't
92   // depend on them.
93   if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
94       (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
95       (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
96     return true;
97 
98   // Otherwise this analysis result remains valid.
99   return false;
100 }
101 
102 //===----------------------------------------------------------------------===//
103 // Useful predicates
104 //===----------------------------------------------------------------------===//
105 
106 /// Returns the size of the object specified by V or UnknownSize if unknown.
107 static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
108                               const TargetLibraryInfo &TLI,
109                               bool NullIsValidLoc,
110                               bool RoundToAlign = false) {
111   uint64_t Size;
112   ObjectSizeOpts Opts;
113   Opts.RoundToAlign = RoundToAlign;
114   Opts.NullIsUnknownSize = NullIsValidLoc;
115   if (getObjectSize(V, Size, DL, &TLI, Opts))
116     return Size;
117   return MemoryLocation::UnknownSize;
118 }
119 
120 /// Returns true if we can prove that the object specified by V is smaller than
121 /// Size.
122 static bool isObjectSmallerThan(const Value *V, uint64_t Size,
123                                 const DataLayout &DL,
124                                 const TargetLibraryInfo &TLI,
125                                 bool NullIsValidLoc) {
126   // Note that the meanings of the "object" are slightly different in the
127   // following contexts:
128   //    c1: llvm::getObjectSize()
129   //    c2: llvm.objectsize() intrinsic
130   //    c3: isObjectSmallerThan()
131   // c1 and c2 share the same meaning; however, the meaning of "object" in c3
132   // refers to the "entire object".
133   //
134   //  Consider this example:
135   //     char *p = (char*)malloc(100)
136   //     char *q = p+80;
137   //
138   //  In the context of c1 and c2, the "object" pointed by q refers to the
139   // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
140   //
141   //  However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points into the
  // middle of the "object". If q is passed to isObjectSmallerThan() as the
  // 1st parameter, then before llvm::getObjectSize() is called to get the
  // size of the entire object, we should:
  //    - either rewind the pointer q to the base address of the object in
  //      question (in this case rewind to p), or
  //    - just give up. It is up to the caller to make sure the pointer is
  //      pointing to the base address of the object.
  //
  // We go with the 2nd option for simplicity.
152   if (!isIdentifiedObject(V))
153     return false;
154 
  // This function needs to use the aligned object size because we allow
  // reading a bit past the end given sufficient alignment.
157   uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
158                                       /*RoundToAlign*/ true);
159 
160   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
161 }
162 
163 /// Return the minimal extent from \p V to the end of the underlying object,
164 /// assuming the result is used in an aliasing query. E.g., we do use the query
165 /// location size and the fact that null pointers cannot alias here.
166 static uint64_t getMinimalExtentFrom(const Value &V,
167                                      const LocationSize &LocSize,
168                                      const DataLayout &DL,
169                                      bool NullIsValidLoc) {
170   // If we have dereferenceability information we know a lower bound for the
171   // extent as accesses for a lower offset would be valid. We need to exclude
172   // the "or null" part if null is a valid pointer. We can ignore frees, as an
173   // access after free would be undefined behavior.
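  // For example (illustrative): an argument marked dereferenceable(16) that
  // is queried with a precise 8-byte access yields an extent of
  // max(16, 8) == 16 bytes.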
174   bool CanBeNull, CanBeFreed;
175   uint64_t DerefBytes =
176     V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
177   DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
178   // If queried with a precise location size, we assume that location size to be
179   // accessed, thus valid.
180   if (LocSize.isPrecise())
181     DerefBytes = std::max(DerefBytes, LocSize.getValue());
182   return DerefBytes;
183 }
184 
185 /// Returns true if we can prove that the object specified by V has size Size.
186 static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
187                          const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
188   uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
189   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
190 }
191 
192 //===----------------------------------------------------------------------===//
193 // CaptureInfo implementations
194 //===----------------------------------------------------------------------===//
195 
196 CaptureInfo::~CaptureInfo() = default;
197 
198 bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object,
199                                                 const Instruction *I) {
200   return isNonEscapingLocalObject(Object, &IsCapturedCache);
201 }
202 
203 bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object,
204                                                  const Instruction *I) {
205   if (!isIdentifiedFunctionLocal(Object))
206     return false;
207 
208   auto Iter = EarliestEscapes.insert({Object, nullptr});
209   if (Iter.second) {
210     Instruction *EarliestCapture = FindEarliestCapture(
211         Object, *const_cast<Function *>(I->getFunction()),
212         /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT, EphValues);
213     if (EarliestCapture) {
214       auto Ins = Inst2Obj.insert({EarliestCapture, {}});
215       Ins.first->second.push_back(Object);
216     }
217     Iter.first->second = EarliestCapture;
218   }
219 
220   // No capturing instruction.
221   if (!Iter.first->second)
222     return true;
223 
224   return I != Iter.first->second &&
225          !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI);
226 }
227 
228 void EarliestEscapeInfo::removeInstruction(Instruction *I) {
229   auto Iter = Inst2Obj.find(I);
230   if (Iter != Inst2Obj.end()) {
231     for (const Value *Obj : Iter->second)
232       EarliestEscapes.erase(Obj);
233     Inst2Obj.erase(I);
234   }
235 }
236 
237 //===----------------------------------------------------------------------===//
238 // GetElementPtr Instruction Decomposition and Analysis
239 //===----------------------------------------------------------------------===//
240 
241 namespace {
242 /// Represents zext(sext(trunc(V))).
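/// For example (illustrative), for an i64 %v, {TruncBits=32, SExtBits=8,
/// ZExtBits=16} represents zext(sext(trunc %v to i32) to i40) to i56,
/// with getBitWidth() == 56.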
243 struct CastedValue {
244   const Value *V;
245   unsigned ZExtBits = 0;
246   unsigned SExtBits = 0;
247   unsigned TruncBits = 0;
248 
249   explicit CastedValue(const Value *V) : V(V) {}
250   explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
251                        unsigned TruncBits)
252       : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}
253 
254   unsigned getBitWidth() const {
255     return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
256            SExtBits;
257   }
258 
259   CastedValue withValue(const Value *NewV) const {
260     return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
261   }
262 
263   /// Replace V with zext(NewV)
264   CastedValue withZExtOfValue(const Value *NewV) const {
265     unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
266                         NewV->getType()->getPrimitiveSizeInBits();
267     if (ExtendBy <= TruncBits)
268       return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
269 
270     // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
271     ExtendBy -= TruncBits;
272     return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
273   }
274 
275   /// Replace V with sext(NewV)
276   CastedValue withSExtOfValue(const Value *NewV) const {
277     unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
278                         NewV->getType()->getPrimitiveSizeInBits();
279     if (ExtendBy <= TruncBits)
280       return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
281 
282     // zext(sext(sext(NewV)))
283     ExtendBy -= TruncBits;
284     return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
285   }
286 
287   APInt evaluateWith(APInt N) const {
288     assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
289            "Incompatible bit width");
290     if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
291     if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
292     if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
293     return N;
294   }
295 
296   ConstantRange evaluateWith(ConstantRange N) const {
297     assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
298            "Incompatible bit width");
299     if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
300     if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
301     if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
302     return N;
303   }
304 
305   bool canDistributeOver(bool NUW, bool NSW) const {
306     // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
307     // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
308     // trunc(x op y) == trunc(x) op trunc(y)
309     return (!ZExtBits || NUW) && (!SExtBits || NSW);
310   }
311 
312   bool hasSameCastsAs(const CastedValue &Other) const {
313     return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
314            TruncBits == Other.TruncBits;
315   }
316 };
317 
318 /// Represents zext(sext(trunc(V))) * Scale + Offset.
319 struct LinearExpression {
320   CastedValue Val;
321   APInt Scale;
322   APInt Offset;
323 
324   /// True if all operations in this expression are NSW.
325   bool IsNSW;
326 
327   LinearExpression(const CastedValue &Val, const APInt &Scale,
328                    const APInt &Offset, bool IsNSW)
329       : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}
330 
331   LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
332     unsigned BitWidth = Val.getBitWidth();
333     Scale = APInt(BitWidth, 1);
334     Offset = APInt(BitWidth, 0);
335   }
336 
337   LinearExpression mul(const APInt &Other, bool MulIsNSW) const {
338     // The check for zero offset is necessary, because generally
339     // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
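    // For example (illustrative), in i8 with X = 100, Y = -100, Z = 2:
    // (X +nsw Y) *nsw Z == 0 holds, yet X *nsw Z alone would overflow.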
340     bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
341     return LinearExpression(Val, Scale * Other, Offset * Other, NSW);
342   }
343 };
344 }
345 
346 /// Analyzes the specified value as a linear expression: "A*V + B", where A and
347 /// B are constant integers.
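/// For example (illustrative), analyzing (shl (add i32 %x, 1), 2) yields the
/// linear expression 4*%x + 4.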
348 static LinearExpression GetLinearExpression(
349     const CastedValue &Val,  const DataLayout &DL, unsigned Depth,
350     AssumptionCache *AC, DominatorTree *DT) {
351   // Limit our recursion depth.
352   if (Depth == 6)
353     return Val;
354 
355   if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
356     return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
357                             Val.evaluateWith(Const->getValue()), true);
358 
359   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
360     if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
361       APInt RHS = Val.evaluateWith(RHSC->getValue());
362       // The only non-OBO case we deal with is or, and only limited to the
363       // case where it is both nuw and nsw.
364       bool NUW = true, NSW = true;
365       if (isa<OverflowingBinaryOperator>(BOp)) {
366         NUW &= BOp->hasNoUnsignedWrap();
367         NSW &= BOp->hasNoSignedWrap();
368       }
369       if (!Val.canDistributeOver(NUW, NSW))
370         return Val;
371 
372       // While we can distribute over trunc, we cannot preserve nowrap flags
373       // in that case.
374       if (Val.TruncBits)
375         NUW = NSW = false;
376 
377       LinearExpression E(Val);
378       switch (BOp->getOpcode()) {
379       default:
380         // We don't understand this instruction, so we can't decompose it any
381         // further.
382         return Val;
383       case Instruction::Or:
384         // X|C == X+C if all the bits in C are unset in X.  Otherwise we can't
385         // analyze it.
386         if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
387                                BOp, DT))
388           return Val;
389 
390         LLVM_FALLTHROUGH;
391       case Instruction::Add: {
392         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
393                                 Depth + 1, AC, DT);
394         E.Offset += RHS;
395         E.IsNSW &= NSW;
396         break;
397       }
398       case Instruction::Sub: {
399         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
400                                 Depth + 1, AC, DT);
401         E.Offset -= RHS;
402         E.IsNSW &= NSW;
403         break;
404       }
405       case Instruction::Mul:
406         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
407                                 Depth + 1, AC, DT)
408                 .mul(RHS, NSW);
409         break;
410       case Instruction::Shl:
411         // We're trying to linearize an expression of the kind:
412         //   shl i8 -128, 36
413         // where the shift count exceeds the bitwidth of the type.
414         // We can't decompose this further (the expression would return
415         // a poison value).
416         if (RHS.getLimitedValue() > Val.getBitWidth())
417           return Val;
418 
419         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
420                                 Depth + 1, AC, DT);
421         E.Offset <<= RHS.getLimitedValue();
422         E.Scale <<= RHS.getLimitedValue();
423         E.IsNSW &= NSW;
424         break;
425       }
426       return E;
427     }
428   }
429 
430   if (isa<ZExtInst>(Val.V))
431     return GetLinearExpression(
432         Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
433         DL, Depth + 1, AC, DT);
434 
435   if (isa<SExtInst>(Val.V))
436     return GetLinearExpression(
437         Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
438         DL, Depth + 1, AC, DT);
439 
440   return Val;
441 }
442 
/// Ensures that a pointer offset fits in an integer of size IndexSize
/// (in bits) when that size is smaller than the maximum index size. This is
/// an issue, in particular, for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information where the
/// maximum index size is 64b.
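/// For example (illustrative): with a 64-bit Offset and IndexSize == 32, an
/// Offset of 0x00000000FFFFFFFF is canonicalized to -1.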
448 static APInt adjustToIndexSize(const APInt &Offset, unsigned IndexSize) {
449   assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
450   unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
451   return (Offset << ShiftBits).ashr(ShiftBits);
452 }
453 
454 namespace {
455 // A linear transformation of a Value; this class represents
456 // ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
457 struct VariableGEPIndex {
458   CastedValue Val;
459   APInt Scale;
460 
461   // Context instruction to use when querying information about this index.
462   const Instruction *CxtI;
463 
464   /// True if all operations in this expression are NSW.
465   bool IsNSW;
466 
467   void dump() const {
468     print(dbgs());
469     dbgs() << "\n";
470   }
471   void print(raw_ostream &OS) const {
472     OS << "(V=" << Val.V->getName()
473        << ", zextbits=" << Val.ZExtBits
474        << ", sextbits=" << Val.SExtBits
475        << ", truncbits=" << Val.TruncBits
476        << ", scale=" << Scale << ")";
477   }
478 };
479 }
480 
481 // Represents the internal structure of a GEP, decomposed into a base pointer,
482 // constant offsets, and variable scaled indices.
483 struct BasicAAResult::DecomposedGEP {
484   // Base pointer of the GEP
485   const Value *Base;
486   // Total constant offset from base.
487   APInt Offset;
488   // Scaled variable (non-constant) indices.
489   SmallVector<VariableGEPIndex, 4> VarIndices;
490   // Are all operations inbounds GEPs or non-indexing operations?
491   // (None iff expression doesn't involve any geps)
492   Optional<bool> InBounds;
493 
494   void dump() const {
495     print(dbgs());
496     dbgs() << "\n";
497   }
498   void print(raw_ostream &OS) const {
499     OS << "(DecomposedGEP Base=" << Base->getName()
500        << ", Offset=" << Offset
501        << ", VarIndices=[";
502     for (size_t i = 0; i < VarIndices.size(); i++) {
503       if (i != 0)
504         OS << ", ";
505       VarIndices[i].print(OS);
506     }
507     OS << "])";
508   }
509 };
510 
511 
512 /// If V is a symbolic pointer expression, decompose it into a base pointer
513 /// with a constant offset and a number of scaled symbolic offsets.
514 ///
515 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale
516 /// in the VarIndices vector) are Value*'s that are known to be scaled by the
517 /// specified amount, but which may have other unrepresented high bits. As
518 /// such, the gep cannot necessarily be reconstructed from its decomposed form.
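///
/// For example (illustrative), with 64-bit pointers:
///   %gep = getelementptr inbounds [10 x i32], [10 x i32]* %b, i64 0, i64 %i
/// decomposes into Base = %b, Offset = 0, and a single variable index
/// {V = %i, Scale = 4}.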
519 BasicAAResult::DecomposedGEP
520 BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
521                                       AssumptionCache *AC, DominatorTree *DT) {
522   // Limit recursion depth to limit compile time in crazy cases.
523   unsigned MaxLookup = MaxLookupSearchDepth;
524   SearchTimes++;
525   const Instruction *CxtI = dyn_cast<Instruction>(V);
526 
527   unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
528   DecomposedGEP Decomposed;
529   Decomposed.Offset = APInt(MaxIndexSize, 0);
530   do {
531     // See if this is a bitcast or GEP.
532     const Operator *Op = dyn_cast<Operator>(V);
533     if (!Op) {
534       // The only non-operator case we can handle are GlobalAliases.
535       if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
536         if (!GA->isInterposable()) {
537           V = GA->getAliasee();
538           continue;
539         }
540       }
541       Decomposed.Base = V;
542       return Decomposed;
543     }
544 
545     if (Op->getOpcode() == Instruction::BitCast ||
546         Op->getOpcode() == Instruction::AddrSpaceCast) {
547       V = Op->getOperand(0);
548       continue;
549     }
550 
551     const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
552     if (!GEPOp) {
553       if (const auto *PHI = dyn_cast<PHINode>(V)) {
554         // Look through single-arg phi nodes created by LCSSA.
555         if (PHI->getNumIncomingValues() == 1) {
556           V = PHI->getIncomingValue(0);
557           continue;
558         }
559       } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed
        // with the attributes, but have properties like returning an aliasing
        // pointer. Because some analyses may assume that a nocapture pointer
        // is not returned from some special intrinsic (because the function
        // would have to be marked with the returned attribute), it is crucial
        // to use this function, as it should be in sync with CaptureTracking.
        // Not using it may cause weird miscompilations where 2 aliasing
        // pointers are assumed not to alias.
569         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
570           V = RP;
571           continue;
572         }
573       }
574 
575       Decomposed.Base = V;
576       return Decomposed;
577     }
578 
579     // Track whether we've seen at least one in bounds gep, and if so, whether
580     // all geps parsed were in bounds.
581     if (Decomposed.InBounds == None)
582       Decomposed.InBounds = GEPOp->isInBounds();
583     else if (!GEPOp->isInBounds())
584       Decomposed.InBounds = false;
585 
586     assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");
587 
588     // Don't attempt to analyze GEPs if index scale is not a compile-time
589     // constant.
590     if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
591       Decomposed.Base = V;
592       return Decomposed;
593     }
594 
595     unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into Offset/VarIndices.
597     gep_type_iterator GTI = gep_type_begin(GEPOp);
598     unsigned IndexSize = DL.getIndexSizeInBits(AS);
599     // Assume all GEP operands are constants until proven otherwise.
600     bool GepHasConstantOffset = true;
601     for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
602          I != E; ++I, ++GTI) {
603       const Value *Index = *I;
604       // Compute the (potentially symbolic) offset in bytes for this index.
605       if (StructType *STy = GTI.getStructTypeOrNull()) {
606         // For a struct, add the member offset.
607         unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
608         if (FieldNo == 0)
609           continue;
610 
611         Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
612         continue;
613       }
614 
615       // For an array/pointer, add the element offset, explicitly scaled.
616       if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
617         if (CIdx->isZero())
618           continue;
619         Decomposed.Offset +=
620             DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
621             CIdx->getValue().sextOrTrunc(MaxIndexSize);
622         continue;
623       }
624 
625       GepHasConstantOffset = false;
626 
627       // If the integer type is smaller than the index size, it is implicitly
628       // sign extended or truncated to index size.
629       unsigned Width = Index->getType()->getIntegerBitWidth();
630       unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
631       unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
632       LinearExpression LE = GetLinearExpression(
633           CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);
634 
635       // Scale by the type size.
636       unsigned TypeSize =
637           DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize();
638       LE = LE.mul(APInt(IndexSize, TypeSize), GEPOp->isInBounds());
639       Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
640       APInt Scale = LE.Scale.sext(MaxIndexSize);
641 
642       // If we already had an occurrence of this index variable, merge this
643       // scale into it.  For example, we want to handle:
644       //   A[x][x] -> x*16 + x*4 -> x*20
645       // This also ensures that 'x' only appears in the index list once.
646       for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
647         if (Decomposed.VarIndices[i].Val.V == LE.Val.V &&
648             Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
649           Scale += Decomposed.VarIndices[i].Scale;
650           Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
651           break;
652         }
653       }
654 
655       // Make sure that we have a scale that makes sense for this target's
656       // index size.
657       Scale = adjustToIndexSize(Scale, IndexSize);
658 
659       if (!!Scale) {
660         VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW};
661         Decomposed.VarIndices.push_back(Entry);
662       }
663     }
664 
665     // Take care of wrap-arounds
666     if (GepHasConstantOffset)
667       Decomposed.Offset = adjustToIndexSize(Decomposed.Offset, IndexSize);
668 
669     // Analyze the base pointer next.
670     V = GEPOp->getOperand(0);
671   } while (--MaxLookup);
672 
673   // If the chain of expressions is too deep, just return early.
674   Decomposed.Base = V;
675   SearchLimitReached++;
676   return Decomposed;
677 }
678 
/// Returns whether the given pointer value points to constant memory, or, if
/// OrLocal is set, to memory that is local to the function. Global constants
/// are considered local to all functions.
682 bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
683                                            AAQueryInfo &AAQI, bool OrLocal) {
684   assert(Visited.empty() && "Visited must be cleared after use!");
685 
686   unsigned MaxLookup = 8;
687   SmallVector<const Value *, 16> Worklist;
688   Worklist.push_back(Loc.Ptr);
689   do {
690     const Value *V = getUnderlyingObject(Worklist.pop_back_val());
691     if (!Visited.insert(V).second) {
692       Visited.clear();
693       return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
694     }
695 
696     // An alloca instruction defines local memory.
697     if (OrLocal && isa<AllocaInst>(V))
698       continue;
699 
700     // A global constant counts as local memory for our purposes.
701     if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
702       // Note: this doesn't require GV to be "ODR" because it isn't legal for a
703       // global to be marked constant in some modules and non-constant in
704       // others.  GV may even be a declaration, not a definition.
705       if (!GV->isConstant()) {
706         Visited.clear();
707         return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
708       }
709       continue;
710     }
711 
712     // If both select values point to local memory, then so does the select.
713     if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
714       Worklist.push_back(SI->getTrueValue());
715       Worklist.push_back(SI->getFalseValue());
716       continue;
717     }
718 
719     // If all values incoming to a phi node point to local memory, then so does
720     // the phi.
721     if (const PHINode *PN = dyn_cast<PHINode>(V)) {
722       // Don't bother inspecting phi nodes with many operands.
723       if (PN->getNumIncomingValues() > MaxLookup) {
724         Visited.clear();
725         return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
726       }
727       append_range(Worklist, PN->incoming_values());
728       continue;
729     }
730 
731     // Otherwise be conservative.
732     Visited.clear();
733     return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
734   } while (!Worklist.empty() && --MaxLookup);
735 
736   Visited.clear();
737   return Worklist.empty();
738 }
739 
740 static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
741   const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
742   return II && II->getIntrinsicID() == IID;
743 }
744 
745 /// Returns the behavior when calling the given call site.
746 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
747   if (Call->doesNotAccessMemory())
748     // Can't do better than this.
749     return FMRB_DoesNotAccessMemory;
750 
751   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
752 
753   // If the callsite knows it only reads memory, don't return worse
754   // than that.
755   if (Call->onlyReadsMemory())
756     Min = FMRB_OnlyReadsMemory;
757   else if (Call->onlyWritesMemory())
758     Min = FMRB_OnlyWritesMemory;
759 
760   if (Call->onlyAccessesArgMemory())
761     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
762   else if (Call->onlyAccessesInaccessibleMemory())
763     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
764   else if (Call->onlyAccessesInaccessibleMemOrArgMem())
765     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
766 
767   // If the call has operand bundles then aliasing attributes from the function
768   // it calls do not directly apply to the call.  This can be made more precise
769   // in the future.
770   if (!Call->hasOperandBundles())
771     if (const Function *F = Call->getCalledFunction())
772       Min =
773           FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));
774 
775   return Min;
776 }
777 
778 /// Returns the behavior when calling the given function. For use when the call
779 /// site is not known.
780 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
781   // If the function declares it doesn't access memory, we can't do better.
782   if (F->doesNotAccessMemory())
783     return FMRB_DoesNotAccessMemory;
784 
785   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
786 
787   // If the function declares it only reads memory, go with that.
788   if (F->onlyReadsMemory())
789     Min = FMRB_OnlyReadsMemory;
790   else if (F->onlyWritesMemory())
791     Min = FMRB_OnlyWritesMemory;
792 
793   if (F->onlyAccessesArgMemory())
794     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
795   else if (F->onlyAccessesInaccessibleMemory())
796     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
797   else if (F->onlyAccessesInaccessibleMemOrArgMem())
798     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
799 
800   return Min;
801 }
802 
/// Returns true if this is a writeonly (i.e., Mod only) parameter.
804 static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
805                              const TargetLibraryInfo &TLI) {
806   if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
807     return true;
808 
809   // We can bound the aliasing properties of memset_pattern16 just as we can
810   // for memcpy/memset.  This is particularly important because the
811   // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
812   // whenever possible.
813   // FIXME Consider handling this in InferFunctionAttr.cpp together with other
814   // attributes.
815   LibFunc F;
816   if (Call->getCalledFunction() &&
817       TLI.getLibFunc(*Call->getCalledFunction(), F) &&
818       F == LibFunc_memset_pattern16 && TLI.has(F))
819     if (ArgIdx == 0)
820       return true;
821 
822   // TODO: memset_pattern4, memset_pattern8
823   // TODO: _chk variants
824   // TODO: strcmp, strcpy
825 
826   return false;
827 }
828 
829 ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
830                                            unsigned ArgIdx) {
831   // Checking for known builtin intrinsics and target library functions.
832   if (isWriteOnlyParam(Call, ArgIdx, TLI))
833     return ModRefInfo::Mod;
834 
835   if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
836     return ModRefInfo::Ref;
837 
838   if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
839     return ModRefInfo::NoModRef;
840 
841   return AAResultBase::getArgModRefInfo(Call, ArgIdx);
842 }
843 
844 #ifndef NDEBUG
845 static const Function *getParent(const Value *V) {
846   if (const Instruction *inst = dyn_cast<Instruction>(V)) {
847     if (!inst->getParent())
848       return nullptr;
849     return inst->getParent()->getParent();
850   }
851 
852   if (const Argument *arg = dyn_cast<Argument>(V))
853     return arg->getParent();
854 
855   return nullptr;
856 }
857 
858 static bool notDifferentParent(const Value *O1, const Value *O2) {
859 
860   const Function *F1 = getParent(O1);
861   const Function *F2 = getParent(O2);
862 
863   return !F1 || !F2 || F1 == F2;
864 }
865 #endif
866 
867 AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
868                                  const MemoryLocation &LocB,
869                                  AAQueryInfo &AAQI) {
870   assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
871          "BasicAliasAnalysis doesn't support interprocedural queries.");
872   return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI);
873 }
874 
875 /// Checks to see if the specified callsite can clobber the specified memory
876 /// object.
877 ///
878 /// Since we only look at local properties of this function, we really can't
879 /// say much about this query.  We do, however, use simple "address taken"
880 /// analysis on local objects.
881 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
882                                         const MemoryLocation &Loc,
883                                         AAQueryInfo &AAQI) {
884   assert(notDifferentParent(Call, Loc.Ptr) &&
885          "AliasAnalysis query involving multiple functions!");
886 
887   const Value *Object = getUnderlyingObject(Loc.Ptr);
888 
889   // Calls marked 'tail' cannot read or write allocas from the current frame
890   // because the current frame might be destroyed by the time they run. However,
891   // a tail call may use an alloca with byval. Calling with byval copies the
892   // contents of the alloca into argument registers or stack slots, so there is
893   // no lifetime issue.
894   if (isa<AllocaInst>(Object))
895     if (const CallInst *CI = dyn_cast<CallInst>(Call))
896       if (CI->isTailCall() &&
897           !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
898         return ModRefInfo::NoModRef;
899 
900   // Stack restore is able to modify unescaped dynamic allocas. Assume it may
901   // modify them even though the alloca is not escaped.
902   if (auto *AI = dyn_cast<AllocaInst>(Object))
903     if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
904       return ModRefInfo::Mod;
905 
906   // If the pointer is to a locally allocated object that does not escape,
907   // then the call can not mod/ref the pointer unless the call takes the pointer
908   // as an argument, and itself doesn't capture it.
909   if (!isa<Constant>(Object) && Call != Object &&
910       AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) {
911 
912     // Optimistically assume that call doesn't touch Object and check this
913     // assumption in the following loop.
914     ModRefInfo Result = ModRefInfo::NoModRef;
915     bool IsMustAlias = true;
916 
917     unsigned OperandNo = 0;
918     for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
919          CI != CE; ++CI, ++OperandNo) {
920       // Only look at the no-capture or byval pointer arguments.  If this
921       // pointer were passed to arguments that were neither of these, then it
922       // couldn't be no-capture.
923       if (!(*CI)->getType()->isPointerTy() ||
924           (!Call->doesNotCapture(OperandNo) && OperandNo < Call->arg_size() &&
925            !Call->isByValArgument(OperandNo)))
926         continue;
927 
928       // Call doesn't access memory through this operand, so we don't care
929       // if it aliases with Object.
930       if (Call->doesNotAccessMemory(OperandNo))
931         continue;
932 
933       // If this is a no-capture pointer argument, see if we can tell that it
934       // is impossible to alias the pointer we're checking.
935       AliasResult AR = getBestAAResults().alias(
936           MemoryLocation::getBeforeOrAfter(*CI),
937           MemoryLocation::getBeforeOrAfter(Object), AAQI);
938       if (AR != AliasResult::MustAlias)
939         IsMustAlias = false;
940       // Operand doesn't alias 'Object', continue looking for other aliases
941       if (AR == AliasResult::NoAlias)
942         continue;
943       // Operand aliases 'Object', but call doesn't modify it. Strengthen
944       // initial assumption and keep looking in case if there are more aliases.
945       if (Call->onlyReadsMemory(OperandNo)) {
946         Result = setRef(Result);
947         continue;
948       }
949       // Operand aliases 'Object' but call only writes into it.
950       if (Call->onlyWritesMemory(OperandNo)) {
951         Result = setMod(Result);
952         continue;
953       }
954       // This operand aliases 'Object' and call reads and writes into it.
955       // Setting ModRef will not yield an early return below, MustAlias is not
956       // used further.
957       Result = ModRefInfo::ModRef;
958       break;
959     }
960 
961     // No operand aliases, reset Must bit. Add below if at least one aliases
962     // and all aliases found are MustAlias.
963     if (isNoModRef(Result))
964       IsMustAlias = false;
965 
966     // Early return if we improved mod ref information
967     if (!isModAndRefSet(Result)) {
968       if (isNoModRef(Result))
969         return ModRefInfo::NoModRef;
970       return IsMustAlias ? setMust(Result) : clearMust(Result);
971     }
972   }
973 
974   // If the call is malloc/calloc like, we can assume that it doesn't
975   // modify any IR visible value.  This is only valid because we assume these
976   // routines do not read values visible in the IR.  TODO: Consider special
977   // casing realloc and strdup routines which access only their arguments as
978   // well.  Or alternatively, replace all of this with inaccessiblememonly once
979   // that's implemented fully.
980   if (isMallocOrCallocLikeFn(Call, &TLI)) {
981     // Be conservative if the accessed pointer may alias the allocation -
982     // fallback to the generic handling below.
983     if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc,
984                                  AAQI) == AliasResult::NoAlias)
985       return ModRefInfo::NoModRef;
986   }
987 
  // Ideally, there should be no need to special case memcpy/memmove
  // intrinsics here since the general machinery (based on memory attributes)
  // should already handle them just fine. Unfortunately, it doesn't due to a
  // deficiency in operand bundle support. At the moment it's not clear if the
  // complexity of enhancing the general mechanism is worth it.
993   // TODO: Consider improving operand bundles support in general mechanism.
994   if (auto *Inst = dyn_cast<AnyMemTransferInst>(Call)) {
995     AliasResult SrcAA =
996         getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
997     AliasResult DestAA =
998         getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
999     // It's also possible for Loc to alias both src and dest, or neither.
1000     ModRefInfo rv = ModRefInfo::NoModRef;
1001     if (SrcAA != AliasResult::NoAlias || Call->hasReadingOperandBundles())
1002       rv = setRef(rv);
1003     if (DestAA != AliasResult::NoAlias || Call->hasClobberingOperandBundles())
1004       rv = setMod(rv);
1005     return rv;
1006   }
1007 
1008   // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
1010   // location.
1011   //
1012   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1013   // heap state at the point the guard is issued needs to be consistent in case
1014   // the guard invokes the "deopt" continuation.
1015   if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
1016     return ModRefInfo::Ref;
1017   // The same applies to deoptimize which is essentially a guard(false).
1018   if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
1019     return ModRefInfo::Ref;
1020 
1021   // Like assumes, invariant.start intrinsics were also marked as arbitrarily
1022   // writing so that proper control dependencies are maintained but they never
1023   // mod any particular memory location visible to the IR.
1024   // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
1025   // intrinsic is now modeled as reading memory. This prevents hoisting the
1026   // invariant.start intrinsic over stores. Consider:
1027   // *ptr = 40;
1028   // *ptr = 50;
1029   // invariant_start(ptr)
1030   // int val = *ptr;
1031   // print(val);
1032   //
1033   // This cannot be transformed to:
1034   //
1035   // *ptr = 40;
1036   // invariant_start(ptr)
1037   // *ptr = 50;
1038   // int val = *ptr;
1039   // print(val);
1040   //
1041   // The transformation will cause the second store to be ignored (based on
1042   // rules of invariant.start)  and print 40, while the first program always
1043   // prints 50.
1044   if (isIntrinsicCall(Call, Intrinsic::invariant_start))
1045     return ModRefInfo::Ref;
1046 
1047   // The AAResultBase base class has some smarts, lets use them.
1048   return AAResultBase::getModRefInfo(Call, Loc, AAQI);
1049 }
1050 
1051 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
1052                                         const CallBase *Call2,
1053                                         AAQueryInfo &AAQI) {
1054   // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
1056   // location.
1057   //
1058   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1059   // heap state at the point the guard is issued needs to be consistent in case
1060   // the guard invokes the "deopt" continuation.
1061 
1062   // NB! This function is *not* commutative, so we special case two
1063   // possibilities for guard intrinsics.
1064 
1065   if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
1066     return isModSet(createModRefInfo(getModRefBehavior(Call2)))
1067                ? ModRefInfo::Ref
1068                : ModRefInfo::NoModRef;
1069 
1070   if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
1071     return isModSet(createModRefInfo(getModRefBehavior(Call1)))
1072                ? ModRefInfo::Mod
1073                : ModRefInfo::NoModRef;
1074 
1075   // The AAResultBase base class has some smarts, lets use them.
1076   return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
1077 }
1078 
/// Return true if we know V to be the base address of the corresponding
/// memory object.  This implies that any address less than V must be out of
/// bounds for the underlying object.  Note that just being
/// isIdentifiedObject() is not enough - for example, a negative offset from a
/// noalias argument or call can be inbounds w.r.t. the actual underlying
/// object.
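/// For example (illustrative), a caller may pass &buf[4] as a noalias
/// argument %p; a subsequent getelementptr inbounds i8, i8* %p, i64 -4 is
/// then still in bounds of the underlying object.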
1084 static bool isBaseOfObject(const Value *V) {
1085   // TODO: We can handle other cases here
1086   // 1) For GC languages, arguments to functions are often required to be
1087   //    base pointers.
1088   // 2) Result of allocation routines are often base pointers.  Leverage TLI.
1089   return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
1090 }
1091 
1092 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1093 /// another pointer.
1094 ///
1095 /// We know that V1 is a GEP, but we don't know anything about V2.
1096 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1097 /// V2.
1098 AliasResult BasicAAResult::aliasGEP(
1099     const GEPOperator *GEP1, LocationSize V1Size,
1100     const Value *V2, LocationSize V2Size,
1101     const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1102   if (!V1Size.hasValue() && !V2Size.hasValue()) {
1103     // TODO: This limitation exists for compile-time reasons. Relax it if we
1104     // can avoid exponential pathological cases.
1105     if (!isa<GEPOperator>(V2))
1106       return AliasResult::MayAlias;
1107 
1108     // If both accesses have unknown size, we can only check whether the base
1109     // objects don't alias.
1110     AliasResult BaseAlias = getBestAAResults().alias(
1111         MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1112         MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1113     return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
1114                                              : AliasResult::MayAlias;
1115   }
1116 
1117   DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
1118   DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);
1119 
1120   // Bail if we were not able to decompose anything.
1121   if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
1122     return AliasResult::MayAlias;
1123 
1124   // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1125   // symbolic difference.
1126   subtractDecomposedGEPs(DecompGEP1, DecompGEP2);
1127 
1128   // If an inbounds GEP would have to start from an out of bounds address
1129   // for the two to alias, then we can assume noalias.
1130   if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
1131       V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
1132       isBaseOfObject(DecompGEP2.Base))
1133     return AliasResult::NoAlias;
1134 
1135   if (isa<GEPOperator>(V2)) {
1136     // Symmetric case to above.
1137     if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
1138         V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
1139         isBaseOfObject(DecompGEP1.Base))
1140       return AliasResult::NoAlias;
1141   }
1142 
1143   // For GEPs with identical offsets, we can preserve the size and AAInfo
1144   // when performing the alias check on the underlying objects.
1145   if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1146     return getBestAAResults().alias(MemoryLocation(DecompGEP1.Base, V1Size),
1147                                     MemoryLocation(DecompGEP2.Base, V2Size),
1148                                     AAQI);
1149 
1150   // Do the base pointers alias?
1151   AliasResult BaseAlias = getBestAAResults().alias(
1152       MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
1153       MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);
1154 
1155   // If we get a No or May, then return it immediately, no amount of analysis
1156   // will improve this situation.
1157   if (BaseAlias != AliasResult::MustAlias) {
1158     assert(BaseAlias == AliasResult::NoAlias ||
1159            BaseAlias == AliasResult::MayAlias);
1160     return BaseAlias;
1161   }
1162 
1163   // If there is a constant difference between the pointers, but the difference
1164   // is less than the size of the associated memory object, then we know
1165   // that the objects are partially overlapping.  If the difference is
1166   // greater, we know they do not overlap.
1167   if (DecompGEP1.VarIndices.empty()) {
1168     APInt &Off = DecompGEP1.Offset;
1169 
1170     // Initialize for Off >= 0 (V2 <= GEP1) case.
1171     const Value *LeftPtr = V2;
1172     const Value *RightPtr = GEP1;
1173     LocationSize VLeftSize = V2Size;
1174     LocationSize VRightSize = V1Size;
1175     const bool Swapped = Off.isNegative();
1176 
1177     if (Swapped) {
1178       // Swap if we have the situation where:
1179       // +                +
1180       // | BaseOffset     |
1181       // ---------------->|
1182       // |-->V1Size       |-------> V2Size
1183       // GEP1             V2
1184       std::swap(LeftPtr, RightPtr);
1185       std::swap(VLeftSize, VRightSize);
1186       Off = -Off;
1187     }
1188 
1189     if (!VLeftSize.hasValue())
1190       return AliasResult::MayAlias;
1191 
1192     const uint64_t LSize = VLeftSize.getValue();
1193     if (Off.ult(LSize)) {
1194       // Conservatively drop processing if a phi was visited and/or offset is
1195       // too big.
1196       AliasResult AR = AliasResult::PartialAlias;
1197       if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
1198           (Off + VRightSize.getValue()).ule(LSize)) {
1199         // Memory referenced by right pointer is nested. Save the offset in
1200         // cache. Note that originally offset estimated as GEP1-V2, but
1201         // AliasResult contains the shift that represents GEP1+Offset=V2.
1202         AR.setOffset(-Off.getSExtValue());
1203         AR.swap(Swapped);
1204       }
1205       return AR;
1206     }
1207     return AliasResult::NoAlias;
1208   }
1209 
  // We need to know both access sizes for all the following heuristics.
1211   if (!V1Size.hasValue() || !V2Size.hasValue())
1212     return AliasResult::MayAlias;
1213 
1214   APInt GCD;
1215   ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
1216   for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1217     const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
1218     const APInt &Scale = Index.Scale;
1219     APInt ScaleForGCD = Scale;
1220     if (!Index.IsNSW)
1221       ScaleForGCD = APInt::getOneBitSet(Scale.getBitWidth(),
1222                                         Scale.countTrailingZeros());
1223 
1224     if (i == 0)
1225       GCD = ScaleForGCD.abs();
1226     else
1227       GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());
1228 
1229     ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false,
1230                                             true, &AC, Index.CxtI);
1231     KnownBits Known =
1232         computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
1233     CR = CR.intersectWith(
1234         ConstantRange::fromKnownBits(Known, /* Signed */ true),
1235         ConstantRange::Signed);
1236     CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());
1237 
1238     assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
1239            "Bit widths are normalized to MaxIndexSize");
1240     if (Index.IsNSW)
1241       OffsetRange = OffsetRange.add(CR.smul_sat(ConstantRange(Scale)));
1242     else
1243       OffsetRange = OffsetRange.add(CR.smul_fast(ConstantRange(Scale)));
1244   }
1245 
1246   // We now have accesses at two offsets from the same base:
1247   //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
1248   //  2. 0 with size V2Size
1249   // Using arithmetic modulo GCD, the accesses are at
1250   // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
1251   // into the range [V2Size..GCD), then we know they cannot overlap.
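  // For example (illustrative): with GCD == 8, Offset == 4, V1Size == 2 and
  // V2Size == 4, ModOffset == 4, the accesses occupy [4..6) and [0..4)
  // modulo 8, and since 4 >= V2Size and (8 - 4) >= V1Size, they cannot
  // overlap.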
1252   APInt ModOffset = DecompGEP1.Offset.srem(GCD);
1253   if (ModOffset.isNegative())
1254     ModOffset += GCD; // We want mod, not rem.
1255   if (ModOffset.uge(V2Size.getValue()) &&
1256       (GCD - ModOffset).uge(V1Size.getValue()))
1257     return AliasResult::NoAlias;
1258 
  // Compute ranges of potentially accessed bytes for both accesses. If the
  // intersection is empty, there can be no overlap.
1261   unsigned BW = OffsetRange.getBitWidth();
1262   ConstantRange Range1 = OffsetRange.add(
1263       ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
1264   ConstantRange Range2 =
1265       ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
1266   if (Range1.intersectWith(Range2).isEmptySet())
1267     return AliasResult::NoAlias;
1268 
1269   // Try to determine the range of values for VarIndex such that
1270   // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
1271   Optional<APInt> MinAbsVarIndex;
1272   if (DecompGEP1.VarIndices.size() == 1) {
1273     // VarIndex = Scale*V.
1274     const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1275     if (Var.Val.TruncBits == 0 &&
1276         isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
1277       // If V != 0, then abs(VarIndex) > 0.
1278       MinAbsVarIndex = APInt(Var.Scale.getBitWidth(), 1);
1279 
1280       // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
1281       // potentially wrapping math.
1282       auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
1283         if (Var.IsNSW)
1284           return true;
1285 
1286         int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
1287         // If Scale is small enough so that abs(V*Scale) >= abs(Scale) holds.
1288         // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
1289         // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
1290         int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
1291         if (MaxScaleValueBW <= 0)
1292           return false;
1293         return Var.Scale.ule(
1294             APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
1295       };
1296       // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
1297       // presence of potentially wrapping math.
1298       if (MultiplyByScaleNoWrap(Var)) {
1299         // If V != 0 then abs(VarIndex) >= abs(Scale).
1300         MinAbsVarIndex = Var.Scale.abs();
1301       }
1302     }
1303   } else if (DecompGEP1.VarIndices.size() == 2) {
1304     // VarIndex = Scale*V0 + (-Scale)*V1.
1305     // If V0 != V1 then abs(VarIndex) >= abs(Scale).
1306     // Check that VisitedPhiBBs is empty, to avoid reasoning about
1307     // inequality of values across loop iterations.
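    // For example (illustrative), comparing &A[i] and &A[j] with 4-byte
    // elements yields indices {V=i, Scale=4} and {V=j, Scale=-4}; if i != j,
    // the accesses are at least 4 bytes apart.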
1308     const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1309     const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1310     if (Var0.Scale == -Var1.Scale && Var0.Val.TruncBits == 0 &&
1311         Var0.Val.hasSameCastsAs(Var1.Val) && VisitedPhiBBs.empty() &&
1312         isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
1313                         DT))
1314       MinAbsVarIndex = Var0.Scale.abs();
1315   }
1316 
1317   if (MinAbsVarIndex) {
    // The constant offset will have at least +/-MinAbsVarIndex added to it.
1319     APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1320     APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1321     // We know that Offset <= OffsetLo || Offset >= OffsetHi
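    // For example, with Offset = 0 and MinAbsVarIndex = 4 we get OffsetLo = -4
    // and OffsetHi = 4, so the accesses cannot overlap whenever V1Size <= 4
    // and V2Size <= 4.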
1322     if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
1323         OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
1324       return AliasResult::NoAlias;
1325   }
1326 
1327   if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT))
1328     return AliasResult::NoAlias;
1329 
1330   // Statically, we can see that the base objects are the same, but the
1331   // pointers have dynamic offsets which we can't resolve. And none of our
1332   // little tricks above worked.
1333   return AliasResult::MayAlias;
1334 }
1335 
1336 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1337   // If the results agree, take it.
1338   if (A == B)
1339     return A;
1340   // A mix of PartialAlias and MustAlias is PartialAlias.
1341   if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
1342       (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
1343     return AliasResult::PartialAlias;
1344   // Otherwise, we don't know anything.
1345   return AliasResult::MayAlias;
1346 }
1347 
1348 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1349 /// against another.
1350 AliasResult
1351 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1352                            const Value *V2, LocationSize V2Size,
1353                            AAQueryInfo &AAQI) {
1354   // If the values are Selects with the same condition, we can do a more precise
1355   // check: just check for aliases between the values on corresponding arms.
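  // For example, select i1 %c, ptr %a, ptr %b cannot alias
  // select i1 %c, ptr %x, ptr %y if %a does not alias %x and %b does not
  // alias %y.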
1356   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1357     if (SI->getCondition() == SI2->getCondition()) {
1358       AliasResult Alias = getBestAAResults().alias(
1359           MemoryLocation(SI->getTrueValue(), SISize),
1360           MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1361       if (Alias == AliasResult::MayAlias)
1362         return AliasResult::MayAlias;
1363       AliasResult ThisAlias = getBestAAResults().alias(
1364           MemoryLocation(SI->getFalseValue(), SISize),
1365           MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1366       return MergeAliasResults(ThisAlias, Alias);
1367     }
1368 
  // If both arms of the Select node NoAlias or MustAlias V2, then we return
  // NoAlias / MustAlias. Otherwise, we return MayAlias.
1371   AliasResult Alias =
1372       getBestAAResults().alias(MemoryLocation(SI->getTrueValue(), SISize),
1373                                MemoryLocation(V2, V2Size), AAQI);
1374   if (Alias == AliasResult::MayAlias)
1375     return AliasResult::MayAlias;
1376 
1377   AliasResult ThisAlias =
1378       getBestAAResults().alias(MemoryLocation(SI->getFalseValue(), SISize),
1379                                MemoryLocation(V2, V2Size), AAQI);
1380   return MergeAliasResults(ThisAlias, Alias);
1381 }
1382 
1383 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1384 /// another.
1385 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1386                                     const Value *V2, LocationSize V2Size,
1387                                     AAQueryInfo &AAQI) {
1388   if (!PN->getNumIncomingValues())
1389     return AliasResult::NoAlias;
1390   // If the values are PHIs in the same block, we can do a more precise
1391   // as well as efficient check: just check for aliases between the values
1392   // on corresponding edges.
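  // For example, %p = phi ptr [ %a, %bb1 ], [ %b, %bb2 ] and
  // %q = phi ptr [ %x, %bb1 ], [ %y, %bb2 ] can only alias if %a aliases %x
  // or %b aliases %y.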
1393   if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1394     if (PN2->getParent() == PN->getParent()) {
1395       Optional<AliasResult> Alias;
1396       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1397         AliasResult ThisAlias = getBestAAResults().alias(
1398             MemoryLocation(PN->getIncomingValue(i), PNSize),
1399             MemoryLocation(
1400                 PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
1401             AAQI);
1402         if (Alias)
1403           *Alias = MergeAliasResults(*Alias, ThisAlias);
1404         else
1405           Alias = ThisAlias;
1406         if (*Alias == AliasResult::MayAlias)
1407           break;
1408       }
1409       return *Alias;
1410     }
1411 
1412   SmallVector<Value *, 4> V1Srcs;
1413   // If a phi operand recurses back to the phi, we can still determine NoAlias
1414   // if we don't alias the underlying objects of the other phi operands, as we
1415   // know that the recursive phi needs to be based on them in some way.
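  // A typical case is a pointer induction variable:
  //   %p = phi ptr [ %base, %entry ], [ %p.next, %loop ]
  //   %p.next = getelementptr i8, ptr %p, i64 1
  // Here getUnderlyingObject(%p.next) is %p itself, so %base is the only
  // non-recursive underlying value.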
1416   bool isRecursive = false;
1417   auto CheckForRecPhi = [&](Value *PV) {
1418     if (!EnableRecPhiAnalysis)
1419       return false;
1420     if (getUnderlyingObject(PV) == PN) {
1421       isRecursive = true;
1422       return true;
1423     }
1424     return false;
1425   };
1426 
1427   if (PV) {
1428     // If we have PhiValues then use it to get the underlying phi values.
1429     const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
    // If we have more phi values than the search depth then return MayAlias
    // conservatively to avoid compile time explosion. The worst possible case
    // is if both sides are PHI nodes, in which case this is O(m x n) time,
    // where 'm' and 'n' are the number of PHI sources.
1434     if (PhiValueSet.size() > MaxLookupSearchDepth)
1435       return AliasResult::MayAlias;
1436     // Add the values to V1Srcs
1437     for (Value *PV1 : PhiValueSet) {
1438       if (CheckForRecPhi(PV1))
1439         continue;
1440       V1Srcs.push_back(PV1);
1441     }
1442   } else {
    // If we don't have PhiValues then just look at the operands of the phi
    // itself.
    // FIXME: Remove this once we can guarantee that we always have PhiValues.
1445     SmallPtrSet<Value *, 4> UniqueSrc;
1446     Value *OnePhi = nullptr;
1447     for (Value *PV1 : PN->incoming_values()) {
1448       if (isa<PHINode>(PV1)) {
1449         if (OnePhi && OnePhi != PV1) {
1450           // To control potential compile time explosion, we choose to be
          // conservative when we have more than one Phi input. It is important
1452           // that we handle the single phi case as that lets us handle LCSSA
1453           // phi nodes and (combined with the recursive phi handling) simple
1454           // pointer induction variable patterns.
1455           return AliasResult::MayAlias;
1456         }
1457         OnePhi = PV1;
1458       }
1459 
1460       if (CheckForRecPhi(PV1))
1461         continue;
1462 
1463       if (UniqueSrc.insert(PV1).second)
1464         V1Srcs.push_back(PV1);
1465     }
1466 
1467     if (OnePhi && UniqueSrc.size() > 1)
      // Out of an abundance of caution, allow only the trivial LCSSA and
      // recursive phi cases.
1470       return AliasResult::MayAlias;
1471   }
1472 
1473   // If V1Srcs is empty then that means that the phi has no underlying non-phi
1474   // value. This should only be possible in blocks unreachable from the entry
1475   // block, but return MayAlias just in case.
1476   if (V1Srcs.empty())
1477     return AliasResult::MayAlias;
1478 
1479   // If this PHI node is recursive, indicate that the pointer may be moved
1480   // across iterations. We can only prove NoAlias if different underlying
1481   // objects are involved.
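  // In the induction-variable example above, %p may have advanced arbitrarily
  // far from %base, so only the identity of the underlying objects remains
  // usable.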
1482   if (isRecursive)
1483     PNSize = LocationSize::beforeOrAfterPointer();
1484 
1485   // In the recursive alias queries below, we may compare values from two
1486   // different loop iterations. Keep track of visited phi blocks, which will
1487   // be used when determining value equivalence.
1488   bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
1489   auto _ = make_scope_exit([&]() {
1490     if (BlockInserted)
1491       VisitedPhiBBs.erase(PN->getParent());
1492   });
1493 
1494   // If we inserted a block into VisitedPhiBBs, alias analysis results that
1495   // have been cached earlier may no longer be valid. Perform recursive queries
1496   // with a new AAQueryInfo.
1497   AAQueryInfo NewAAQI = AAQI.withEmptyCache();
1498   AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;
1499 
1500   AliasResult Alias = getBestAAResults().alias(
1501       MemoryLocation(V1Srcs[0], PNSize), MemoryLocation(V2, V2Size), *UseAAQI);
1502 
1503   // Early exit if the check of the first PHI source against V2 is MayAlias.
1504   // Other results are not possible.
1505   if (Alias == AliasResult::MayAlias)
1506     return AliasResult::MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid for all elements, so we have to conservatively return
  // MayAlias.
1509   if (isRecursive && Alias != AliasResult::NoAlias)
1510     return AliasResult::MayAlias;
1511 
  // If all sources of the PHI node NoAlias or MustAlias V2, then we return
  // NoAlias / MustAlias. Otherwise, we return MayAlias.
1514   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1515     Value *V = V1Srcs[i];
1516 
1517     AliasResult ThisAlias = getBestAAResults().alias(
1518         MemoryLocation(V, PNSize), MemoryLocation(V2, V2Size), *UseAAQI);
1519     Alias = MergeAliasResults(ThisAlias, Alias);
1520     if (Alias == AliasResult::MayAlias)
1521       break;
1522   }
1523 
1524   return Alias;
1525 }
1526 
1527 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1528 /// array references.
1529 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1530                                       const Value *V2, LocationSize V2Size,
1531                                       AAQueryInfo &AAQI) {
1532   // If either of the memory references is empty, it doesn't matter what the
1533   // pointer values are.
1534   if (V1Size.isZero() || V2Size.isZero())
1535     return AliasResult::NoAlias;
1536 
1537   // Strip off any casts if they exist.
1538   V1 = V1->stripPointerCastsForAliasAnalysis();
1539   V2 = V2->stripPointerCastsForAliasAnalysis();
1540 
1541   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1542   // value for undef that aliases nothing in the program.
1543   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1544     return AliasResult::NoAlias;
1545 
1546   // Are we checking for alias of the same value?
1547   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1548   // different iterations. We must therefore make sure that this is not the
1549   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1550   // happen by looking at the visited phi nodes and making sure they cannot
1551   // reach the value.
1552   if (isValueEqualInPotentialCycles(V1, V2))
1553     return AliasResult::MustAlias;
1554 
1555   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1556     return AliasResult::NoAlias; // Scalars cannot alias each other
1557 
1558   // Figure out what objects these things are pointing to if we can.
1559   const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1560   const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1561 
  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
1564   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1565     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1566       return AliasResult::NoAlias;
1567   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1568     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1569       return AliasResult::NoAlias;
1570 
1571   if (O1 != O2) {
1572     // If V1/V2 point to two different objects, we know that we have no alias.
1573     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1574       return AliasResult::NoAlias;
1575 
1576     // Constant pointers can't alias with non-const isIdentifiedObject objects.
1577     if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1578         (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1579       return AliasResult::NoAlias;
1580 
1581     // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
1583     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1584         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1585       return AliasResult::NoAlias;
1586 
1587     // If one pointer is the result of a call/invoke or load and the other is a
1588     // non-escaping local object within the same function, then we know the
1589     // object couldn't escape to a point where the call could return it.
1590     //
1591     // Note that if the pointers are in different functions, there are a
1592     // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
1594     // location if that memory location doesn't escape. Or it may pass a
1595     // nocapture value to other functions as long as they don't capture it.
1596     if (isEscapeSource(O1) &&
1597         AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1)))
1598       return AliasResult::NoAlias;
1599     if (isEscapeSource(O2) &&
1600         AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2)))
1601       return AliasResult::NoAlias;
1602   }
1603 
1604   // If the size of one access is larger than the entire object on the other
1605   // side, then we know such behavior is undefined and can assume no alias.
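  // For example, an 8-byte access cannot lie within a 4-byte alloca, so such
  // an access must be to a different object and the two cannot alias.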
1606   bool NullIsValidLocation = NullPointerIsDefined(&F);
1607   if ((isObjectSmallerThan(
1608           O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1609           TLI, NullIsValidLocation)) ||
1610       (isObjectSmallerThan(
1611           O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1612           TLI, NullIsValidLocation)))
1613     return AliasResult::NoAlias;
1614 
  // If one of the accesses may be before the accessed pointer, canonicalize
  // this by using unknown after-pointer sizes for both accesses. This is
  // equivalent, because regardless of which pointer is lower, one of them
  // will always come after the other, as long as the underlying objects aren't
1619   // disjoint. We do this so that the rest of BasicAA does not have to deal
1620   // with accesses before the base pointer, and to improve cache utilization by
1621   // merging equivalent states.
1622   if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1623     V1Size = LocationSize::afterPointer();
1624     V2Size = LocationSize::afterPointer();
1625   }
1626 
1627   // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1628   // for recursive queries. For this reason, this limit is chosen to be large
1629   // enough to be very rarely hit, while still being small enough to avoid
1630   // stack overflows.
1631   if (AAQI.Depth >= 512)
1632     return AliasResult::MayAlias;
1633 
1634   // Check the cache before climbing up use-def chains. This also terminates
1635   // otherwise infinitely recursive queries.
1636   AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size});
1637   const bool Swapped = V1 > V2;
1638   if (Swapped)
1639     std::swap(Locs.first, Locs.second);
1640   const auto &Pair = AAQI.AliasCache.try_emplace(
1641       Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
1642   if (!Pair.second) {
1643     auto &Entry = Pair.first->second;
1644     if (!Entry.isDefinitive()) {
1645       // Remember that we used an assumption.
1646       ++Entry.NumAssumptionUses;
1647       ++AAQI.NumAssumptionUses;
1648     }
1649     // Cache contains sorted {V1,V2} pairs but we should return original order.
1650     auto Result = Entry.Result;
1651     Result.swap(Swapped);
1652     return Result;
1653   }
1654 
1655   int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1656   unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1657   AliasResult Result =
1658       aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1659 
1660   auto It = AAQI.AliasCache.find(Locs);
1661   assert(It != AAQI.AliasCache.end() && "Must be in cache");
1662   auto &Entry = It->second;
1663 
1664   // Check whether a NoAlias assumption has been used, but disproven.
1665   bool AssumptionDisproven =
1666       Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1667   if (AssumptionDisproven)
1668     Result = AliasResult::MayAlias;
1669 
1670   // This is a definitive result now, when considered as a root query.
1671   AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1672   Entry.Result = Result;
1673   // Cache contains sorted {V1,V2} pairs.
1674   Entry.Result.swap(Swapped);
1675   Entry.NumAssumptionUses = -1;
1676 
1677   // If the assumption has been disproven, remove any results that may have
1678   // been based on this assumption. Do this after the Entry updates above to
1679   // avoid iterator invalidation.
1680   if (AssumptionDisproven)
1681     while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1682       AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
1683 
1684   // The result may still be based on assumptions higher up in the chain.
1685   // Remember it, so it can be purged from the cache later.
1686   if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1687       Result != AliasResult::MayAlias)
1688     AAQI.AssumptionBasedResults.push_back(Locs);
1689   return Result;
1690 }
1691 
AliasResult BasicAAResult::aliasCheckRecursive(
    const Value *V1, LocationSize V1Size, const Value *V2, LocationSize V2Size,
    AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1696   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1697     AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1698     if (Result != AliasResult::MayAlias)
1699       return Result;
1700   } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1701     AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1702     Result.swap();
1703     if (Result != AliasResult::MayAlias)
1704       return Result;
1705   }
1706 
1707   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1708     AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1709     if (Result != AliasResult::MayAlias)
1710       return Result;
1711   } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1712     AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1713     Result.swap();
1714     if (Result != AliasResult::MayAlias)
1715       return Result;
1716   }
1717 
1718   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1719     AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1720     if (Result != AliasResult::MayAlias)
1721       return Result;
1722   } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1723     AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1724     Result.swap();
1725     if (Result != AliasResult::MayAlias)
1726       return Result;
1727   }
1728 
1729   // If both pointers are pointing into the same object and one of them
1730   // accesses the entire object, then the accesses must overlap in some way.
1731   if (O1 == O2) {
1732     bool NullIsValidLocation = NullPointerIsDefined(&F);
1733     if (V1Size.isPrecise() && V2Size.isPrecise() &&
1734         (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1735          isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1736       return AliasResult::PartialAlias;
1737   }
1738 
1739   return AliasResult::MayAlias;
1740 }
1741 
1742 /// Check whether two Values can be considered equivalent.
1743 ///
/// In addition to pointer equivalence of \p V and \p V2 this checks whether
/// they cannot be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value. We
/// have to do this because we are looking through phi nodes (that is, we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
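/// For example, if V is an instruction inside a loop whose header contains a
/// visited phi, the two occurrences of V may stem from different iterations
/// and must not be treated as equal.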
1749 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1750                                                   const Value *V2) {
1751   if (V != V2)
1752     return false;
1753 
1754   const Instruction *Inst = dyn_cast<Instruction>(V);
1755   if (!Inst)
1756     return true;
1757 
1758   if (VisitedPhiBBs.empty())
1759     return true;
1760 
1761   if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1762     return false;
1763 
1764   // Make sure that the visited phis cannot reach the Value. This ensures that
1765   // the Values cannot come from different iterations of a potential cycle the
1766   // phi nodes could be involved in.
1767   for (const auto *P : VisitedPhiBBs)
1768     if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
1769       return false;
1770 
1771   return true;
1772 }
1773 
1774 /// Computes the symbolic difference between two de-composed GEPs.
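/// For example, subtracting {Offset = 4, VarIndices = [{%i, Scale = 4}]} from
/// {Offset = 8, VarIndices = [{%i, Scale = 4}]} cancels the matching variable
/// index and yields {Offset = 4, VarIndices = []}.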
1775 void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1776                                            const DecomposedGEP &SrcGEP) {
1777   DestGEP.Offset -= SrcGEP.Offset;
1778   for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1779     // Find V in Dest.  This is N^2, but pointer indices almost never have more
1780     // than a few variable indexes.
1781     bool Found = false;
1782     for (auto I : enumerate(DestGEP.VarIndices)) {
1783       VariableGEPIndex &Dest = I.value();
1784       if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V) ||
1785           !Dest.Val.hasSameCastsAs(Src.Val))
1786         continue;
1787 
1788       // If we found it, subtract off Scale V's from the entry in Dest.  If it
1789       // goes to zero, remove the entry.
1790       if (Dest.Scale != Src.Scale) {
1791         Dest.Scale -= Src.Scale;
1792         Dest.IsNSW = false;
1793       } else {
1794         DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
1795       }
1796       Found = true;
1797       break;
1798     }
1799 
1800     // If we didn't consume this entry, add it to the end of the Dest list.
1801     if (!Found) {
1802       VariableGEPIndex Entry = {Src.Val, -Src.Scale, Src.CxtI, Src.IsNSW};
1803       DestGEP.VarIndices.push_back(Entry);
1804     }
1805   }
1806 }
1807 
1808 bool BasicAAResult::constantOffsetHeuristic(
1809     const DecomposedGEP &GEP, LocationSize MaybeV1Size,
1810     LocationSize MaybeV2Size, AssumptionCache *AC, DominatorTree *DT) {
1811   if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1812       !MaybeV2Size.hasValue())
1813     return false;
1814 
1815   const uint64_t V1Size = MaybeV1Size.getValue();
1816   const uint64_t V2Size = MaybeV2Size.getValue();
1817 
1818   const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
1819 
1820   if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
1821       Var0.Scale != -Var1.Scale ||
1822       Var0.Val.V->getType() != Var1.Val.V->getType())
1823     return false;
1824 
  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. For example, if Var0 is
  // zext(%x + 1), we should get E0.Val == %x and E0.Offset == 1.
1828 
1829   LinearExpression E0 =
1830       GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
1831   LinearExpression E1 =
1832       GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
1833   if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
1834       !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
1835     return false;
1836 
1837   // We have a hit - Var0 and Var1 only differ by a constant offset!
1838 
  // If we've been sext'ed then zext'd, the maximum difference between Var0 and
  // Var1 can be calculated, but we're just interested in the absolute minimum
  // difference between the two. The minimum distance may occur due to
1842   // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1843   // the minimum distance between %i and %i + 5 is 3.
1844   APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1845   MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1848 
1849   // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1850   // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1851   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1852   // V2Size can fit in the MinDiffBytes gap.
1853   return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
1854          MinDiffBytes.uge(V2Size + GEP.Offset.abs());
1855 }
1856 
1857 //===----------------------------------------------------------------------===//
1858 // BasicAliasAnalysis Pass
1859 //===----------------------------------------------------------------------===//
1860 
1861 AnalysisKey BasicAA::Key;
1862 
1863 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1864   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1865   auto &AC = AM.getResult<AssumptionAnalysis>(F);
1866   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1867   auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F);
1868   return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV);
1869 }
1870 
1871 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1872   initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1873 }
1874 
1875 char BasicAAWrapperPass::ID = 0;
1876 
1877 void BasicAAWrapperPass::anchor() {}
1878 
1879 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
1880                       "Basic Alias Analysis (stateless AA impl)", true, true)
1881 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1882 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1883 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1884 INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
1885 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
1886                     "Basic Alias Analysis (stateless AA impl)", true, true)
1887 
1888 FunctionPass *llvm::createBasicAAWrapperPass() {
1889   return new BasicAAWrapperPass();
1890 }
1891 
1892 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1893   auto &ACT = getAnalysis<AssumptionCacheTracker>();
1894   auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1895   auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1896   auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
1897 
1898   Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
1899                                  TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1900                                  &DTWP.getDomTree(),
1901                                  PVWP ? &PVWP->getResult() : nullptr));
1902 
1903   return false;
1904 }
1905 
1906 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1907   AU.setPreservesAll();
1908   AU.addRequiredTransitive<AssumptionCacheTracker>();
1909   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1910   AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
1911   AU.addUsedIfAvailable<PhiValuesWrapperPass>();
1912 }
1913 
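/// A helper for the legacy pass manager to create a \c BasicAAResult object
/// for a \c Function.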
1914 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
1915   return BasicAAResult(
1916       F.getParent()->getDataLayout(), F,
1917       P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
1918       P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1919 }
1920