//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;
/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));

/// By default, even on 32-bit architectures we use 64-bit integers for
/// calculations. This allows us to more aggressively decompose indexing
/// expressions calculated using i64 values (e.g., long long in C), which are
/// common enough to worry about.
static cl::opt<bool> ForceAtLeast64Bits("basicaa-force-at-least-64b",
                                        cl::Hidden, cl::init(true));
static cl::opt<bool> DoubleCalcBits("basicaa-double-calc-bits",
                                    cl::Hidden, cl::init(false));

/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. It affects the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(); both functions need to use the same search
// depth, otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}
//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
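///
/// For example (a sketch), an alloca whose address is only passed as a
/// nocapture call argument does not escape:
///   %buf = alloca [16 x i8]
///   call void @use(i8* nocapture %buf)
/// whereas storing %buf's address to memory would count as an escape.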
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function.  Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallBase>(V))
    return true;

  if (isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
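///
/// For example, for "%a = alloca [10 x i32]" this returns 40; with
/// RoundToAlign set, the result is rounded up to the alloca's alignment.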
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meaning of "object" is slightly different in the
  // following contexts:
  //    c1: llvm::getObjectSize()
  //    c2: llvm.objectsize() intrinsic
  //    c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  //  Consider this example:
  //     char *p = (char*)malloc(100);
  //     char *q = p+80;
  //
  //  In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  //  However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the middle
  // of the "object". In case q is passed to isObjectSmallerThan() as the 1st
  // parameter, before llvm::getObjectSize() is called to get the size of the
  // entire object, we should:
  //    - either rewind the pointer q to the base-address of the object in
  //      question (in this case rewind to p), or
  //    - just give up. It is up to the caller to make sure the pointer is
  //      pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// reports whether we looked through any sign or zero extends.  The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
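///
/// For example (a sketch), "add nsw i32 %x, 4" decomposes with Scale = 1 and
/// Offset = 4, while "shl i32 %x, 1" decomposes with Scale = 2 and
/// Offset = 0; both return %x as the remaining Value.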
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X.  Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);

        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (Offset.getBitWidth() < RHS.getLimitedValue() ||
            Scale.getBitWidth() < RHS.getLimitedValue()) {
          Scale = 1;
          Offset = 0;
          return V;
        }

        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets.  The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than the maximum pointer size. This is
/// an issue, in particular, for 32b pointers with negative indices that rely
/// on two's-complement wrap-arounds for precise alias information where the
/// maximum pointer size is 64b.
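///
/// For example, with a 64-bit Offset and PointerSize == 32, an offset of
/// 0x00000000FFFFFFFF is shifted up and arithmetically shifted back down,
/// yielding -1: the value the offset would have after 32-bit wrap-around.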
static APInt adjustToPointerSize(APInt Offset, unsigned PointerSize) {
  assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}

static unsigned getMaxPointerSize(const DataLayout &DL) {
  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
  if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
  if (DoubleCalcBits) MaxPointerSize *= 2;

  return MaxPointerSize;
}
/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
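///
/// For example (a sketch, assuming %S = type { i32, i32 } on a 64-bit
/// target), decomposing
///   %a = getelementptr inbounds %S, %S* %p, i64 %i, i32 1
/// yields Base = %p, StructOffset = 4, OtherOffset = 0, and a single
/// variable index { V = %i, Scale = 8 }.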
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
       DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
       DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  unsigned MaxPointerSize = getMaxPointerSize(DL);
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed
        // with the attributes, but have properties like returning an aliasing
        // pointer. Because some analyses may assume that a nocapture pointer
        // is not returned from such a special intrinsic (because the function
        // would have to be marked with a returned attribute), it is crucial to
        // use this function because it should be in sync with CaptureTracking.
        // Not using it may cause weird miscompilations where two aliasing
        // pointers are assumed to be noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call)) {
          V = RP;
          continue;
        }
      }

      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
          DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
          (DL.getTypeAllocSize(GTI.getIndexedType()) *
            CIdx->getValue().sextOrSelf(MaxPointerSize))
              .sextOrTrunc(MaxPointerSize);
        continue;
      }

      GepHasConstantOffset = false;

      APInt Scale(MaxPointerSize, DL.getTypeAllocSize(GTI.getIndexedType()));
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      const Value *OrigIndex = Index;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.

      // It can be the case that, even though C1*V+C2 does not overflow for
      // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
      // decompose the expression in this way.
      //
      // FIXME: C1*Scale and the other operations in the decomposed
      // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
      // possibility.
      APInt WideScaledOffset = IndexOffset.sextOrTrunc(MaxPointerSize*2) *
                                 Scale.sext(MaxPointerSize*2);
      if (WideScaledOffset.getMinSignedBits() > MaxPointerSize) {
        Index = OrigIndex;
        IndexScale = 1;
        IndexOffset = 0;

        ZExtBits = SExtBits = 0;
        if (PointerSize > Width)
          SExtBits += PointerSize - Width;
      } else {
        Decomposed.OtherOffset += IndexOffset.sextOrTrunc(MaxPointerSize) * Scale;
        Scale *= IndexScale.sextOrTrunc(MaxPointerSize);
      }

      // If we already had an occurrence of this index variable, merge this
      // scale into it.  For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits, Scale};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds
    if (GepHasConstantOffset) {
      Decomposed.StructOffset =
          adjustToPointerSize(Decomposed.StructOffset, PointerSize);
      Decomposed.OtherOffset =
          adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
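///
/// For example, a select between two constant globals still points to
/// constant memory:
///   @a = constant i32 1
///   @b = constant i32 2
///   %p = select i1 %c, i32* @a, i32* @b  ; pointsToConstantMemory -> true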
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others.  GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
  if (Call->doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (Call->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (Call->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (Call->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (Call->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (Call->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
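
  // For example, a call site that is both readonly and argmemonly intersects
  // to FMRB_OnlyReadsArgumentPointees through the masks above.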

  // If the call has operand bundles then aliasing attributes from the function
  // it calls do not directly apply to the call.  This can be made more precise
  // in the future.
  if (!Call->hasOperandBundles())
    if (const Function *F = Call->getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}
/// Returns true if this is a writeonly (i.e., Mod only) parameter.
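///
/// For example, the destination argument of
///   memset_pattern16(dst, pattern16, 64);
/// (ArgIdx == 0) is written but never read, so it is treated as Mod-only.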
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset.  This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (Call->getCalledFunction() &&
      TLI.getLibFunc(*Call->getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(Call, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AliasCache.find(LocPair(LocA, LocB));
  if (CacheIt != AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags);
  // AliasCache rarely has more than 1 or 2 elements, always use
  // shrink_and_clear so it quickly returns to the inline capacity of the
  // SmallDenseMap if it ever grows larger.
  // FIXME: This should really be shrink_to_inline_capacity_and_clear().
  AliasCache.shrink_and_clear();
  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query.  We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;
  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      isNonEscapingLocalObject(Object)) {

    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments.  If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) &&
           OperandNo < Call->getNumArgOperands() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));
      if (AR != MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object'; continue looking for other aliases.
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but the call only writes into it.
      if (Call->doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and the call reads and writes into it.
      // Setting ModRef will not yield an early return below; MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases; reset the Must bit. It is added back below if at
    // least one operand aliases and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved the mod/ref information.
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the call is to malloc or calloc, we can assume that it doesn't
  // modify any IR visible value.  This is only valid because we assume these
  // routines do not read values visible in the IR.  TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well.  Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation(Call), Loc) == NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics forbid overlap between their respective
  // operands, i.e., source and destination of any given memcpy must no-alias.
  // If Loc must-aliases either one of these two locations, then it necessarily
  // no-aliases the other.
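  //
  // For example, given "memcpy(%dst, %src, 16)", a location known to
  // must-alias %src can only be read by the call (Ref), while one that
  // must-aliases %dst can only be written (Mod).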
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
    AliasResult SrcAA, DestAA;

    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
                                          Loc)) == MustAlias)
      // Loc is exactly the memcpy source thus disjoint from memcpy dest.
      return ModRefInfo::Ref;
    if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
                                           Loc)) == MustAlias)
      // The converse case.
      return ModRefInfo::Mod;

    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != NoAlias)
      rv = setRef(rv);
    if (DestAA != NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  // *ptr = 40;
  // *ptr = 50;
  // invariant_start(ptr)
  // int val = *ptr;
  // print(val);
  //
  // This cannot be transformed to:
  //
  // *ptr = 40;
  // invariant_start(ptr)
  // *ptr = 50;
  // int val = *ptr;
  // print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // the rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call, Loc);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call1, Intrinsic::assume) ||
      isIntrinsicCall(Call2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special-case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call1, Call2);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
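///
/// For example (a sketch), with %S = type { i32, i32 } and 4-byte accesses,
///   %f0 = getelementptr inbounds %S, %S* %p, i64 %i, i32 0
///   %f1 = getelementptr inbounds %S, %S* %p, i64 %j, i32 1
/// address distinct, non-overlapping fields of the same struct, so the rules
/// below conclude NoAlias even though %i and %j are unknown.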
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            LocationSize MaybeV1Size,
                                            const GEPOperator *GEP2,
                                            LocationSize MaybeV2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (MaybeV1Size == LocationSize::unknown() ||
      MaybeV2Size == LocationSize::unknown())
    return MayAlias;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other
  // indices might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2) {
    unsigned BitWidth = std::max(C1->getBitWidth(), C2->getBitWidth());
    if (C1->getValue().sextOrSelf(BitWidth) ==
        C2->getValue().sextOrSelf(BitWidth))
      return MayAlias;
  }

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type.  If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
    GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<SequentialType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or pointer);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    const uint64_t ElementSize =
        DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and the one
    // that GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.

    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    {
      Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
      Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
      if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
        // If one of the indices is a PHI node, be safe and only use
        // computeKnownBits so we don't make any assumptions about the
        // relationships between the two indices. This is important if we're
        // asking about values from different loop iterations. See PR32314.
        // TODO: We may be able to change the check so we only do this when
        // we definitely looked through a PHINode.
        if (GEP1LastIdx != GEP2LastIdx &&
            GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
          KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
          KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
          if (Known1.Zero.intersects(Known2.One) ||
              Known1.One.intersects(Known2.Zero))
            return NoAlias;
        }
      } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
        return NoAlias;
    }
    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  if (C1->getValue().getActiveBits() > 64 ||
      C2->getValue().getActiveBits() > 64)
    return MayAlias;

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint.  Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.

  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}
1198 
1199 // If a we have (a) a GEP and (b) a pointer based on an alloca, and the
1200 // beginning of the object the GEP points would have a negative offset with
1201 // repsect to the alloca, that means the GEP can not alias pointer (b).
1202 // Note that the pointer based on the alloca may not be a GEP. For
1203 // example, it may be the alloca itself.
1204 // The same applies if (b) is based on a GlobalVariable. Note that just being
1205 // based on isIdentifiedObject() is not enough - we need an identified object
1206 // that does not permit access to negative offsets. For example, a negative
1207 // offset from a noalias argument or call can be inbounds w.r.t the actual
1208 // underlying object.
1209 //
1210 // For example, consider:
1211 //
1212 //   struct { int f0, int f1, ...} foo;
1213 //   foo alloca;
1214 //   foo* random = bar(alloca);
1215 //   int *f0 = &alloca.f0
1216 //   int *f1 = &random->f1;
1217 //
1218 // Which is lowered, approximately, to:
1219 //
1220 //  %alloca = alloca %struct.foo
1221 //  %random = call %struct.foo* @random(%struct.foo* %alloca)
1222 //  %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
1223 //  %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
1224 //
1225 // Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
1226 // by %alloca. Since the %f1 GEP is inbounds, that means %random must also
1227 // point into the same object. But since %f0 points to the beginning of %alloca,
1228 // the highest %f1 can be is (%alloca + 3). This means %random cannot be higher
1229 // than (%alloca - 1), and so is not inbounds, a contradiction.
1230 bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
1231       const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
1232       LocationSize MaybeObjectAccessSize) {
1233   // If the object access size is unknown, or the GEP isn't inbounds, bail.
1234   if (MaybeObjectAccessSize == LocationSize::unknown() || !GEPOp->isInBounds())
1235     return false;
1236 
1237   const uint64_t ObjectAccessSize = MaybeObjectAccessSize.getValue();
1238 
1239   // We need the object to be an alloca or a GlobalVariable, and want to know
1240   // the offset of the pointer from the object precisely, so no variable
1241   // indices are allowed.
1242   if (!(isa<AllocaInst>(DecompObject.Base) ||
1243         isa<GlobalVariable>(DecompObject.Base)) ||
1244       !DecompObject.VarIndices.empty())
1245     return false;
1246 
1247   APInt ObjectBaseOffset = DecompObject.StructOffset +
1248                            DecompObject.OtherOffset;
1249 
1250   // If the GEP has no variable indices, we know the precise offset
1251   // from the base and can use it. If the GEP has variable indices,
1252   // we can't compute an exact offset, so we can't tell whether the
1253   // pointers alias; return false in that case.
1254   if (!DecompGEP.VarIndices.empty())
1255     return false;
1256 
1257   APInt GEPBaseOffset = DecompGEP.StructOffset;
1258   GEPBaseOffset += DecompGEP.OtherOffset;
1259 
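  // Worked example (invented numbers): suppose the object access covers
  // bytes [0,16) of an alloca and the GEP's result lies 16 or more bytes
  // past its own base. If the two overlapped, the GEP's base would have to
  // start before the alloca, i.e. at a negative offset into it, which the
  // inbounds guarantee rules out.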
1260   return GEPBaseOffset.sge(ObjectBaseOffset + (int64_t)ObjectAccessSize);
1261 }
1262 
1263 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1264 /// another pointer.
1265 ///
1266 /// We know that V1 is a GEP, but we don't know anything about V2.
1267 /// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
1268 /// V2.
1269 AliasResult
1270 BasicAAResult::aliasGEP(const GEPOperator *GEP1, LocationSize V1Size,
1271                         const AAMDNodes &V1AAInfo, const Value *V2,
1272                         LocationSize V2Size, const AAMDNodes &V2AAInfo,
1273                         const Value *UnderlyingV1, const Value *UnderlyingV2) {
1274   DecomposedGEP DecompGEP1, DecompGEP2;
1275   unsigned MaxPointerSize = getMaxPointerSize(DL);
1276   DecompGEP1.StructOffset = DecompGEP1.OtherOffset = APInt(MaxPointerSize, 0);
1277   DecompGEP2.StructOffset = DecompGEP2.OtherOffset = APInt(MaxPointerSize, 0);
1278 
1279   bool GEP1MaxLookupReached =
1280     DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
1281   bool GEP2MaxLookupReached =
1282     DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);
1283 
1284   APInt GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
1285   APInt GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;
1286 
1287   assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
1288          "DecomposeGEPExpression returned a result different from "
1289          "GetUnderlyingObject");
1290 
1291   // If the GEP's offset relative to its base is such that the base would
1292   // fall below the start of the object underlying V2, then the GEP and V2
1293   // cannot alias.
1294   if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
1295       isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
1296     return NoAlias;
1297   // If we have two gep instructions with must-alias or not-alias'ing base
1298   // pointers, figure out if the indexes to the GEP tell us anything about the
1299   // derived pointer.
1300   if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
1301     // Check for the GEP base being at a negative offset, this time in the other
1302     // direction.
1303     if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
1304         isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
1305       return NoAlias;
1306     // Do the base pointers alias?
1307     AliasResult BaseAlias =
1308         aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(),
1309                    UnderlyingV2, LocationSize::unknown(), AAMDNodes());
1310 
1311     // Check for geps of non-aliasing underlying pointers where the offsets are
1312     // identical.
1313     if ((BaseAlias == MayAlias) && V1Size == V2Size) {
1314       // Do the base pointers alias assuming type and size.
1315       AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
1316                                                 UnderlyingV2, V2Size, V2AAInfo);
1317       if (PreciseBaseAlias == NoAlias) {
1318         // See if the computed offset from the common pointer tells us about the
1319         // relation of the resulting pointer.
1320         // If the max search depth is reached, the result is undefined.
1321         if (GEP2MaxLookupReached || GEP1MaxLookupReached)
1322           return MayAlias;
1323 
1324         // Same offsets.
1325         if (GEP1BaseOffset == GEP2BaseOffset &&
1326             DecompGEP1.VarIndices == DecompGEP2.VarIndices)
1327           return NoAlias;
1328       }
1329     }
1330 
1331     // If we get a No or May, then return it immediately, no amount of analysis
1332     // will improve this situation.
1333     if (BaseAlias != MustAlias) {
1334       assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
1335       return BaseAlias;
1336     }
1337 
1338     // Otherwise, we have a MustAlias.  Since the base pointers alias each other
1339     // exactly, see if the computed offset from the common pointer tells us
1340     // about the relation of the resulting pointer.
1341     // If we know the two GEPs are based off of the exact same pointer (and not
1342     // just the same underlying object), see if that tells us anything about
1343     // the resulting pointers.
1344     if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
1345             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
1346         GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
1347       AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
1348       // If we couldn't find anything interesting, don't abandon just yet.
1349       if (R != MayAlias)
1350         return R;
1351     }
1352 
1353     // If the max search depth is reached, the result is undefined
1354     if (GEP2MaxLookupReached || GEP1MaxLookupReached)
1355       return MayAlias;
1356 
1357     // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1358     // symbolic difference.
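    // For illustration (hypothetical decomposition): if GEP1 = %base + 8 + 4*%i
    // and GEP2 = %base + 4*%i, the 4*%i terms cancel, leaving a constant
    // difference of 8 and an empty variable-index list for the checks below.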
1359     GEP1BaseOffset -= GEP2BaseOffset;
1360     GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);
1361 
1362   } else {
1363     // Check to see if these two pointers are related by the getelementptr
1364     // instruction.  If one pointer is a GEP with a non-zero index of the other
1365     // pointer, we know they cannot alias.
1366 
1367     // If both accesses are unknown size, we can't do anything useful here.
1368     if (V1Size == LocationSize::unknown() && V2Size == LocationSize::unknown())
1369       return MayAlias;
1370 
1371     AliasResult R =
1372         aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(), V2,
1373                    LocationSize::unknown(), V2AAInfo, nullptr, UnderlyingV2);
1374     if (R != MustAlias) {
1375       // If V2 may alias the GEP base pointer, conservatively return MayAlias.
1376       // If V2 is known not to alias the GEP base pointer, then the two values
1377       // cannot alias per GEP semantics: "Any memory access must be done through
1378       // a pointer value associated with an address range of the memory access,
1379       // otherwise the behavior is undefined.".
1380       assert(R == NoAlias || R == MayAlias);
1381       return R;
1382     }
1383 
1384     // If the max search depth is reached, the result is undefined.
1385     if (GEP1MaxLookupReached)
1386       return MayAlias;
1387   }
1388 
1389   // In the two-GEP case, if there is no difference in the offsets of the
1390   // computed pointers, the resultant pointers are a must alias.  This
1391   // happens when we have two lexically identical GEPs (for example).
1392   //
1393   // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
1394   // must-aliases the GEP, the end result is a must alias also.
1395   if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
1396     return MustAlias;
1397 
1398   // If there is a constant difference between the pointers, but the difference
1399   // is less than the size of the associated memory object, then we know
1400   // that the objects are partially overlapping.  If the difference is
1401   // greater, we know they do not overlap.
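  // For example (invented sizes): a constant difference of 4 against an
  // 8-byte V2 access overlaps (PartialAlias), while a difference of 8 or
  // more places GEP1 entirely past V2's access (NoAlias).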
1402   if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
1403     if (GEP1BaseOffset.sge(0)) {
1404       if (V2Size != LocationSize::unknown()) {
1405         if (GEP1BaseOffset.ult(V2Size.getValue()))
1406           return PartialAlias;
1407         return NoAlias;
1408       }
1409     } else {
1410       // We have the situation where:
1411       // +                +
1412       // | BaseOffset     |
1413       // ---------------->|
1414       // |-->V1Size       |-------> V2Size
1415       // GEP1             V2
1416       // We need to know that V2Size is not unknown, otherwise we might have
1417       // stripped a gep with negative index ('gep <ptr>, -1, ...').
1418       if (V1Size != LocationSize::unknown() &&
1419           V2Size != LocationSize::unknown()) {
1420         if ((-GEP1BaseOffset).ult(V1Size.getValue()))
1421           return PartialAlias;
1422         return NoAlias;
1423       }
1424     }
1425   }
1426 
1427   if (!DecompGEP1.VarIndices.empty()) {
1428     APInt Modulo(MaxPointerSize, 0);
1429     bool AllPositive = true;
1430     for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1431 
1432       // Try to distinguish something like &A[i][1] against &A[42][0].
1433       // Grab the least significant bit set in any of the scales. We
1434       // don't need std::abs here (even if the scale's negative) as we'll
1435       // be ^'ing Modulo with itself later.
1436       Modulo |= DecompGEP1.VarIndices[i].Scale;
1437 
1438       if (AllPositive) {
1439         // If the Value could change between cycles, then any reasoning about
1440         // the Value this cycle may not hold in the next cycle. We'll just
1441         // give up if we can't determine conditions that hold for every cycle:
1442         const Value *V = DecompGEP1.VarIndices[i].V;
1443 
1444         KnownBits Known = computeKnownBits(V, DL, 0, &AC, nullptr, DT);
1445         bool SignKnownZero = Known.isNonNegative();
1446         bool SignKnownOne = Known.isNegative();
1447 
1448         // Zero-extension widens the variable, and so forces the sign
1449         // bit to zero.
1450         bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
1451         SignKnownZero |= IsZExt;
1452         SignKnownOne &= !IsZExt;
1453 
1454         // If the variable begins with a zero then we know it's
1455         // positive, regardless of whether the value is signed or
1456         // unsigned.
1457         APInt Scale = DecompGEP1.VarIndices[i].Scale;
1458         AllPositive =
1459             (SignKnownZero && Scale.sge(0)) || (SignKnownOne && Scale.slt(0));
1460       }
1461     }
1462 
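    // Keep only the least significant set bit (e.g. 0b1100 -> 0b0100); the
    // two addresses are then compared modulo this power of two.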
1463     Modulo = Modulo ^ (Modulo & (Modulo - 1));
1464 
1465     // We can compute the difference between the two addresses
1466     // mod Modulo. Check whether that difference guarantees that the
1467     // two locations do not alias.
1468     APInt ModOffset = GEP1BaseOffset & (Modulo - 1);
1469     if (V1Size != LocationSize::unknown() &&
1470         V2Size != LocationSize::unknown() && ModOffset.uge(V2Size.getValue()) &&
1471         (Modulo - ModOffset).uge(V1Size.getValue()))
1472       return NoAlias;
1473 
1474     // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
1475     // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
1476     // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
1477     if (AllPositive && GEP1BaseOffset.sgt(0) &&
1478         V2Size != LocationSize::unknown() &&
1479         GEP1BaseOffset.uge(V2Size.getValue()))
1480       return NoAlias;
1481 
1482     if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
1483                                 GEP1BaseOffset, &AC, DT))
1484       return NoAlias;
1485   }
1486 
1487   // Statically, we can see that the base objects are the same, but the
1488   // pointers have dynamic offsets which we can't resolve. And none of our
1489   // little tricks above worked.
1490   return MayAlias;
1491 }
1492 
1493 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1494   // If the results agree, take it.
1495   if (A == B)
1496     return A;
1497   // A mix of PartialAlias and MustAlias is PartialAlias.
1498   if ((A == PartialAlias && B == MustAlias) ||
1499       (B == PartialAlias && A == MustAlias))
1500     return PartialAlias;
1501   // Otherwise, we don't know anything.
1502   return MayAlias;
1503 }
1504 
1505 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1506 /// against another.
1507 AliasResult BasicAAResult::aliasSelect(const SelectInst *SI,
1508                                        LocationSize SISize,
1509                                        const AAMDNodes &SIAAInfo,
1510                                        const Value *V2, LocationSize V2Size,
1511                                        const AAMDNodes &V2AAInfo,
1512                                        const Value *UnderV2) {
1513   // If the values are Selects with the same condition, we can do a more precise
1514   // check: just check for aliases between the values on corresponding arms.
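  // For illustration (hypothetical IR):
  //
  //   %p1 = select i1 %c, i32* %a, i32* %b
  //   %p2 = select i1 %c, i32* %x, i32* %y
  //
  // The same %c picks the same arm of both selects, so %p1 and %p2 cannot
  // alias if %a vs. %x and %b vs. %y are both NoAlias.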
1515   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1516     if (SI->getCondition() == SI2->getCondition()) {
1517       AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
1518                                      SI2->getTrueValue(), V2Size, V2AAInfo);
1519       if (Alias == MayAlias)
1520         return MayAlias;
1521       AliasResult ThisAlias =
1522           aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
1523                      SI2->getFalseValue(), V2Size, V2AAInfo);
1524       return MergeAliasResults(ThisAlias, Alias);
1525     }
1526 
1527   // If both arms of the Select node NoAlias or MustAlias V2, then return
1528   // NoAlias / MustAlias. Otherwise, return MayAlias.
1529   AliasResult Alias =
1530       aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
1531                  SISize, SIAAInfo, UnderV2);
1532   if (Alias == MayAlias)
1533     return MayAlias;
1534 
1535   AliasResult ThisAlias =
1536       aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo,
1537                  UnderV2);
1538   return MergeAliasResults(ThisAlias, Alias);
1539 }
1540 
1541 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1542 /// another.
1543 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1544                                     const AAMDNodes &PNAAInfo, const Value *V2,
1545                                     LocationSize V2Size,
1546                                     const AAMDNodes &V2AAInfo,
1547                                     const Value *UnderV2) {
1548   // Track phi nodes we have visited. We use this information when we determine
1549   // value equivalence.
1550   VisitedPhiBBs.insert(PN->getParent());
1551 
1552   // If the values are PHIs in the same block, we can do a more precise
1553   // as well as efficient check: just check for aliases between the values
1554   // on corresponding edges.
1555   if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1556     if (PN2->getParent() == PN->getParent()) {
1557       LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
1558                    MemoryLocation(V2, V2Size, V2AAInfo));
1559       if (PN > V2)
1560         std::swap(Locs.first, Locs.second);
1561       // Analyse the PHIs' inputs under the assumption that the PHIs are
1562       // NoAlias.
1563       // If the PHIs are May/MustAlias there must be (recursively) an input
1564       // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
1565       // there must be an operation on the PHIs within the PHIs' value cycle
1566       // that causes a MayAlias.
1567       // Pretend the phis do not alias.
1568       AliasResult Alias = NoAlias;
1569       assert(AliasCache.count(Locs) &&
1570              "There must exist an entry for the phi node");
1571       AliasResult OrigAliasResult = AliasCache[Locs];
1572       AliasCache[Locs] = NoAlias;
1573 
1574       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1575         AliasResult ThisAlias =
1576             aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
1577                        PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
1578                        V2Size, V2AAInfo);
1579         Alias = MergeAliasResults(ThisAlias, Alias);
1580         if (Alias == MayAlias)
1581           break;
1582       }
1583 
1584       // Reset if speculation failed.
1585       if (Alias != NoAlias)
1586         AliasCache[Locs] = OrigAliasResult;
1587 
1588       return Alias;
1589     }
1590 
1591   SmallVector<Value *, 4> V1Srcs;
1592   bool isRecursive = false;
1593   if (PV)  {
1594     // If we have PhiValues then use it to get the underlying phi values.
1595     const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
1596     // If we have more phi values than the search depth then return MayAlias
1597     // conservatively to avoid compile time explosion. The worst possible case
1598     // is if both sides are PHI nodes, in which case this is O(m x n) time,
1599     // where 'm' and 'n' are the number of PHI sources.
1600     if (PhiValueSet.size() > MaxLookupSearchDepth)
1601       return MayAlias;
1602     // Add the values to V1Srcs
1603     for (Value *PV1 : PhiValueSet) {
1604       if (EnableRecPhiAnalysis) {
1605         if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
1606           // Check whether the incoming value is a GEP that advances the pointer
1607           // result of this PHI node (e.g. in a loop). If this is the case, we
1608           // would recurse and always get a MayAlias. Handle this case specially
1609           // below.
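          // For illustration (hypothetical IR), the pattern looks like:
          //
          //   loop:
          //     %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
          //     %p.next = getelementptr inbounds i8, i8* %p, i64 1
          //
          // %p.next feeds the phi it was derived from, so we flag the phi
          // as recursive rather than chase the cycle.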
1610           if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
1611               isa<ConstantInt>(PV1GEP->idx_begin())) {
1612             isRecursive = true;
1613             continue;
1614           }
1615         }
1616       }
1617       V1Srcs.push_back(PV1);
1618     }
1619   } else {
1620     // If we don't have PhiValues then just look at the operands of the phi itself.
1621     // FIXME: Remove this once we can guarantee that we have PhiValues always.
1622     SmallPtrSet<Value *, 4> UniqueSrc;
1623     for (Value *PV1 : PN->incoming_values()) {
1624       if (isa<PHINode>(PV1))
1625         // If any of the sources is itself a PHI, return MayAlias conservatively
1626         // to avoid compile time explosion. The worst possible case is if both
1627         // sides are PHI nodes, in which case this is O(m x n) time where 'm'
1628         // and 'n' are the number of PHI sources.
1629         return MayAlias;
1630 
1631       if (EnableRecPhiAnalysis)
1632         if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
1633           // Check whether the incoming value is a GEP that advances the pointer
1634           // result of this PHI node (e.g. in a loop). If this is the case, we
1635           // would recurse and always get a MayAlias. Handle this case specially
1636           // below.
1637           if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
1638               isa<ConstantInt>(PV1GEP->idx_begin())) {
1639             isRecursive = true;
1640             continue;
1641           }
1642         }
1643 
1644       if (UniqueSrc.insert(PV1).second)
1645         V1Srcs.push_back(PV1);
1646     }
1647   }
1648 
1649   // If V1Srcs is empty then that means that the phi has no underlying non-phi
1650   // value. This should only be possible in blocks unreachable from the entry
1651   // block, but return MayAlias just in case.
1652   if (V1Srcs.empty())
1653     return MayAlias;
1654 
1655   // If this PHI node is recursive, set the size of the accessed memory to
1656   // unknown to represent all the possible values the GEP could advance the
1657   // pointer to.
1658   if (isRecursive)
1659     PNSize = LocationSize::unknown();
1660 
1661   AliasResult Alias =
1662       aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0],
1663                  PNSize, PNAAInfo, UnderV2);
1664 
1665   // Early exit if the check of the first PHI source against V2 is MayAlias.
1666   // Other results are not possible.
1667   if (Alias == MayAlias)
1668     return MayAlias;
1669 
1670   // If all sources of the PHI node NoAlias or MustAlias V2, then return
1671   // NoAlias / MustAlias. Otherwise, return MayAlias.
1672   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1673     Value *V = V1Srcs[i];
1674 
1675     AliasResult ThisAlias =
1676         aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, UnderV2);
1677     Alias = MergeAliasResults(ThisAlias, Alias);
1678     if (Alias == MayAlias)
1679       break;
1680   }
1681 
1682   return Alias;
1683 }
1684 
1685 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1686 /// array references.
1687 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1688                                       AAMDNodes V1AAInfo, const Value *V2,
1689                                       LocationSize V2Size, AAMDNodes V2AAInfo,
1690                                       const Value *O1, const Value *O2) {
1691   // If either of the memory references is empty, it doesn't matter what the
1692   // pointer values are.
1693   if (V1Size.isZero() || V2Size.isZero())
1694     return NoAlias;
1695 
1696   // Strip off any casts if they exist.
1697   V1 = V1->stripPointerCastsAndInvariantGroups();
1698   V2 = V2->stripPointerCastsAndInvariantGroups();
1699 
1700   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1701   // value for undef that aliases nothing in the program.
1702   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1703     return NoAlias;
1704 
1705   // Are we checking for alias of the same value?
1706   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1707   // different iterations. We must therefore make sure that this is not the
1708   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1709   // happen by looking at the visited phi nodes and making sure they cannot
1710   // reach the value.
1711   if (isValueEqualInPotentialCycles(V1, V2))
1712     return MustAlias;
1713 
1714   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1715     return NoAlias; // Scalars cannot alias each other
1716 
1717   // Figure out what objects these things are pointing to if we can.
1718   if (O1 == nullptr)
1719     O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
1720 
1721   if (O2 == nullptr)
1722     O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);
1723 
1724   // Null values in the default address space don't point to any object, so they
1725   // don't alias any other pointer.
1726   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1727     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1728       return NoAlias;
1729   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1730     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1731       return NoAlias;
1732 
1733   if (O1 != O2) {
1734     // If V1/V2 point to two different objects, we know that we have no alias.
1735     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1736       return NoAlias;
1737 
1738     // Constant pointers can't alias with non-const isIdentifiedObject objects.
1739     if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1740         (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1741       return NoAlias;
1742 
1743     // Function arguments can't alias with things that are known to be
1744     // unambiguously identified at the function level.
1745     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1746         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1747       return NoAlias;
1748 
1749     // If one pointer is the result of a call/invoke or load and the other is a
1750     // non-escaping local object within the same function, then we know the
1751     // object couldn't escape to a point where the call could return it.
1752     //
1753     // Note that if the pointers are in different functions, there are a
1754     // variety of complications. A call with a nocapture argument may still
1755     // temporarily store the nocapture argument's value in a temporary memory
1756     // location if that memory location doesn't escape. Or it may pass a
1757     // nocapture value to other functions as long as they don't capture it.
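    // For illustration (hypothetical IR):
    //
    //   %local = alloca i32        ; local object that never escapes
    //   %ret = call i32* @f()      ; escape source
    //
    // @f can only hand back a pointer that escaped before the call, so %ret
    // cannot be based on %local.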
1758     if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
1759       return NoAlias;
1760     if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
1761       return NoAlias;
1762   }
1763 
1764   // If the size of one access is larger than the entire object on the other
1765   // side, then we know such behavior is undefined and can assume no alias.
1766   bool NullIsValidLocation = NullPointerIsDefined(&F);
1767   if ((V1Size.isPrecise() && isObjectSmallerThan(O2, V1Size.getValue(), DL, TLI,
1768                                                  NullIsValidLocation)) ||
1769       (V2Size.isPrecise() && isObjectSmallerThan(O1, V2Size.getValue(), DL, TLI,
1770                                                  NullIsValidLocation)))
1771     return NoAlias;
1772 
1773   // Check the cache before climbing up use-def chains. This also terminates
1774   // otherwise infinitely recursive queries.
1775   LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
1776                MemoryLocation(V2, V2Size, V2AAInfo));
1777   if (V1 > V2)
1778     std::swap(Locs.first, Locs.second);
1779   std::pair<AliasCacheTy::iterator, bool> Pair =
1780       AliasCache.insert(std::make_pair(Locs, MayAlias));
1781   if (!Pair.second)
1782     return Pair.first->second;
1783 
1784   // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
1785   // GEP can't simplify, we don't even look at the PHI cases.
1786   if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
1787     std::swap(V1, V2);
1788     std::swap(V1Size, V2Size);
1789     std::swap(O1, O2);
1790     std::swap(V1AAInfo, V2AAInfo);
1791   }
1792   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1793     AliasResult Result =
1794         aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
1795     if (Result != MayAlias)
1796       return AliasCache[Locs] = Result;
1797   }
1798 
1799   if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
1800     std::swap(V1, V2);
1801     std::swap(O1, O2);
1802     std::swap(V1Size, V2Size);
1803     std::swap(V1AAInfo, V2AAInfo);
1804   }
1805   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1806     AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
1807                                   V2, V2Size, V2AAInfo, O2);
1808     if (Result != MayAlias)
1809       return AliasCache[Locs] = Result;
1810   }
1811 
1812   if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
1813     std::swap(V1, V2);
1814     std::swap(O1, O2);
1815     std::swap(V1Size, V2Size);
1816     std::swap(V1AAInfo, V2AAInfo);
1817   }
1818   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1819     AliasResult Result =
1820         aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
1821     if (Result != MayAlias)
1822       return AliasCache[Locs] = Result;
1823   }
1824 
1825   // If both pointers are pointing into the same object and one of them
1826   // accesses the entire object, then the accesses must overlap in some way.
1827   if (O1 == O2)
1828     if (V1Size.isPrecise() && V2Size.isPrecise() &&
1829         (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1830          isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1831       return AliasCache[Locs] = PartialAlias;
1832 
1833   // Recurse back into the best AA results we have, potentially with refined
1834   // memory locations. We have already ensured that BasicAA has a MayAlias
1835   // cache result for these, so any recursion back into BasicAA won't loop.
1836   AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second);
1837   return AliasCache[Locs] = Result;
1838 }
1839 
1840 /// Check whether two Values can be considered equivalent.
1841 ///
1842 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1843 /// they cannot be part of a cycle in the value graph by looking at all
1844 /// visited phi nodes and making sure that the phis cannot reach the value. We
1845 /// have to do this because we are looking through phi nodes (that is, we say
1846 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
1847 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1848                                                   const Value *V2) {
1849   if (V != V2)
1850     return false;
1851 
1852   const Instruction *Inst = dyn_cast<Instruction>(V);
1853   if (!Inst)
1854     return true;
1855 
1856   if (VisitedPhiBBs.empty())
1857     return true;
1858 
1859   if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1860     return false;
1861 
1862   // Make sure that the visited phis cannot reach the Value. This ensures that
1863   // the Values cannot come from different iterations of a potential cycle the
1864   // phi nodes could be involved in.
1865   for (auto *P : VisitedPhiBBs)
1866     if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
1867       return false;
1868 
1869   return true;
1870 }
1871 
1872 /// Computes the symbolic difference between two de-composed GEPs.
1873 ///
1874 /// Dest and Src are the variable indices from two decomposed GetElementPtr
1875 /// instructions GEP1 and GEP2 which have common base pointers.
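///
/// For example (hypothetical indices): with Dest = { 4*%i } and Src =
/// { 4*%i }, the matching entry cancels and Dest becomes empty; with Src =
/// { 4*%j } instead, nothing matches and { -4*%j } is appended to Dest.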
1876 void BasicAAResult::GetIndexDifference(
1877     SmallVectorImpl<VariableGEPIndex> &Dest,
1878     const SmallVectorImpl<VariableGEPIndex> &Src) {
1879   if (Src.empty())
1880     return;
1881 
1882   for (unsigned i = 0, e = Src.size(); i != e; ++i) {
1883     const Value *V = Src[i].V;
1884     unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
1885     APInt Scale = Src[i].Scale;
1886 
1887     // Find V in Dest.  This is N^2, but pointer indices almost never have more
1888     // than a few variable indexes.
1889     for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
1890       if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
1891           Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
1892         continue;
1893 
1894       // If we found it, subtract off Scale V's from the entry in Dest.  If it
1895       // goes to zero, remove the entry.
1896       if (Dest[j].Scale != Scale)
1897         Dest[j].Scale -= Scale;
1898       else
1899         Dest.erase(Dest.begin() + j);
1900       Scale = 0;
1901       break;
1902     }
1903 
1904     // If we didn't consume this entry, add it to the end of the Dest list.
1905     if (!!Scale) {
1906       VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
1907       Dest.push_back(Entry);
1908     }
1909   }
1910 }
1911 
1912 bool BasicAAResult::constantOffsetHeuristic(
1913     const SmallVectorImpl<VariableGEPIndex> &VarIndices,
1914     LocationSize MaybeV1Size, LocationSize MaybeV2Size, APInt BaseOffset,
1915     AssumptionCache *AC, DominatorTree *DT) {
1916   if (VarIndices.size() != 2 || MaybeV1Size == LocationSize::unknown() ||
1917       MaybeV2Size == LocationSize::unknown())
1918     return false;
1919 
1920   const uint64_t V1Size = MaybeV1Size.getValue();
1921   const uint64_t V2Size = MaybeV2Size.getValue();
1922 
1923   const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
1924 
1925   if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
1926       Var0.Scale != -Var1.Scale)
1927     return false;
1928 
1929   unsigned Width = Var1.V->getType()->getIntegerBitWidth();
1930 
1931   // We'll strip off the Extensions of Var0 and Var1 and do another round
1932   // of GetLinearExpression decomposition. In the example above, if Var0
1933   // is zext(%x + 1) we should get V0 == %x and V0Offset == 1.
1934 
1935   APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
1936       V1Offset(Width, 0);
1937   bool NSW = true, NUW = true;
1938   unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
1939   const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
1940                                         V0SExtBits, DL, 0, AC, DT, NSW, NUW);
1941   NSW = true;
1942   NUW = true;
1943   const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
1944                                         V1SExtBits, DL, 0, AC, DT, NSW, NUW);
1945 
1946   if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
1947       V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
1948     return false;
1949 
1950   // We have a hit - Var0 and Var1 only differ by a constant offset!
1951 
1952   // If we've been sext'ed then zext'd, the maximum difference between Var0 and
1953   // Var1 is possible to calculate, but we're just interested in the absolute
1954   // minimum difference between the two. The minimum distance may occur due to
1955   // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1956   // the minimum distance between %i and %i + 5 is 3.
1957   APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
1958   MinDiff = APIntOps::umin(MinDiff, Wrapped);
1959   APInt MinDiffBytes =
1960     MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1961 
1962   // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1963   // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1964   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1965   // V2Size can fit in the MinDiffBytes gap.
1966   return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
1967          MinDiffBytes.uge(V2Size + BaseOffset.abs());
1968 }
1969 
1970 //===----------------------------------------------------------------------===//
1971 // BasicAliasAnalysis Pass
1972 //===----------------------------------------------------------------------===//
1973 
1974 AnalysisKey BasicAA::Key;
1975 
1976 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1977   return BasicAAResult(F.getParent()->getDataLayout(),
1978                        F,
1979                        AM.getResult<TargetLibraryAnalysis>(F),
1980                        AM.getResult<AssumptionAnalysis>(F),
1981                        &AM.getResult<DominatorTreeAnalysis>(F),
1982                        AM.getCachedResult<LoopAnalysis>(F),
1983                        AM.getCachedResult<PhiValuesAnalysis>(F));
1984 }
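// A minimal usage sketch (illustrative; assumes BasicAA's dependencies are
// registered with the same FunctionAnalysisManager, e.g. via PassBuilder):
//
//   FunctionAnalysisManager FAM;
//   FAM.registerPass([] { return BasicAA(); });
//   // ... register TargetLibraryAnalysis, AssumptionAnalysis, etc. ...
//   BasicAAResult &BAA = FAM.getResult<BasicAA>(F);
//   AliasResult AR = BAA.alias(MemoryLocation(P1, LocationSize::precise(4)),
//                              MemoryLocation(P2, LocationSize::precise(4)));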
1985 
1986 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1987     initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1988 }
1989 
1990 char BasicAAWrapperPass::ID = 0;
1991 
1992 void BasicAAWrapperPass::anchor() {}
1993 
1994 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
1995                       "Basic Alias Analysis (stateless AA impl)", false, true)
1996 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1997 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1998 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1999 INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
2000                     "Basic Alias Analysis (stateless AA impl)", false, true)
2001 
2002 FunctionPass *llvm::createBasicAAWrapperPass() {
2003   return new BasicAAWrapperPass();
2004 }
2005 
2006 bool BasicAAWrapperPass::runOnFunction(Function &F) {
2007   auto &ACT = getAnalysis<AssumptionCacheTracker>();
2008   auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
2009   auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
2010   auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
2011   auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
2012 
2013   Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F, TLIWP.getTLI(),
2014                                  ACT.getAssumptionCache(F), &DTWP.getDomTree(),
2015                                  LIWP ? &LIWP->getLoopInfo() : nullptr,
2016                                  PVWP ? &PVWP->getResult() : nullptr));
2017 
2018   return false;
2019 }
2020 
2021 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2022   AU.setPreservesAll();
2023   AU.addRequired<AssumptionCacheTracker>();
2024   AU.addRequired<DominatorTreeWrapperPass>();
2025   AU.addRequired<TargetLibraryInfoWrapperPass>();
2026   AU.addUsedIfAvailable<PhiValuesWrapperPass>();
2027 }
2028 
2029 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
2030   return BasicAAResult(
2031       F.getParent()->getDataLayout(),
2032       F,
2033       P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
2034       P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
2035 }
2036