//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(false));

/// By default, even on 32-bit architectures we use 64-bit integers for
/// calculations. This will allow us to more-aggressively decompose indexing
/// expressions calculated using i64 values (e.g., long long in C) which is
/// common enough to worry about.
static cl::opt<bool> ForceAtLeast64Bits("basic-aa-force-at-least-64b",
                                        cl::Hidden, cl::init(true));
static cl::opt<bool> DoubleCalcBits("basic-aa-double-calc-bits",
                                    cl::Hidden, cl::init(false));

/// SearchLimitReached / SearchTimes shows how often the limit to decompose
/// GEPs is reached. It will affect the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(); both functions need to use the same search
// depth, otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
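///
/// For example (an illustrative sketch, not taken from the code below): an
/// alloca whose address is stored to a global escapes, while one that is only
/// loaded from and stored to directly does not:
///
///   %a = alloca i32
///   store i32* %a, i32** @g   ; %a escapes; not a non-escaping local object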
static bool isNonEscapingLocalObject(
    const Value *V,
    SmallDenseMap<const Value *, bool, 8> *IsCapturedCache = nullptr) {
  SmallDenseMap<const Value *, bool, 8>::iterator CacheIt;
  if (IsCapturedCache) {
    bool Inserted;
    std::tie(CacheIt, Inserted) = IsCapturedCache->insert({V, false});
    if (!Inserted)
      // Found cached result, return it!
      return CacheIt->second;
  }

  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V)) {
    // Set StoreCaptures to true so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    auto Ret = !PointerMayBeCaptured(V, /*ReturnCaptures=*/false,
                                     /*StoreCaptures=*/true);
    if (IsCapturedCache)
      CacheIt->second = Ret;
    return Ret;
  }

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr()) {
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      auto Ret = !PointerMayBeCaptured(V, /*ReturnCaptures=*/false,
                                       /*StoreCaptures=*/true);
      if (IsCapturedCache)
        CacheIt->second = Ret;
      return Ret;
    }

  return false;
}

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallBase>(V))
    return true;

  if (isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the middle
  // of the "object". In case q is passed to isObjectSmallerThan() as the 1st
  // parameter, before llvm::getObjectSize() is called to get the size of the
  // entire object, we should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
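///
/// For example (an illustrative sketch of the intended behavior): for a
/// pointer marked dereferenceable(16) that is queried with a precise access
/// size of 32 bytes, the minimal extent is 32; queried with an imprecise
/// size, it is 16 (or 0 when the attribute is dereferenceable_or_null and
/// null is a valid location).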
static uint64_t getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer.
  bool CanBeNull;
  uint64_t DerefBytes = V.getPointerDereferenceableBytes(DL, CanBeNull);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size is
  // accessed, and thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue());
  return DerefBytes;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// reports whether it looked through any sign or zero extends. The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
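///
/// For example (an illustrative sketch), given:
///
///   %t = shl i32 %x, 2
///   %v = add i32 %t, 8
///
/// analyzing %v yields V = %x, Scale = 4, and Offset = 8, i.e. "4*%x + 8".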
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);

        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (Offset.getBitWidth() < RHS.getLimitedValue() ||
            Scale.getBitWidth() < RHS.getLimitedValue()) {
          Scale = 1;
          Offset = 0;
          return V;
        }

        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than the maximum pointer size. This is
/// an issue, in particular, for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information where the
/// maximum pointer size is 64b.
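///
/// For example (an illustrative sketch): with PointerSize == 32 and a 64-bit
/// Offset of 0x00000000FFFFFFFF, shifting left by 32 and arithmetically
/// shifting right by 32 yields -1, i.e. the offset is sign-extended from its
/// low 32 bits.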
static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
  assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}

static unsigned getMaxPointerSize(const DataLayout &DL) {
  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
  if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
  if (DoubleCalcBits) MaxPointerSize *= 2;

  return MaxPointerSize;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that,
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
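///
/// For example (an illustrative sketch), assuming i32 has an alloc size of 4
/// bytes, decomposing
///
///   %p = getelementptr inbounds [10 x i32], [10 x i32]* %base, i64 0, i64 %i
///
/// yields Base = %base, a constant offset of 0, and a single variable index
/// (%i, Scale = 4).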
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
    DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
    DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  unsigned MaxPointerSize = getMaxPointerSize(DL);
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group that can't be expressed with
        // the attributes, such as returning an aliasing pointer. Because some
        // analyses may assume that a nocaptured pointer is not returned from
        // certain special intrinsics (because the function would have to be
        // marked with a returns attribute), it is crucial to use this function
        // here, as it is kept in sync with CaptureTracking. Not using it may
        // cause weird miscompilations where two aliasing pointers are assumed
        // to noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs if the index scale is not a compile-time
    // constant.
    if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
      Decomposed.Base = V;
      Decomposed.HasCompileTimeConstantScale = false;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
            DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
            (DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
             CIdx->getValue().sextOrSelf(MaxPointerSize))
                .sextOrTrunc(MaxPointerSize);
        continue;
      }

      GepHasConstantOffset = false;

      APInt Scale(MaxPointerSize,
                  DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      const Value *OrigIndex = Index;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.

      // It can be the case that, even though C1*V+C2 does not overflow for
      // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
      // decompose the expression in this way.
      //
      // FIXME: C1*Scale and the other operations in the decomposed
      // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
      // possibility.
      APInt WideScaledOffset = IndexOffset.sextOrTrunc(MaxPointerSize*2) *
                               Scale.sext(MaxPointerSize*2);
      if (WideScaledOffset.getMinSignedBits() > MaxPointerSize) {
        Index = OrigIndex;
        IndexScale = 1;
        IndexOffset = 0;

        ZExtBits = SExtBits = 0;
        if (PointerSize > Width)
          SExtBits += PointerSize - Width;
      } else {
        Decomposed.OtherOffset += IndexOffset.sextOrTrunc(MaxPointerSize) * Scale;
        Scale *= IndexScale.sextOrTrunc(MaxPointerSize);
      }

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits, Scale};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset) {
      Decomposed.StructOffset =
          adjustToPointerSize(Decomposed.StructOffset, PointerSize);
      Decomposed.OtherOffset =
          adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           AAQueryInfo &AAQI, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
  if (Call->doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (Call->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (Call->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (Call->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (Call->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (Call->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If the call has operand bundles then aliasing attributes from the function
  // it calls do not directly apply to the call. This can be made more precise
  // in the future.
  if (!Call->hasOperandBundles())
    if (const Function *F = Call->getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e., Mod only) parameter.
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (Call->getCalledFunction() &&
      TLI.getLibFunc(*Call->getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(Call, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB,
                                 AAQueryInfo &AAQI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AAQI.AliasCache.find(AAQueryInfo::LocPair(LocA, LocB));
  if (CacheIt != AAQI.AliasCache.end())
    return CacheIt->second;

  CacheIt = AAQI.AliasCache.find(AAQueryInfo::LocPair(LocB, LocA));
  if (CacheIt != AAQI.AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags, AAQI);

  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      isNonEscapingLocalObject(Object, &AAQI.IsCapturedCache)) {
    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) &&
           OperandNo < Call->getNumArgOperands() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR = getBestAAResults().alias(MemoryLocation(*CI),
                                                MemoryLocation(Object), AAQI);
      if (AR != MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object', continue looking for other aliases.
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but the call only writes into it.
      if (Call->doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and the call reads and writes into it.
      // Setting ModRef will not yield an early return below; MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset the Must bit. It is added below if at least
    // one operand aliases and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information.
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the call is malloc/calloc-like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation(Call), Loc, AAQI) == NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics forbid overlap between their respective
  // operands, i.e., source and destination of any given memcpy must no-alias.
  // If Loc must-aliases either one of these two locations, then it necessarily
  // no-aliases the other.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
    AliasResult SrcAA, DestAA;

    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
                                          Loc, AAQI)) == MustAlias)
      // Loc is exactly the memcpy source thus disjoint from memcpy dest.
      return ModRefInfo::Ref;
    if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
                                           Loc, AAQI)) == MustAlias)
      // The converse case.
      return ModRefInfo::Mod;

    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != NoAlias)
      rv = setRef(rv);
    if (DestAA != NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call1, Intrinsic::assume) ||
      isIntrinsicCall(Call2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
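///
/// For example (an illustrative sketch), with %s pointing to a struct of two
/// i32 fields:
///
///   %f0 = getelementptr inbounds %struct, %struct* %s, i64 0, i32 0
///   %f1 = getelementptr inbounds %struct, %struct* %s, i64 0, i32 1
///
/// the last indices are distinct constants into the same struct, so 4-byte
/// accesses through %f0 and %f1 cannot alias.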
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            LocationSize MaybeV1Size,
                                            const GEPOperator *GEP2,
                                            LocationSize MaybeV2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (MaybeV1Size == LocationSize::unknown() ||
      MaybeV2Size == LocationSize::unknown())
    return MayAlias;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other
  // indices might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2) {
    unsigned BitWidth = std::max(C1->getBitWidth(), C2->getBitWidth());
    if (C1->getValue().sextOrSelf(BitWidth) ==
        C2->getValue().sextOrSelf(BitWidth))
      return MayAlias;
  }

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
      GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or vector);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    Type *LastElementTy = GetElementPtrInst::getTypeAtIndex(Ty, (uint64_t)0);
    const uint64_t ElementSize =
        DL.getTypeStoreSize(LastElementTy).getFixedSize();
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and the
    // array/pointer that GEP2 indexes into must either precisely overlap or
    // be disjoint. Because they cannot partially overlap and because fields
    // in an array cannot overlap, if we can prove the final indices are
    // different between GEP1 and GEP2, we can conclude GEP1 and GEP2 don't
    // alias.

    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    {
      Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
      Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
      if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
        // If one of the indices is a PHI node, be safe and only use
        // computeKnownBits so we don't make any assumptions about the
        // relationships between the two indices. This is important if we're
        // asking about values from different loop iterations. See PR32314.
        // TODO: We may be able to change the check so we only do this when
        // we definitely looked through a PHINode.
        if (GEP1LastIdx != GEP2LastIdx &&
            GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
          KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
          KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
          if (Known1.Zero.intersects(Known2.One) ||
              Known1.One.intersects(Known2.Zero))
            return NoAlias;
        }
      } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
        return NoAlias;
    }
    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  if (C1->getValue().getActiveBits() > 64 ||
      C2->getValue().getActiveBits() > 64)
    return MayAlias;

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.

  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}

// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, that means the GEP cannot alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
//   struct { int f0, int f1, ...} foo;
//   foo alloca;
//   foo* random = bar(alloca);
//   int *f0 = &alloca.f0
//   int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
//   %alloca = alloca %struct.foo
//   %random = call %struct.foo* @random(%struct.foo* %alloca)
//   %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
//   %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object. But since %f0 points to the beginning of %alloca,
// the highest %f1 can be is (%alloca + 3). This means %random cannot be higher
// than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
    const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
    LocationSize MaybeObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (MaybeObjectAccessSize == LocationSize::unknown() || !GEPOp->isInBounds())
    return false;

  const uint64_t ObjectAccessSize = MaybeObjectAccessSize.getValue();

  // We need the object to be an alloca or a GlobalVariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  APInt ObjectBaseOffset = DecompObject.StructOffset +
                           DecompObject.OtherOffset;

  // If the GEP has no variable indices, we know the precise offset
  // from the base and can use it. If the GEP has variable indices,
  // we can't get the exact GEP offset needed to identify a pointer
  // alias, so return false in that case.
  if (!DecompGEP.VarIndices.empty())
    return false;

  APInt GEPBaseOffset = DecompGEP.StructOffset;
  GEPBaseOffset += DecompGEP.OtherOffset;

  return GEPBaseOffset.sge(ObjectBaseOffset + (int64_t)ObjectAccessSize);
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size, const AAMDNodes &V1AAInfo,
    const Value *V2, LocationSize V2Size, const AAMDNodes &V2AAInfo,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  DecomposedGEP DecompGEP1, DecompGEP2;
  unsigned MaxPointerSize = getMaxPointerSize(DL);
  DecompGEP1.StructOffset = DecompGEP1.OtherOffset = APInt(MaxPointerSize, 0);
  DecompGEP2.StructOffset = DecompGEP2.OtherOffset = APInt(MaxPointerSize, 0);
  DecompGEP1.HasCompileTimeConstantScale =
      DecompGEP2.HasCompileTimeConstantScale = true;

  bool GEP1MaxLookupReached =
      DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
  bool GEP2MaxLookupReached =
      DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);

  // Don't attempt to analyze the decomposed GEP if index scale is not a
  // compile-time constant.
  if (!DecompGEP1.HasCompileTimeConstantScale ||
      !DecompGEP2.HasCompileTimeConstantScale)
    return MayAlias;

  APInt GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
  APInt GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "GetUnderlyingObject");

  // If the GEP's offset relative to its base is such that the base would
  // fall below the start of the object underlying V2, then the GEP and V2
  // cannot alias.
  if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
      isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
    return NoAlias;
1349 // If we have two gep instructions with must-alias or not-alias'ing base
1350 // pointers, figure out if the indexes to the GEP tell us anything about the
1351 // derived pointer.
1352 if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
1353 // Check for the GEP base being at a negative offset, this time in the other
1354 // direction.
1355 if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
1356 isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
1357 return NoAlias;
1358 // Do the base pointers alias?
1359 AliasResult BaseAlias =
1360 aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(),
1361 UnderlyingV2, LocationSize::unknown(), AAMDNodes(), AAQI);
1362
1363 // Check for geps of non-aliasing underlying pointers where the offsets are
1364 // identical.
1365 if ((BaseAlias == MayAlias) && V1Size == V2Size) {
1366 // Do the base pointers alias assuming type and size.
1367 AliasResult PreciseBaseAlias = aliasCheck(
1368 UnderlyingV1, V1Size, V1AAInfo, UnderlyingV2, V2Size, V2AAInfo, AAQI);
1369 if (PreciseBaseAlias == NoAlias) {
1370 // See if the computed offset from the common pointer tells us about the
1371 // relation of the resulting pointer.
1372 // If the max search depth is reached the result is undefined
1373 if (GEP2MaxLookupReached || GEP1MaxLookupReached)
1374 return MayAlias;
1375
1376 // Same offsets.
1377 if (GEP1BaseOffset == GEP2BaseOffset &&
1378 DecompGEP1.VarIndices == DecompGEP2.VarIndices)
1379 return NoAlias;
1380 }
1381 }
1382
1383 // If we get a No or May, then return it immediately, no amount of analysis
1384 // will improve this situation.
1385 if (BaseAlias != MustAlias) {
1386 assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
1387 return BaseAlias;
1388 }
1389
1390 // Otherwise, we have a MustAlias. Since the base pointers alias each other
1391 // exactly, see if the computed offset from the common pointer tells us
1392 // about the relation of the resulting pointer.
1393 // If we know the two GEPs are based off of the exact same pointer (and not
1394 // just the same underlying object), see if that tells us anything about
1395 // the resulting pointers.
1396 if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
1397 GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
1398 GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
1399 AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
1400 // If we couldn't find anything interesting, don't abandon just yet.
1401 if (R != MayAlias)
1402 return R;
1403 }
1404
1405 // If the max search depth is reached, the result is undefined
1406 if (GEP2MaxLookupReached || GEP1MaxLookupReached)
1407 return MayAlias;
1408
1409 // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1410 // symbolic difference.
1411 GEP1BaseOffset -= GEP2BaseOffset;
1412 GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);
1413
1414 } else {
1415 // Check to see if these two pointers are related by the getelementptr
1416 // instruction. If one pointer is a GEP with a non-zero index of the other
1417 // pointer, we know they cannot alias.
1418
1419 // If both accesses are unknown size, we can't do anything useful here.
1420 if (V1Size == LocationSize::unknown() && V2Size == LocationSize::unknown())
1421 return MayAlias;
1422
1423 AliasResult R = aliasCheck(UnderlyingV1, LocationSize::unknown(),
1424 AAMDNodes(), V2, LocationSize::unknown(),
1425 V2AAInfo, AAQI, nullptr, UnderlyingV2);
1426 if (R != MustAlias) {
1427 // If V2 may alias GEP base pointer, conservatively returns MayAlias.
1428 // If V2 is known not to alias GEP base pointer, then the two values
1429 // cannot alias per GEP semantics: "Any memory access must be done through
1430 // a pointer value associated with an address range of the memory access,
1431 // otherwise the behavior is undefined.".
1432 assert(R == NoAlias || R == MayAlias);
1433 return R;
1434 }
1435
1436 // If the max search depth is reached the result is undefined
1437 if (GEP1MaxLookupReached)
1438 return MayAlias;
1439 }
1440
1441 // In the two GEP Case, if there is no difference in the offsets of the
1442 // computed pointers, the resultant pointers are a must alias. This
1443 // happens when we have two lexically identical GEP's (for example).
1444 //
1445 // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
1446 // must aliases the GEP, the end result is a must alias also.
1447 if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
1448 return MustAlias;
1449
1450 // If there is a constant difference between the pointers, but the difference
1451 // is less than the size of the associated memory object, then we know
1452 // that the objects are partially overlapping. If the difference is
1453 // greater, we know they do not overlap.
1454 if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
1455 if (GEP1BaseOffset.sge(0)) {
1456 if (V2Size != LocationSize::unknown()) {
1457 if (GEP1BaseOffset.ult(V2Size.getValue()))
1458 return PartialAlias;
1459 return NoAlias;
1460 }
1461 } else {
1462 // We have the situation where:
1463 // + +
1464 // | BaseOffset |
1465 // ---------------->|
1466 // |-->V1Size |-------> V2Size
1467 // GEP1 V2
1468 // We need to know that V2Size is not unknown, otherwise we might have
1469 // stripped a gep with negative index ('gep <ptr>, -1, ...).
1470 if (V1Size != LocationSize::unknown() &&
1471 V2Size != LocationSize::unknown()) {
1472 if ((-GEP1BaseOffset).ult(V1Size.getValue()))
1473 return PartialAlias;
1474 return NoAlias;
1475 }
1476 }
1477 }
1478
1479 if (!DecompGEP1.VarIndices.empty()) {
1480 APInt Modulo(MaxPointerSize, 0);
1481 bool AllPositive = true;
1482 for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1483
1484 // Try to distinguish something like &A[i][1] against &A[42][0].
1485 // Grab the least significant bit set in any of the scales. We
1486 // don't need std::abs here (even if the scale's negative) as we'll
1487 // be ^'ing Modulo with itself later.
1488 Modulo |= DecompGEP1.VarIndices[i].Scale;
1489
1490 if (AllPositive) {
1491 // If the Value could change between cycles, then any reasoning about
1492 // the Value this cycle may not hold in the next cycle. We'll just
1493 // give up if we can't determine conditions that hold for every cycle:
1494 const Value *V = DecompGEP1.VarIndices[i].V;
1495
1496 KnownBits Known =
1497 computeKnownBits(V, DL, 0, &AC, dyn_cast<Instruction>(GEP1), DT);
1498 bool SignKnownZero = Known.isNonNegative();
1499 bool SignKnownOne = Known.isNegative();
1500
1501 // Zero-extension widens the variable, and so forces the sign
1502 // bit to zero.
1503 bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
1504 SignKnownZero |= IsZExt;
1505 SignKnownOne &= !IsZExt;
1506
1507 // If the variable begins with a zero then we know it's
1508 // positive, regardless of whether the value is signed or
1509 // unsigned.
1510 APInt Scale = DecompGEP1.VarIndices[i].Scale;
1511 AllPositive =
1512 (SignKnownZero && Scale.sge(0)) || (SignKnownOne && Scale.slt(0));
1513 }
1514 }
1515
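    // Keep only the least significant set bit of Modulo: x & (x - 1) clears
    // the lowest set bit, so x ^ (x & (x - 1)) isolates it. For example
    // (illustrative), Modulo == 12 (0b1100) becomes 4 (0b0100); every
    // variable-scaled term is then a multiple of that power of two.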
    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    APInt ModOffset = GEP1BaseOffset & (Modulo - 1);
    if (V1Size != LocationSize::unknown() &&
        V2Size != LocationSize::unknown() && ModOffset.uge(V2Size.getValue()) &&
        (Modulo - ModOffset).uge(V1Size.getValue()))
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset.sgt(0) &&
        V2Size != LocationSize::unknown() &&
        GEP1BaseOffset.uge(V2Size.getValue()))
      return NoAlias;

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return MayAlias;
}
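
// A concrete sketch of the constant-offset path above (hypothetical IR, not
// from this file): against a 4-byte access at %base,
//   %p = getelementptr i8, i8* %base, i64 2
// leaves GEP1BaseOffset == 2 with no variable indices, and 2 ult 4 gives
// PartialAlias; with offset 8 instead, 8 is not ult 4 and the regions cannot
// overlap, so the result is NoAlias.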

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}
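
// The merge acts as a small join table (illustrative summary of the code
// above):
//   A == B                    -> A
//   {PartialAlias, MustAlias} -> PartialAlias
//   any other disagreement    -> MayAlias
// so, e.g., NoAlias merged with MustAlias conservatively becomes MayAlias.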

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const AAMDNodes &SIAAInfo, const Value *V2,
                           LocationSize V2Size, const AAMDNodes &V2AAInfo,
                           const Value *UnderV2, AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on corresponding
  // arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias =
          aliasCheck(SI->getTrueValue(), SISize, SIAAInfo, SI2->getTrueValue(),
                     V2Size, V2AAInfo, AAQI);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo, AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then we return
  // NoAlias / MustAlias. Otherwise, we return MayAlias.
  AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
                                 SISize, SIAAInfo, AAQI, UnderV2);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(),
                                     SISize, SIAAInfo, AAQI, UnderV2);
  return MergeAliasResults(ThisAlias, Alias);
}
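
// Same-condition example (illustrative IR, not from this file):
//   %s1 = select i1 %c, i32* %a, i32* %b
//   %s2 = select i1 %c, i32* %x, i32* %y
// Both selects are steered by the same %c, so %s1 and %s2 always pick
// corresponding arms; merging alias(%a, %x) with alias(%b, %y) is enough,
// and the cross pairs (%a, %y) and (%b, %x) never need to be compared.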

/// Provides a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const AAMDNodes &PNAAInfo, const Value *V2,
                                    LocationSize V2Size,
                                    const AAMDNodes &V2AAInfo,
                                    const Value *UnderV2, AAQueryInfo &AAQI) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise as
  // well as more efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      AAQueryInfo::LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
                                MemoryLocation(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      AliasResult OrigAliasResult;
      {
        // Limited lifetime iterator invalidated by the aliasCheck call below.
        auto CacheIt = AAQI.AliasCache.find(Locs);
        assert((CacheIt != AAQI.AliasCache.end()) &&
               "There must exist an entry for the phi node");
        OrigAliasResult = CacheIt->second;
        CacheIt->second = NoAlias;
      }

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo, AAQI);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias) {
        auto Pair =
            AAQI.AliasCache.insert(std::make_pair(Locs, OrigAliasResult));
        assert(!Pair.second && "Entry must have existed");
        Pair.first->second = OrigAliasResult;
      }
      return Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  bool isRecursive = false;
  if (PV) {
    // If we have PhiValues then use it to get the underlying phi values.
    const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
    // If we have more phi values than the search depth then return MayAlias
    // conservatively to avoid compile time explosion. The worst possible case
    // is if both sides are PHI nodes. In which case, this is O(m x n) time
    // where 'm' and 'n' are the number of PHI sources.
    if (PhiValueSet.size() > MaxLookupSearchDepth)
      return MayAlias;
    // Add the values to V1Srcs.
    for (Value *PV1 : PhiValueSet) {
      if (EnableRecPhiAnalysis) {
        if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
          // Check whether the incoming value is a GEP that advances the
          // pointer result of this PHI node (e.g. in a loop). If this is the
          // case, we would recurse and always get a MayAlias. Handle this
          // case specially below.
          if (PV1GEP->getPointerOperand() == PN &&
              PV1GEP->getNumIndices() == 1 &&
              isa<ConstantInt>(PV1GEP->idx_begin())) {
            isRecursive = true;
            continue;
          }
        }
      }
      V1Srcs.push_back(PV1);
    }
  } else {
    // If we don't have PhiValues then just look at the operands of the phi
    // itself.
    // FIXME: Remove this once we can guarantee that we have PhiValues always.
    SmallPtrSet<Value *, 4> UniqueSrc;
    for (Value *PV1 : PN->incoming_values()) {
      if (isa<PHINode>(PV1))
        // If any of the sources is itself a PHI, return MayAlias
        // conservatively to avoid compile time explosion. The worst possible
        // case is if both sides are PHI nodes. In which case, this is
        // O(m x n) time where 'm' and 'n' are the number of PHI sources.
        return MayAlias;

      if (EnableRecPhiAnalysis)
        if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
          // Check whether the incoming value is a GEP that advances the
          // pointer result of this PHI node (e.g. in a loop). If this is the
          // case, we would recurse and always get a MayAlias. Handle this
          // case specially below.
          if (PV1GEP->getPointerOperand() == PN &&
              PV1GEP->getNumIndices() == 1 &&
              isa<ConstantInt>(PV1GEP->idx_begin())) {
            isRecursive = true;
            continue;
          }
        }

      if (UniqueSrc.insert(PV1).second)
        V1Srcs.push_back(PV1);
    }
  }

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return MayAlias;

  // If this PHI node is recursive, set the size of the accessed memory to
  // unknown to represent all the possible values the GEP could advance the
  // pointer to.
  if (isRecursive)
    PNSize = LocationSize::unknown();

  AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0], PNSize,
                                 PNAAInfo, AAQI, UnderV2);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid for all elements, so we conservatively return MayAlias.
  if (isRecursive && Alias != NoAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then we return
  // NoAlias / MustAlias. Otherwise, we return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias =
        aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, AAQI, UnderV2);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}
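
// The recursive-phi case special-cased above (illustrative IR, not from this
// file):
//   loop:
//     %p      = phi i8* [ %init, %entry ], [ %p.next, %loop ]
//     %p.next = getelementptr i8, i8* %p, i64 4
// With -basic-aa-recphi enabled, %p.next is recognised as merely advancing
// %p, so rather than recursing forever the analysis widens PNSize to unknown
// and compares V2 against the non-recursive source %init only.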

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      AAMDNodes V1AAInfo, const Value *V2,
                                      LocationSize V2Size, AAMDNodes V2AAInfo,
                                      AAQueryInfo &AAQI, const Value *O1,
                                      const Value *O2) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsAndInvariantGroups();
  V2 = V2->stripPointerCastsAndInvariantGroups();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers
  // from different iterations. We must therefore make sure that this is not
  // the case. The function isValueEqualInPotentialCycles ensures that this
  // cannot happen by looking at the visited phi nodes and making sure they
  // cannot reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other.

  // Figure out what objects these things are pointing to if we can.
  if (O1 == nullptr)
    O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);

  if (O2 == nullptr)
    O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is
    // a non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) &&
        isNonEscapingLocalObject(O2, &AAQI.IsCapturedCache))
      return NoAlias;
    if (isEscapeSource(O2) &&
        isNonEscapingLocalObject(O1, &AAQI.IsCapturedCache))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  AAQueryInfo::LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
                            MemoryLocation(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AAQueryInfo::AliasCacheT::iterator, bool> Pair =
      AAQI.AliasCache.try_emplace(Locs, MayAlias);
  if (!Pair.second)
    return Pair.first->second;

  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if
  // the GEP can't simplify, we don't even look at the PHI cases.
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result =
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2, AAQI);
    if (Result != MayAlias) {
      auto ItInsPair = AAQI.AliasCache.insert(std::make_pair(Locs, Result));
      assert(!ItInsPair.second && "Entry must have existed");
      ItInsPair.first->second = Result;
      return Result;
    }
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(O1, O2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result =
        aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI);
    if (Result != MayAlias) {
      Pair = AAQI.AliasCache.try_emplace(Locs, Result);
      assert(!Pair.second && "Entry must have existed");
      return Pair.first->second = Result;
    }
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(O1, O2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result =
        aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI);
    if (Result != MayAlias) {
      Pair = AAQI.AliasCache.try_emplace(Locs, Result);
      assert(!Pair.second && "Entry must have existed");
      return Pair.first->second = Result;
    }
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2)
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation))) {
      Pair = AAQI.AliasCache.try_emplace(Locs, PartialAlias);
      assert(!Pair.second && "Entry must have existed");
      return Pair.first->second = PartialAlias;
    }

  // Recurse back into the best AA results we have, potentially with refined
  // memory locations. We have already ensured that BasicAA has a MayAlias
  // cache result for these, so any recursion back into BasicAA won't loop.
  AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second, AAQI);
  Pair = AAQI.AliasCache.try_emplace(Locs, Result);
  assert(!Pair.second && "Entry must have existed");
  return Pair.first->second = Result;
}

/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V and \p V2 this checks whether
/// they cannot be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value.
/// We have to do this because we are looking through phi nodes (that is, we
/// say noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT, LI))
      return false;

  return true;
}
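
// Why the reachability check matters (illustrative IR, not from this file):
//   loop:
//     %p      = phi i8* [ %base, %entry ], [ %p.next, %loop ]
//     %p.next = getelementptr i8, i8* %p, i64 4
// While analysing through %p, the SSA value %p.next compared against itself
// denotes different addresses on different loop iterations, so textual
// equality alone must not be taken as MustAlias once a visited phi block can
// reach the instruction.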

/// Computes the symbolic difference between two de-composed GEPs.
///
/// Dest and Src are the variable indices from two decomposed GetElementPtr
/// instructions GEP1 and GEP2 which have common base pointers.
void BasicAAResult::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
    APInt Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indices.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (!!Scale) {
      VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
      Dest.push_back(Entry);
    }
  }
}
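
// Worked example (illustrative): with Dest = { 4*%i } and Src = { 4*%i,
// 2*%j }, the 4*%i terms cancel exactly and 2*%j is unmatched, so Dest
// becomes { -2*%j }; the symbolic difference GEP1 - GEP2 then carries the
// single varying term -2*%j on top of any constant offset difference.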

bool BasicAAResult::constantOffsetHeuristic(
    const SmallVectorImpl<VariableGEPIndex> &VarIndices,
    LocationSize MaybeV1Size, LocationSize MaybeV2Size, const APInt &BaseOffset,
    AssumptionCache *AC, DominatorTree *DT) {
  if (VarIndices.size() != 2 || MaybeV1Size == LocationSize::unknown() ||
      MaybeV2Size == LocationSize::unknown())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale)
    return false;

  unsigned Width = Var1.V->getType()->getIntegerBitWidth();

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. For example, if Var0 is
  // zext(%x + 1) we should get V0 == %x and V0Offset == 1.

  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
      V1Offset(Width, 0);
  bool NSW = true, NUW = true;
  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
                                        V0SExtBits, DL, 0, AC, DT, NSW, NUW);
  NSW = true;
  NUW = true;
  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
                                        V1SExtBits, DL, 0, AC, DT, NSW, NUW);

  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
      V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd, the maximum difference between Var0
  // and Var1 is possible to calculate, but we're just interested in the
  // absolute minimum difference between the two. The minimum distance may
  // occur due to wrapping; consider "add i3 %i, 5": if %i == 7 then
  // 7 + 5 mod 8 == 4, and so the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2, GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
         MinDiffBytes.uge(V2Size + BaseOffset.abs());
}
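
// Putting it together (illustrative): for &A[i] vs &A[i + 1] with i32
// elements, the cancelled indices leave Var0 scaled by +4 over zext(%i + 1)
// and Var1 scaled by -4 over zext(%i). Both strip to the same %i,
// V0Offset - V1Offset == 1, so MinDiffBytes == 4; with two 4-byte accesses
// and BaseOffset == 0, both sizes fit in the gap and the heuristic reports
// NoAlias.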

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  return BasicAAResult(F.getParent()->getDataLayout(),
                       F,
                       AM.getResult<TargetLibraryAnalysis>(F),
                       AM.getResult<AssumptionAnalysis>(F),
                       &AM.getResult<DominatorTreeAnalysis>(F),
                       AM.getCachedResult<LoopAnalysis>(F),
                       AM.getCachedResult<PhiValuesAnalysis>(F));
}
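
// Minimal sketch of obtaining the result through the new pass manager
// (illustrative usage, assuming the usual analysis registration boilerplate):
//   FunctionAnalysisManager FAM;
//   FAM.registerPass([] { return BasicAA(); });
//   // ... register TargetLibraryAnalysis, AssumptionAnalysis,
//   // DominatorTreeAnalysis, etc. ...
//   BasicAAResult &BAA = FAM.getResult<BasicAA>(F);
// LoopAnalysis and PhiValuesAnalysis are only queried via getCachedResult
// above, so BasicAA works without them but gives more precise answers when
// they are available.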

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree(),
                                 LIWP ? &LIWP->getLoopInfo() : nullptr,
                                 PVWP ? &PVWP->getResult() : nullptr));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<PhiValuesWrapperPass>();
}

BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(), F,
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}