//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Analysis/LoopAccessAnalysis.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/DepthFirstIterator.h"
18 #include "llvm/ADT/EquivalenceClasses.h"
19 #include "llvm/ADT/PointerIntPair.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SetVector.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/AliasSetTracker.h"
28 #include "llvm/Analysis/LoopAnalysisManager.h"
29 #include "llvm/Analysis/LoopInfo.h"
30 #include "llvm/Analysis/LoopIterator.h"
31 #include "llvm/Analysis/MemoryLocation.h"
32 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
33 #include "llvm/Analysis/ScalarEvolution.h"
34 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
35 #include "llvm/Analysis/TargetLibraryInfo.h"
36 #include "llvm/Analysis/ValueTracking.h"
37 #include "llvm/Analysis/VectorUtils.h"
38 #include "llvm/IR/BasicBlock.h"
39 #include "llvm/IR/Constants.h"
40 #include "llvm/IR/DataLayout.h"
41 #include "llvm/IR/DebugLoc.h"
42 #include "llvm/IR/DerivedTypes.h"
43 #include "llvm/IR/DiagnosticInfo.h"
44 #include "llvm/IR/Dominators.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/GetElementPtrTypeIterator.h"
47 #include "llvm/IR/InstrTypes.h"
48 #include "llvm/IR/Instruction.h"
49 #include "llvm/IR/Instructions.h"
50 #include "llvm/IR/Operator.h"
51 #include "llvm/IR/PassManager.h"
52 #include "llvm/IR/PatternMatch.h"
53 #include "llvm/IR/Type.h"
54 #include "llvm/IR/Value.h"
55 #include "llvm/IR/ValueHandle.h"
56 #include "llvm/InitializePasses.h"
57 #include "llvm/Pass.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/CommandLine.h"
60 #include "llvm/Support/Debug.h"
61 #include "llvm/Support/ErrorHandling.h"
62 #include "llvm/Support/raw_ostream.h"
63 #include <algorithm>
64 #include <cassert>
65 #include <cstdint>
66 #include <iterator>
67 #include <utility>
68 #include <vector>
69 
70 using namespace llvm;
71 using namespace llvm::PatternMatch;
72 
73 #define DEBUG_TYPE "loop-accesses"
74 
75 static cl::opt<unsigned, true>
76 VectorizationFactor("force-vector-width", cl::Hidden,
77                     cl::desc("Sets the SIMD width. Zero is autoselect."),
78                     cl::location(VectorizerParams::VectorizationFactor));
79 unsigned VectorizerParams::VectorizationFactor;
80 
81 static cl::opt<unsigned, true>
82 VectorizationInterleave("force-vector-interleave", cl::Hidden,
83                         cl::desc("Sets the vectorization interleave count. "
84                                  "Zero is autoselect."),
85                         cl::location(
86                             VectorizerParams::VectorizationInterleave));
87 unsigned VectorizerParams::VectorizationInterleave;
88 
89 static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
90     "runtime-memory-check-threshold", cl::Hidden,
91     cl::desc("When performing memory disambiguation checks at runtime do not "
92              "generate more than this number of comparisons (default = 8)."),
93     cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
94 unsigned VectorizerParams::RuntimeMemoryCheckThreshold;
95 
/// The maximum number of comparisons allowed when merging runtime memory checks.
97 static cl::opt<unsigned> MemoryCheckMergeThreshold(
98     "memory-check-merge-threshold", cl::Hidden,
99     cl::desc("Maximum number of comparisons done when trying to merge "
100              "runtime memory checks. (default = 100)"),
101     cl::init(100));
102 
103 /// Maximum SIMD width.
104 const unsigned VectorizerParams::MaxVectorWidth = 64;
105 
106 /// We collect dependences up to this threshold.
107 static cl::opt<unsigned>
108     MaxDependences("max-dependences", cl::Hidden,
109                    cl::desc("Maximum number of dependences collected by "
110                             "loop-access analysis (default = 100)"),
111                    cl::init(100));
112 
113 /// This enables versioning on the strides of symbolically striding memory
114 /// accesses in code like the following.
115 ///   for (i = 0; i < N; ++i)
116 ///     A[i * Stride1] += B[i * Stride2] ...
117 ///
118 /// Will be roughly translated to
119 ///    if (Stride1 == 1 && Stride2 == 1) {
120 ///      for (i = 0; i < N; i+=4)
121 ///       A[i:i+3] += ...
122 ///    } else
123 ///      ...
124 static cl::opt<bool> EnableMemAccessVersioning(
125     "enable-mem-access-versioning", cl::init(true), cl::Hidden,
126     cl::desc("Enable symbolic stride memory access versioning"));
127 
128 /// Enable store-to-load forwarding conflict detection. This option can
129 /// be disabled for correctness testing.
130 static cl::opt<bool> EnableForwardingConflictDetection(
131     "store-to-load-forwarding-conflict-detection", cl::Hidden,
132     cl::desc("Enable conflict detection in loop-access analysis"),
133     cl::init(true));
134 
135 static cl::opt<unsigned> MaxForkedSCEVDepth(
136     "max-forked-scev-depth", cl::Hidden,
137     cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
138     cl::init(5));
139 
140 static cl::opt<bool> SpeculateUnitStride(
141     "laa-speculate-unit-stride", cl::Hidden,
142     cl::desc("Speculate that non-constant strides are unit in LAA"),
143     cl::init(true));
144 
145 bool VectorizerParams::isInterleaveForced() {
146   return ::VectorizationInterleave.getNumOccurrences() > 0;
147 }
148 
149 const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
150                                             const DenseMap<Value *, const SCEV *> &PtrToStride,
151                                             Value *Ptr) {
152   const SCEV *OrigSCEV = PSE.getSCEV(Ptr);
153 
154   // If there is an entry in the map return the SCEV of the pointer with the
155   // symbolic stride replaced by one.
156   DenseMap<Value *, const SCEV *>::const_iterator SI = PtrToStride.find(Ptr);
157   if (SI == PtrToStride.end())
158     // For a non-symbolic stride, just return the original expression.
159     return OrigSCEV;
160 
161   const SCEV *StrideSCEV = SI->second;
162   // Note: This assert is both overly strong and overly weak.  The actual
163   // invariant here is that StrideSCEV should be loop invariant.  The only
164   // such invariant strides we happen to speculate right now are unknowns
165   // and thus this is a reasonable proxy of the actual invariant.
166   assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
167 
168   ScalarEvolution *SE = PSE.getSE();
169   const auto *CT = SE->getOne(StrideSCEV->getType());
170   PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
171   auto *Expr = PSE.getSCEV(Ptr);
172 
  LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
                    << " by: " << *Expr << "\n");
175   return Expr;
176 }
177 
178 RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
179     unsigned Index, RuntimePointerChecking &RtCheck)
180     : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
181       AddressSpace(RtCheck.Pointers[Index]
182                        .PointerValue->getType()
183                        ->getPointerAddressSpace()),
184       NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
185   Members.push_back(Index);
186 }
187 
188 /// Calculate Start and End points of memory access.
/// Let's assume A is the first access and B is a memory access on the N-th
/// loop iteration. Then B is calculated as:
///   B = A + Step*N .
/// Step value may be positive or negative.
/// N is a calculated back-edge taken count:
///     N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
195 /// Start and End points are calculated in the following way:
196 /// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
197 /// where SizeOfElt is the size of single memory access in bytes.
198 ///
199 /// There is no conflict when the intervals are disjoint:
200 /// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
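///
/// For example, with A = %base, Step = 4, N = 7 and SizeOfElt = 4 we get
/// B = %base + 28, so Start = %base and End = %base + 32; with Step = -4
/// instead, B = %base - 28, so Start = %base - 28 and End = %base + 4.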
201 void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
202                                     Type *AccessTy, bool WritePtr,
203                                     unsigned DepSetId, unsigned ASId,
204                                     PredicatedScalarEvolution &PSE,
205                                     bool NeedsFreeze) {
206   ScalarEvolution *SE = PSE.getSE();
207 
208   const SCEV *ScStart;
209   const SCEV *ScEnd;
210 
211   if (SE->isLoopInvariant(PtrExpr, Lp)) {
212     ScStart = ScEnd = PtrExpr;
213   } else {
214     const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr);
215     assert(AR && "Invalid addrec expression");
216     const SCEV *Ex = PSE.getBackedgeTakenCount();
217 
218     ScStart = AR->getStart();
219     ScEnd = AR->evaluateAtIteration(Ex, *SE);
220     const SCEV *Step = AR->getStepRecurrence(*SE);
221 
222     // For expressions with negative step, the upper bound is ScStart and the
223     // lower bound is ScEnd.
224     if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
225       if (CStep->getValue()->isNegative())
226         std::swap(ScStart, ScEnd);
227     } else {
228       // Fallback case: the step is not constant, but we can still
229       // get the upper and lower bounds of the interval by using min/max
230       // expressions.
231       ScStart = SE->getUMinExpr(ScStart, ScEnd);
232       ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
233     }
234   }
235   assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
  assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");
237 
238   // Add the size of the pointed element to ScEnd.
239   auto &DL = Lp->getHeader()->getModule()->getDataLayout();
240   Type *IdxTy = DL.getIndexType(Ptr->getType());
241   const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
242   ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
243 
244   Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
245                         NeedsFreeze);
246 }
247 
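/// Try to replace the full overlap check between groups \p CGI and \p CGJ
/// with a cheaper pointer-difference check. If that is not possible, clear
/// CanUseDiffCheck so that difference checks are not used at all.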
248 void RuntimePointerChecking::tryToCreateDiffCheck(
249     const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
250   if (!CanUseDiffCheck)
251     return;
252 
253   // If either group contains multiple different pointers, bail out.
254   // TODO: Support multiple pointers by using the minimum or maximum pointer,
255   // depending on src & sink.
256   if (CGI.Members.size() != 1 || CGJ.Members.size() != 1) {
257     CanUseDiffCheck = false;
258     return;
259   }
260 
261   PointerInfo *Src = &Pointers[CGI.Members[0]];
262   PointerInfo *Sink = &Pointers[CGJ.Members[0]];
263 
264   // If either pointer is read and written, multiple checks may be needed. Bail
265   // out.
266   if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
267       !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty()) {
268     CanUseDiffCheck = false;
269     return;
270   }
271 
272   ArrayRef<unsigned> AccSrc =
273       DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
274   ArrayRef<unsigned> AccSink =
275       DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
276   // If either pointer is accessed multiple times, there may not be a clear
277   // src/sink relation. Bail out for now.
278   if (AccSrc.size() != 1 || AccSink.size() != 1) {
279     CanUseDiffCheck = false;
280     return;
281   }
282   // If the sink is accessed before src, swap src/sink.
283   if (AccSink[0] < AccSrc[0])
284     std::swap(Src, Sink);
285 
286   auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
287   auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
288   if (!SrcAR || !SinkAR || SrcAR->getLoop() != DC.getInnermostLoop() ||
289       SinkAR->getLoop() != DC.getInnermostLoop()) {
290     CanUseDiffCheck = false;
291     return;
292   }
293 
294   SmallVector<Instruction *, 4> SrcInsts =
295       DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
296   SmallVector<Instruction *, 4> SinkInsts =
297       DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
298   Type *SrcTy = getLoadStoreType(SrcInsts[0]);
299   Type *DstTy = getLoadStoreType(SinkInsts[0]);
300   if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy)) {
301     CanUseDiffCheck = false;
302     return;
303   }
304   const DataLayout &DL =
305       SinkAR->getLoop()->getHeader()->getModule()->getDataLayout();
306   unsigned AllocSize =
307       std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
308 
  // Only constant steps matching the AllocSize are supported at the moment.
  // This simplifies the difference computation and can be extended in the
  // future.
312   auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
313   if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
314       Step->getAPInt().abs() != AllocSize) {
315     CanUseDiffCheck = false;
316     return;
317   }
318 
319   IntegerType *IntTy =
320       IntegerType::get(Src->PointerValue->getContext(),
321                        DL.getPointerSizeInBits(CGI.AddressSpace));
322 
323   // When counting down, the dependence distance needs to be swapped.
324   if (Step->getValue()->isNegative())
325     std::swap(SinkAR, SrcAR);
326 
327   const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);
328   const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcAR->getStart(), IntTy);
329   if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
330       isa<SCEVCouldNotCompute>(SrcStartInt)) {
331     CanUseDiffCheck = false;
332     return;
333   }
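  // Record the candidate difference check; the actual bound comparison is
  // emitted later by the consumer of DiffChecks.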
334   DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
335                           Src->NeedsFreeze || Sink->NeedsFreeze);
336 }
337 
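/// Generate all pairs of checking-pointer groups that need to be compared at
/// run time, trying to build a cheaper difference check for each pair.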
338 SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
339   SmallVector<RuntimePointerCheck, 4> Checks;
340 
341   for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
342     for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
343       const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
344       const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];
345 
346       if (needsChecking(CGI, CGJ)) {
347         tryToCreateDiffCheck(CGI, CGJ);
348         Checks.push_back(std::make_pair(&CGI, &CGJ));
349       }
350     }
351   }
352   return Checks;
353 }
354 
355 void RuntimePointerChecking::generateChecks(
356     MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
357   assert(Checks.empty() && "Checks is not empty");
358   groupChecks(DepCands, UseDependencies);
359   Checks = generateChecks();
360 }
361 
362 bool RuntimePointerChecking::needsChecking(
363     const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
364   for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
365     for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
366       if (needsChecking(M.Members[I], N.Members[J]))
367         return true;
368   return false;
369 }
370 
/// Compare \p I and \p J and return the minimum.
/// Return nullptr if the difference is not a constant and the minimum cannot
/// be determined.
373 static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
374                                    ScalarEvolution *SE) {
375   const SCEV *Diff = SE->getMinusSCEV(J, I);
376   const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);
377 
378   if (!C)
379     return nullptr;
380   if (C->getValue()->isNegative())
381     return J;
382   return I;
383 }
384 
385 bool RuntimeCheckingPtrGroup::addPointer(unsigned Index,
386                                          RuntimePointerChecking &RtCheck) {
387   return addPointer(
388       Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
389       RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
390       RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
391 }
392 
393 bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
394                                          const SCEV *End, unsigned AS,
395                                          bool NeedsFreeze,
396                                          ScalarEvolution &SE) {
397   assert(AddressSpace == AS &&
398          "all pointers in a checking group must be in the same address space");
399 
400   // Compare the starts and ends with the known minimum and maximum
401   // of this set. We need to know how we compare against the min/max
402   // of the set in order to be able to emit memchecks.
403   const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
404   if (!Min0)
405     return false;
406 
407   const SCEV *Min1 = getMinFromExprs(End, High, &SE);
408   if (!Min1)
409     return false;
410 
  // Update the low bound expression if we've found a new min value.
412   if (Min0 == Start)
413     Low = Start;
414 
415   // Update the high bound expression if we've found a new max value.
416   if (Min1 != End)
417     High = End;
418 
419   Members.push_back(Index);
420   this->NeedsFreeze |= NeedsFreeze;
421   return true;
422 }
423 
424 void RuntimePointerChecking::groupChecks(
425     MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
426   // We build the groups from dependency candidates equivalence classes
427   // because:
428   //    - We know that pointers in the same equivalence class share
429   //      the same underlying object and therefore there is a chance
430   //      that we can compare pointers
431   //    - We wouldn't be able to merge two pointers for which we need
432   //      to emit a memcheck. The classes in DepCands are already
433   //      conveniently built such that no two pointers in the same
434   //      class need checking against each other.
435 
  // We use the following (greedy) algorithm to construct the groups.
  // For every pointer in the equivalence class:
438   //   For each existing group:
439   //   - if the difference between this pointer and the min/max bounds
440   //     of the group is a constant, then make the pointer part of the
441   //     group and update the min/max bounds of that group as required.
442 
443   CheckingGroups.clear();
444 
445   // If we need to check two pointers to the same underlying object
446   // with a non-constant difference, we shouldn't perform any pointer
447   // grouping with those pointers. This is because we can easily get
448   // into cases where the resulting check would return false, even when
449   // the accesses are safe.
450   //
451   // The following example shows this:
452   // for (i = 0; i < 1000; ++i)
453   //   a[5000 + i * m] = a[i] + a[i + 9000]
454   //
455   // Here grouping gives a check of (5000, 5000 + 1000 * m) against
456   // (0, 10000) which is always false. However, if m is 1, there is no
457   // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
458   // us to perform an accurate check in this case.
459   //
460   // The above case requires that we have an UnknownDependence between
461   // accesses to the same underlying object. This cannot happen unless
462   // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
463   // is also false. In this case we will use the fallback path and create
464   // separate checking groups for all pointers.
465 
466   // If we don't have the dependency partitions, construct a new
467   // checking pointer group for each pointer. This is also required
468   // for correctness, because in this case we can have checking between
469   // pointers to the same underlying object.
470   if (!UseDependencies) {
471     for (unsigned I = 0; I < Pointers.size(); ++I)
472       CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
473     return;
474   }
475 
476   unsigned TotalComparisons = 0;
477 
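  // Map each pointer value to the indices of all entries in 'Pointers' that
  // use it, so that members of an equivalence class can be found by value.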
478   DenseMap<Value *, SmallVector<unsigned>> PositionMap;
479   for (unsigned Index = 0; Index < Pointers.size(); ++Index) {
480     auto Iter = PositionMap.insert({Pointers[Index].PointerValue, {}});
481     Iter.first->second.push_back(Index);
482   }
483 
484   // We need to keep track of what pointers we've already seen so we
485   // don't process them twice.
486   SmallSet<unsigned, 2> Seen;
487 
488   // Go through all equivalence classes, get the "pointer check groups"
489   // and add them to the overall solution. We use the order in which accesses
490   // appear in 'Pointers' to enforce determinism.
491   for (unsigned I = 0; I < Pointers.size(); ++I) {
492     // We've seen this pointer before, and therefore already processed
493     // its equivalence class.
494     if (Seen.count(I))
495       continue;
496 
497     MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
498                                            Pointers[I].IsWritePtr);
499 
500     SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
501     auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));
502 
503     // Because DepCands is constructed by visiting accesses in the order in
504     // which they appear in alias sets (which is deterministic) and the
505     // iteration order within an equivalence class member is only dependent on
506     // the order in which unions and insertions are performed on the
507     // equivalence class, the iteration order is deterministic.
508     for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
509          MI != ME; ++MI) {
510       auto PointerI = PositionMap.find(MI->getPointer());
511       assert(PointerI != PositionMap.end() &&
512              "pointer in equivalence class not found in PositionMap");
513       for (unsigned Pointer : PointerI->second) {
514         bool Merged = false;
515         // Mark this pointer as seen.
516         Seen.insert(Pointer);
517 
518         // Go through all the existing sets and see if we can find one
519         // which can include this pointer.
520         for (RuntimeCheckingPtrGroup &Group : Groups) {
521           // Don't perform more than a certain amount of comparisons.
522           // This should limit the cost of grouping the pointers to something
523           // reasonable.  If we do end up hitting this threshold, the algorithm
524           // will create separate groups for all remaining pointers.
525           if (TotalComparisons > MemoryCheckMergeThreshold)
526             break;
527 
528           TotalComparisons++;
529 
530           if (Group.addPointer(Pointer, *this)) {
531             Merged = true;
532             break;
533           }
534         }
535 
536         if (!Merged)
537           // We couldn't add this pointer to any existing set or the threshold
538           // for the number of comparisons has been reached. Create a new group
539           // to hold the current pointer.
540           Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
541       }
542     }
543 
544     // We've computed the grouped checks for this partition.
545     // Save the results and continue with the next one.
546     llvm::copy(Groups, std::back_inserter(CheckingGroups));
547   }
548 }
549 
550 bool RuntimePointerChecking::arePointersInSamePartition(
551     const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
552     unsigned PtrIdx2) {
553   return (PtrToPartition[PtrIdx1] != -1 &&
554           PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
555 }
556 
557 bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
558   const PointerInfo &PointerI = Pointers[I];
559   const PointerInfo &PointerJ = Pointers[J];
560 
561   // No need to check if two readonly pointers intersect.
562   if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
563     return false;
564 
565   // Only need to check pointers between two different dependency sets.
566   if (PointerI.DependencySetId == PointerJ.DependencySetId)
567     return false;
568 
569   // Only need to check pointers in the same alias set.
570   if (PointerI.AliasSetId != PointerJ.AliasSetId)
571     return false;
572 
573   return true;
574 }
575 
576 void RuntimePointerChecking::printChecks(
577     raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
578     unsigned Depth) const {
579   unsigned N = 0;
580   for (const auto &Check : Checks) {
581     const auto &First = Check.first->Members, &Second = Check.second->Members;
582 
583     OS.indent(Depth) << "Check " << N++ << ":\n";
584 
585     OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
586     for (unsigned K = 0; K < First.size(); ++K)
587       OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";
588 
589     OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
590     for (unsigned K = 0; K < Second.size(); ++K)
591       OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
592   }
593 }
594 
595 void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
596 
597   OS.indent(Depth) << "Run-time memory checks:\n";
598   printChecks(OS, Checks, Depth);
599 
600   OS.indent(Depth) << "Grouped accesses:\n";
601   for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
602     const auto &CG = CheckingGroups[I];
603 
604     OS.indent(Depth + 2) << "Group " << &CG << ":\n";
605     OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
606                          << ")\n";
607     for (unsigned J = 0; J < CG.Members.size(); ++J) {
608       OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
609                            << "\n";
610     }
611   }
612 }
613 
614 namespace {
615 
616 /// Analyses memory accesses in a loop.
617 ///
618 /// Checks whether run time pointer checks are needed and builds sets for data
619 /// dependence checking.
620 class AccessAnalysis {
621 public:
622   /// Read or write access location.
623   typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
624   typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
625 
626   AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
627                  MemoryDepChecker::DepCandidates &DA,
628                  PredicatedScalarEvolution &PSE)
629       : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE) {
630     // We're analyzing dependences across loop iterations.
631     BAA.enableCrossIterationMode();
632   }
633 
  /// Register a load and whether it is only read from.
635   void addLoad(MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
636     Value *Ptr = const_cast<Value*>(Loc.Ptr);
637     AST.add(Ptr, LocationSize::beforeOrAfterPointer(), Loc.AATags);
638     Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
639     if (IsReadOnly)
640       ReadOnlyPtr.insert(Ptr);
641   }
642 
643   /// Register a store.
644   void addStore(MemoryLocation &Loc, Type *AccessTy) {
645     Value *Ptr = const_cast<Value*>(Loc.Ptr);
646     AST.add(Ptr, LocationSize::beforeOrAfterPointer(), Loc.AATags);
647     Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
648   }
649 
650   /// Check if we can emit a run-time no-alias check for \p Access.
651   ///
652   /// Returns true if we can emit a run-time no alias check for \p Access.
653   /// If we can check this access, this also adds it to a dependence set and
  /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
655   /// we will attempt to use additional run-time checks in order to get
656   /// the bounds of the pointer.
657   bool createCheckForAccess(RuntimePointerChecking &RtCheck,
658                             MemAccessInfo Access, Type *AccessTy,
659                             const DenseMap<Value *, const SCEV *> &Strides,
660                             DenseMap<Value *, unsigned> &DepSetId,
661                             Loop *TheLoop, unsigned &RunningDepId,
662                             unsigned ASId, bool ShouldCheckStride, bool Assume);
663 
664   /// Check whether we can check the pointers at runtime for
665   /// non-intersection.
666   ///
667   /// Returns true if we need no check or if we do and we can generate them
668   /// (i.e. the pointers have computable bounds).
669   bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
670                        Loop *TheLoop, const DenseMap<Value *, const SCEV *> &Strides,
671                        Value *&UncomputablePtr, bool ShouldCheckWrap = false);
672 
673   /// Goes over all memory accesses, checks whether a RT check is needed
674   /// and builds sets of dependent accesses.
675   void buildDependenceSets() {
676     processMemAccesses();
677   }
678 
679   /// Initial processing of memory accesses determined that we need to
680   /// perform dependency checking.
681   ///
682   /// Note that this can later be cleared if we retry memcheck analysis without
683   /// dependency checking (i.e. FoundNonConstantDistanceDependence).
684   bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }
685 
686   /// We decided that no dependence analysis would be used.  Reset the state.
687   void resetDepChecks(MemoryDepChecker &DepChecker) {
688     CheckDeps.clear();
689     DepChecker.clearDependences();
690   }
691 
692   MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }
693 
694 private:
695   typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;
696 
  /// Go over all memory accesses and check whether runtime pointer checks
698   /// are needed and build sets of dependency check candidates.
699   void processMemAccesses();
700 
701   /// Map of all accesses. Values are the types used to access memory pointed to
702   /// by the pointer.
703   PtrAccessMap Accesses;
704 
705   /// The loop being checked.
706   const Loop *TheLoop;
707 
708   /// List of accesses that need a further dependence check.
709   MemAccessInfoList CheckDeps;
710 
711   /// Set of pointers that are read only.
712   SmallPtrSet<Value*, 16> ReadOnlyPtr;
713 
714   /// Batched alias analysis results.
715   BatchAAResults BAA;
716 
  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
719   AliasSetTracker AST;
720 
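  /// LoopInfo, used when searching for the underlying objects of pointers.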
721   LoopInfo *LI;
722 
  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
726   MemoryDepChecker::DepCandidates &DepCands;
727 
728   /// Initial processing of memory accesses determined that we may need
729   /// to add memchecks.  Perform the analysis to determine the necessary checks.
730   ///
  /// Note that this is different from isDependencyCheckNeeded.  When we retry
732   /// memcheck analysis without dependency checking
733   /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
734   /// cleared while this remains set if we have potentially dependent accesses.
735   bool IsRTCheckAnalysisNeeded = false;
736 
737   /// The SCEV predicate containing all the SCEV-related assumptions.
738   PredicatedScalarEvolution &PSE;
739 };
740 
741 } // end anonymous namespace
742 
743 /// Check whether a pointer can participate in a runtime bounds check.
744 /// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
745 /// by adding run-time checks (overflow checks) if necessary.
746 static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr,
747                                 const SCEV *PtrScev, Loop *L, bool Assume) {
  // The bounds for a loop-invariant pointer are trivial.
749   if (PSE.getSE()->isLoopInvariant(PtrScev, L))
750     return true;
751 
752   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
753 
754   if (!AR && Assume)
755     AR = PSE.getAsAddRec(Ptr);
756 
757   if (!AR)
758     return false;
759 
760   return AR->isAffine();
761 }
762 
763 /// Check whether a pointer address cannot wrap.
764 static bool isNoWrap(PredicatedScalarEvolution &PSE,
765                      const DenseMap<Value *, const SCEV *> &Strides, Value *Ptr, Type *AccessTy,
766                      Loop *L) {
767   const SCEV *PtrScev = PSE.getSCEV(Ptr);
768   if (PSE.getSE()->isLoopInvariant(PtrScev, L))
769     return true;
770 
771   int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides).value_or(0);
772   if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
773     return true;
774 
775   return false;
776 }
777 
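/// Visit the pointers reachable from \p StartPtr, looking through non-header
/// PHIs inside \p InnermostLoop, and invoke \p AddPointer on each pointer
/// value reached this way.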
778 static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
779                           function_ref<void(Value *)> AddPointer) {
780   SmallPtrSet<Value *, 8> Visited;
781   SmallVector<Value *> WorkList;
782   WorkList.push_back(StartPtr);
783 
784   while (!WorkList.empty()) {
785     Value *Ptr = WorkList.pop_back_val();
786     if (!Visited.insert(Ptr).second)
787       continue;
788     auto *PN = dyn_cast<PHINode>(Ptr);
789     // SCEV does not look through non-header PHIs inside the loop. Such phis
790     // can be analyzed by adding separate accesses for each incoming pointer
791     // value.
792     if (PN && InnermostLoop.contains(PN->getParent()) &&
793         PN->getParent() != InnermostLoop.getHeader()) {
794       for (const Use &Inc : PN->incoming_values())
795         WorkList.push_back(Inc);
796     } else
797       AddPointer(Ptr);
798   }
799 }
800 
801 // Walk back through the IR for a pointer, looking for a select like the
802 // following:
803 //
804 //  %offset = select i1 %cmp, i64 %a, i64 %b
805 //  %addr = getelementptr double, double* %base, i64 %offset
806 //  %ld = load double, double* %addr, align 8
807 //
808 // We won't be able to form a single SCEVAddRecExpr from this since the
809 // address for each loop iteration depends on %cmp. We could potentially
810 // produce multiple valid SCEVAddRecExprs, though, and check all of them for
811 // memory safety/aliasing if needed.
812 //
813 // If we encounter some IR we don't yet handle, or something obviously fine
814 // like a constant, then we just add the SCEV for that term to the list passed
815 // in by the caller. If we have a node that may potentially yield a valid
816 // SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
817 // ourselves before adding to the list.
818 static void findForkedSCEVs(
819     ScalarEvolution *SE, const Loop *L, Value *Ptr,
820     SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
821     unsigned Depth) {
822   // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
823   // we've exceeded our limit on recursion, just return whatever we have
824   // regardless of whether it can be used for a forked pointer or not, along
825   // with an indication of whether it might be a poison or undef value.
826   const SCEV *Scev = SE->getSCEV(Ptr);
827   if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
828       !isa<Instruction>(Ptr) || Depth == 0) {
829     ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
830     return;
831   }
832 
833   Depth--;
834 
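  // Returns true if the pointer-SCEV pair may be undef or poison and would
  // therefore need freezing.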
835   auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
836     return get<1>(S);
837   };
838 
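  // Builds the SCEV for applying an add or a subtract to the two forked
  // operand SCEVs.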
839   auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
840     switch (Opcode) {
841     case Instruction::Add:
842       return SE->getAddExpr(L, R);
843     case Instruction::Sub:
844       return SE->getMinusSCEV(L, R);
845     default:
846       llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
847     }
848   };
849 
850   Instruction *I = cast<Instruction>(Ptr);
851   unsigned Opcode = I->getOpcode();
852   switch (Opcode) {
853   case Instruction::GetElementPtr: {
854     GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
855     Type *SourceTy = GEP->getSourceElementType();
856     // We only handle base + single offset GEPs here for now.
857     // Not dealing with preexisting gathers yet, so no vectors.
858     if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
859       ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
860       break;
861     }
862     SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> BaseScevs;
863     SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> OffsetScevs;
864     findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
865     findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
866 
867     // See if we need to freeze our fork...
868     bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
869                        any_of(OffsetScevs, UndefPoisonCheck);
870 
871     // Check that we only have a single fork, on either the base or the offset.
872     // Copy the SCEV across for the one without a fork in order to generate
873     // the full SCEV for both sides of the GEP.
874     if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
875       BaseScevs.push_back(BaseScevs[0]);
876     else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
877       OffsetScevs.push_back(OffsetScevs[0]);
878     else {
879       ScevList.emplace_back(Scev, NeedsFreeze);
880       break;
881     }
882 
883     // Find the pointer type we need to extend to.
884     Type *IntPtrTy = SE->getEffectiveSCEVType(
885         SE->getSCEV(GEP->getPointerOperand())->getType());
886 
887     // Find the size of the type being pointed to. We only have a single
888     // index term (guarded above) so we don't need to index into arrays or
889     // structures, just get the size of the scalar value.
890     const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
891 
892     // Scale up the offsets by the size of the type, then add to the bases.
893     const SCEV *Scaled1 = SE->getMulExpr(
894         Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[0]), IntPtrTy));
895     const SCEV *Scaled2 = SE->getMulExpr(
896         Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[1]), IntPtrTy));
897     ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
898                           NeedsFreeze);
899     ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
900                           NeedsFreeze);
901     break;
902   }
903   case Instruction::Select: {
904     SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
    // A select means we've found a forked pointer, but we currently only
    // support a single select per pointer, so if there's another behind this
    // one we just bail out and return the generic SCEV.
908     findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
909     findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
910     if (ChildScevs.size() == 2) {
911       ScevList.push_back(ChildScevs[0]);
912       ScevList.push_back(ChildScevs[1]);
913     } else
914       ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
915     break;
916   }
917   case Instruction::Add:
918   case Instruction::Sub: {
919     SmallVector<PointerIntPair<const SCEV *, 1, bool>> LScevs;
920     SmallVector<PointerIntPair<const SCEV *, 1, bool>> RScevs;
921     findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
922     findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
923 
924     // See if we need to freeze our fork...
925     bool NeedsFreeze =
926         any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);
927 
928     // Check that we only have a single fork, on either the left or right side.
929     // Copy the SCEV across for the one without a fork in order to generate
930     // the full SCEV for both sides of the BinOp.
931     if (LScevs.size() == 2 && RScevs.size() == 1)
932       RScevs.push_back(RScevs[0]);
933     else if (RScevs.size() == 2 && LScevs.size() == 1)
934       LScevs.push_back(LScevs[0]);
935     else {
936       ScevList.emplace_back(Scev, NeedsFreeze);
937       break;
938     }
939 
940     ScevList.emplace_back(
941         GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
942         NeedsFreeze);
943     ScevList.emplace_back(
944         GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
945         NeedsFreeze);
946     break;
947   }
948   default:
949     // Just return the current SCEV if we haven't handled the instruction yet.
950     LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
951     ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
952     break;
953   }
954 }
955 
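/// Collect the candidate SCEVs for \p Ptr, returning two entries when a
/// supported forked pointer is found and otherwise a single entry with any
/// symbolic strides replaced.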
956 static SmallVector<PointerIntPair<const SCEV *, 1, bool>>
957 findForkedPointer(PredicatedScalarEvolution &PSE,
958                   const DenseMap<Value *, const SCEV *> &StridesMap, Value *Ptr,
959                   const Loop *L) {
960   ScalarEvolution *SE = PSE.getSE();
961   assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
962   SmallVector<PointerIntPair<const SCEV *, 1, bool>> Scevs;
963   findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);
964 
965   // For now, we will only accept a forked pointer with two possible SCEVs
966   // that are either SCEVAddRecExprs or loop invariant.
967   if (Scevs.size() == 2 &&
968       (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
969        SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
970       (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
971        SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
972     LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
973     LLVM_DEBUG(dbgs() << "\t(1) " << *get<0>(Scevs[0]) << "\n");
974     LLVM_DEBUG(dbgs() << "\t(2) " << *get<0>(Scevs[1]) << "\n");
975     return Scevs;
976   }
977 
978   return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
979 }
980 
981 bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
982                                           MemAccessInfo Access, Type *AccessTy,
983                                           const DenseMap<Value *, const SCEV *> &StridesMap,
984                                           DenseMap<Value *, unsigned> &DepSetId,
985                                           Loop *TheLoop, unsigned &RunningDepId,
986                                           unsigned ASId, bool ShouldCheckWrap,
987                                           bool Assume) {
988   Value *Ptr = Access.getPointer();
989 
990   SmallVector<PointerIntPair<const SCEV *, 1, bool>> TranslatedPtrs =
991       findForkedPointer(PSE, StridesMap, Ptr, TheLoop);
992 
993   for (auto &P : TranslatedPtrs) {
994     const SCEV *PtrExpr = get<0>(P);
995     if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
996       return false;
997 
998     // When we run after a failing dependency check we have to make sure
999     // we don't have wrapping pointers.
1000     if (ShouldCheckWrap) {
1001       // Skip wrap checking when translating pointers.
1002       if (TranslatedPtrs.size() > 1)
1003         return false;
1004 
1005       if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
1006         auto *Expr = PSE.getSCEV(Ptr);
1007         if (!Assume || !isa<SCEVAddRecExpr>(Expr))
1008           return false;
1009         PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1010       }
1011     }
1012     // If there's only one option for Ptr, look it up after bounds and wrap
1013     // checking, because assumptions might have been added to PSE.
1014     if (TranslatedPtrs.size() == 1)
1015       TranslatedPtrs[0] = {replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr),
1016                            false};
1017   }
1018 
1019   for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
1020     // The id of the dependence set.
1021     unsigned DepId;
1022 
1023     if (isDependencyCheckNeeded()) {
1024       Value *Leader = DepCands.getLeaderValue(Access).getPointer();
1025       unsigned &LeaderId = DepSetId[Leader];
1026       if (!LeaderId)
1027         LeaderId = RunningDepId++;
1028       DepId = LeaderId;
1029     } else
1030       // Each access has its own dependence set.
1031       DepId = RunningDepId++;
1032 
1033     bool IsWrite = Access.getInt();
1034     RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
1035                    NeedsFreeze);
1036     LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
1037   }
1038 
1039   return true;
1040 }
1041 
1042 bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
1043                                      ScalarEvolution *SE, Loop *TheLoop,
1044                                      const DenseMap<Value *, const SCEV *> &StridesMap,
1045                                      Value *&UncomputablePtr, bool ShouldCheckWrap) {
1046   // Find pointers with computable bounds. We are going to use this information
1047   // to place a runtime bound check.
1048   bool CanDoRT = true;
1049 
1050   bool MayNeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded)
    return true;
1052 
1053   bool IsDepCheckNeeded = isDependencyCheckNeeded();
1054 
  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
1057   unsigned ASId = 0;
1058   for (auto &AS : AST) {
1059     int NumReadPtrChecks = 0;
1060     int NumWritePtrChecks = 0;
1061     bool CanDoAliasSetRT = true;
1062     ++ASId;
1063 
    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
1066     unsigned RunningDepId = 1;
1067     DenseMap<Value *, unsigned> DepSetId;
1068 
1069     SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;
1070 
1071     // First, count how many write and read accesses are in the alias set. Also
1072     // collect MemAccessInfos for later.
1073     SmallVector<MemAccessInfo, 4> AccessInfos;
1074     for (const auto &A : AS) {
1075       Value *Ptr = A.getValue();
1076       bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
1077 
1078       if (IsWrite)
1079         ++NumWritePtrChecks;
1080       else
1081         ++NumReadPtrChecks;
1082       AccessInfos.emplace_back(Ptr, IsWrite);
1083     }
1084 
    // We do not need runtime checks for this alias set if there are no writes,
    // or if there is a single write and no reads.
1087     if (NumWritePtrChecks == 0 ||
1088         (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
1089       assert((AS.size() <= 1 ||
1090               all_of(AS,
1091                      [this](auto AC) {
1092                        MemAccessInfo AccessWrite(AC.getValue(), true);
1093                        return DepCands.findValue(AccessWrite) == DepCands.end();
1094                      })) &&
1095              "Can only skip updating CanDoRT below, if all entries in AS "
1096              "are reads or there is at most 1 entry");
1097       continue;
1098     }
1099 
1100     for (auto &Access : AccessInfos) {
1101       for (const auto &AccessTy : Accesses[Access]) {
1102         if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1103                                   DepSetId, TheLoop, RunningDepId, ASId,
1104                                   ShouldCheckWrap, false)) {
1105           LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
1106                             << *Access.getPointer() << '\n');
1107           Retries.push_back({Access, AccessTy});
1108           CanDoAliasSetRT = false;
1109         }
1110       }
1111     }
1112 
    // Note that this function computes CanDoRT and MayNeedRTCheck
    // independently. For example, CanDoRT=false and MayNeedRTCheck=false
    // means that we have a pointer for which we couldn't find the bounds but
    // we don't actually need to emit any checks, so it does not matter.
1117     //
1118     // We need runtime checks for this alias set, if there are at least 2
1119     // dependence sets (in which case RunningDepId > 2) or if we need to re-try
1120     // any bound checks (because in that case the number of dependence sets is
1121     // incomplete).
1122     bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
1123 
1124     // We need to perform run-time alias checks, but some pointers had bounds
1125     // that couldn't be checked.
1126     if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
      // Reset the CanDoAliasSetRT flag and retry all accesses that have
      // failed. We know that we need these checks, so we can now be more
      // aggressive and add further checks if required (overflow checks).
1130       CanDoAliasSetRT = true;
1131       for (auto Retry : Retries) {
1132         MemAccessInfo Access = Retry.first;
1133         Type *AccessTy = Retry.second;
1134         if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1135                                   DepSetId, TheLoop, RunningDepId, ASId,
1136                                   ShouldCheckWrap, /*Assume=*/true)) {
1137           CanDoAliasSetRT = false;
1138           UncomputablePtr = Access.getPointer();
1139           break;
1140         }
1141       }
1142     }
1143 
1144     CanDoRT &= CanDoAliasSetRT;
1145     MayNeedRTCheck |= NeedsAliasSetRTCheck;
1146     ++ASId;
1147   }
1148 
1149   // If the pointers that we would use for the bounds comparison have different
1150   // address spaces, assume the values aren't directly comparable, so we can't
1151   // use them for the runtime check. We also have to assume they could
1152   // overlap. In the future there should be metadata for whether address spaces
1153   // are disjoint.
1154   unsigned NumPointers = RtCheck.Pointers.size();
1155   for (unsigned i = 0; i < NumPointers; ++i) {
1156     for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
1161       // Only need to check pointers in the same alias set.
1162       if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
1163         continue;
1164 
1165       Value *PtrI = RtCheck.Pointers[i].PointerValue;
1166       Value *PtrJ = RtCheck.Pointers[j].PointerValue;
1167 
1168       unsigned ASi = PtrI->getType()->getPointerAddressSpace();
1169       unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
1170       if (ASi != ASj) {
1171         LLVM_DEBUG(
1172             dbgs() << "LAA: Runtime check would require comparison between"
1173                       " different address spaces\n");
1174         return false;
1175       }
1176     }
1177   }
1178 
1179   if (MayNeedRTCheck && CanDoRT)
1180     RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
1181 
1182   LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
1183                     << " pointer comparisons.\n");
1184 
1185   // If we can do run-time checks, but there are no checks, no runtime checks
1186   // are needed. This can happen when all pointers point to the same underlying
1187   // object for example.
1188   RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;
1189 
1190   bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
1191   if (!CanDoRTIfNeeded)
1192     RtCheck.reset();
1193   return CanDoRTIfNeeded;
1194 }
1195 
1196 void AccessAnalysis::processMemAccesses() {
1197   // We process the set twice: first we process read-write pointers, last we
1198   // process read-only pointers. This allows us to skip dependence tests for
1199   // read-only pointers.
1200 
1201   LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
1202   LLVM_DEBUG(dbgs() << "  AST: "; AST.dump());
1203   LLVM_DEBUG(dbgs() << "LAA:   Accesses(" << Accesses.size() << "):\n");
1204   LLVM_DEBUG({
1205     for (auto A : Accesses)
1206       dbgs() << "\t" << *A.first.getPointer() << " ("
1207              << (A.first.getInt()
1208                      ? "write"
1209                      : (ReadOnlyPtr.count(A.first.getPointer()) ? "read-only"
1210                                                                 : "read"))
1211              << ")\n";
1212   });
1213 
1214   // The AliasSetTracker has nicely partitioned our pointers by metadata
1215   // compatibility and potential for underlying-object overlap. As a result, we
1216   // only need to check for potential pointer dependencies within each alias
1217   // set.
1218   for (const auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally and so the iteration order here is deterministic
    // (matching the original instruction order within each set).
1222 
1223     bool SetHasWrite = false;
1224 
1225     // Map of pointers to last access encountered.
1226     typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
1227     UnderlyingObjToAccessMap ObjToLastAccess;
1228 
    // Set of accesses to check after all writes have been processed.
1230     PtrAccessMap DeferredAccesses;
1231 
1232     // Iterate over each alias set twice, once to process read/write pointers,
1233     // and then to process read-only pointers.
1234     for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
1235       bool UseDeferred = SetIteration > 0;
1236       PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
1237 
1238       for (const auto &AV : AS) {
1239         Value *Ptr = AV.getValue();
1240 
1241         // For a single memory access in AliasSetTracker, Accesses may contain
1242         // both read and write, and they both need to be handled for CheckDeps.
1243         for (const auto &AC : S) {
1244           if (AC.first.getPointer() != Ptr)
1245             continue;
1246 
1247           bool IsWrite = AC.first.getInt();
1248 
1249           // If we're using the deferred access set, then it contains only
1250           // reads.
1251           bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
1252           if (UseDeferred && !IsReadOnlyPtr)
1253             continue;
1254           // Otherwise, the pointer must be in the PtrAccessSet, either as a
1255           // read or a write.
1256           assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
1257                   S.count(MemAccessInfo(Ptr, false))) &&
1258                  "Alias-set pointer not in the access set?");
1259 
1260           MemAccessInfo Access(Ptr, IsWrite);
1261           DepCands.insert(Access);
1262 
1263           // Memorize read-only pointers for later processing and skip them in
1264           // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
1266           // consecutive as "read-only" pointers (so that we check
1267           // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
1268           if (!UseDeferred && IsReadOnlyPtr) {
1269             // We only use the pointer keys, the types vector values don't
1270             // matter.
1271             DeferredAccesses.insert({Access, {}});
1272             continue;
1273           }
1274 
          // If this is a write, check other reads and writes for conflicts. If
          // this is a read, only check other writes for conflicts (but only if
          // there is no other write to the ptr - this is an optimization to
          // catch "a[i] = a[i] + " without having to do a dependence check).
1279           if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
1280             CheckDeps.push_back(Access);
1281             IsRTCheckAnalysisNeeded = true;
1282           }
1283 
1284           if (IsWrite)
1285             SetHasWrite = true;
1286 
1287           // Create sets of pointers connected by a shared alias set and
1288           // underlying object.
1289           typedef SmallVector<const Value *, 16> ValueVector;
1290           ValueVector TempObjects;
1291 
1292           getUnderlyingObjects(Ptr, TempObjects, LI);
1293           LLVM_DEBUG(dbgs()
1294                      << "Underlying objects for pointer " << *Ptr << "\n");
1295           for (const Value *UnderlyingObj : TempObjects) {
            // nullptr never aliases, so don't join sets for pointers that have
            // "null" in their UnderlyingObjects list.
1298             if (isa<ConstantPointerNull>(UnderlyingObj) &&
1299                 !NullPointerIsDefined(
1300                     TheLoop->getHeader()->getParent(),
1301                     UnderlyingObj->getType()->getPointerAddressSpace()))
1302               continue;
1303 
1304             UnderlyingObjToAccessMap::iterator Prev =
1305                 ObjToLastAccess.find(UnderlyingObj);
1306             if (Prev != ObjToLastAccess.end())
1307               DepCands.unionSets(Access, Prev->second);
1308 
1309             ObjToLastAccess[UnderlyingObj] = Access;
1310             LLVM_DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
1311           }
1312         }
1313       }
1314     }
1315   }
1316 }
1317 
1318 /// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
1319 /// i.e. monotonically increasing/decreasing.
1320 static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
1321                            PredicatedScalarEvolution &PSE, const Loop *L) {
1322 
1323   // FIXME: This should probably only return true for NUW.
1324   if (AR->getNoWrapFlags(SCEV::NoWrapMask))
1325     return true;
1326 
1327   if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
1328     return true;
1329 
1330   // Scalar evolution does not propagate the non-wrapping flags to values that
1331   // are derived from a non-wrapping induction variable because non-wrapping
1332   // could be flow-sensitive.
1333   //
1334   // Look through the potentially overflowing instruction to try to prove
1335   // non-wrapping for the *specific* value of Ptr.
1336 
1337   // The arithmetic implied by an inbounds GEP can't overflow.
1338   auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1339   if (!GEP || !GEP->isInBounds())
1340     return false;
1341 
1342   // Make sure there is only one non-const index and analyze that.
1343   Value *NonConstIndex = nullptr;
1344   for (Value *Index : GEP->indices())
1345     if (!isa<ConstantInt>(Index)) {
1346       if (NonConstIndex)
1347         return false;
1348       NonConstIndex = Index;
1349     }
1350   if (!NonConstIndex)
1351     // The recurrence is on the pointer, ignore for now.
1352     return false;
1353 
1354   // The index in GEP is signed.  It is non-wrapping if it's derived from a NSW
1355   // AddRec using a NSW operation.
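  // For example, for an inbounds GEP whose single non-constant index is
  //   %idx = add nsw i64 %iv, 1
  // where %iv is an NSW AddRec of this loop, the checks below succeed.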
1356   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
1357     if (OBO->hasNoSignedWrap() &&
        // Assume a constant for the other operand so that the AddRec can be
        // easily found.
1360         isa<ConstantInt>(OBO->getOperand(1))) {
1361       auto *OpScev = PSE.getSCEV(OBO->getOperand(0));
1362 
1363       if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
1364         return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
1365     }
1366 
1367   return false;
1368 }
1369 
1370 /// Check whether the access through \p Ptr has a constant stride.
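/// E.g. for i32 accesses to A[2 * i] the returned stride is 2 (in units of the
/// access type); for A[B[i]] there is no constant stride and std::nullopt is
/// returned.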
1371 std::optional<int64_t> llvm::getPtrStride(PredicatedScalarEvolution &PSE,
1372                                           Type *AccessTy, Value *Ptr,
1373                                           const Loop *Lp,
1374                                           const DenseMap<Value *, const SCEV *> &StridesMap,
1375                                           bool Assume, bool ShouldCheckWrap) {
1376   Type *Ty = Ptr->getType();
1377   assert(Ty->isPointerTy() && "Unexpected non-ptr");
1378 
1379   if (isa<ScalableVectorType>(AccessTy)) {
1380     LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
1381                       << "\n");
1382     return std::nullopt;
1383   }
1384 
1385   const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1386 
1387   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1388   if (Assume && !AR)
1389     AR = PSE.getAsAddRec(Ptr);
1390 
1391   if (!AR) {
1392     LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1393                       << " SCEV: " << *PtrScev << "\n");
1394     return std::nullopt;
1395   }
1396 
1397   // The access function must stride over the innermost loop.
1398   if (Lp != AR->getLoop()) {
1399     LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
1400                       << *Ptr << " SCEV: " << *AR << "\n");
1401     return std::nullopt;
1402   }
1403 
1404   // Check the step is constant.
1405   const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1406 
1407   // Calculate the pointer stride and check if it is constant.
1408   const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
1409   if (!C) {
1410     LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
1411                       << " SCEV: " << *AR << "\n");
1412     return std::nullopt;
1413   }
1414 
1415   auto &DL = Lp->getHeader()->getModule()->getDataLayout();
1416   TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
1417   int64_t Size = AllocSize.getFixedValue();
1418   const APInt &APStepVal = C->getAPInt();
1419 
1420   // Huge step value - give up.
1421   if (APStepVal.getBitWidth() > 64)
1422     return std::nullopt;
1423 
1424   int64_t StepVal = APStepVal.getSExtValue();
1425 
1426   // Strided access.
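  // E.g. a step of 8 bytes over i32 elements (Size == 4) yields Stride == 2;
  // a step of 6 bytes is rejected below because of the non-zero remainder.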
1427   int64_t Stride = StepVal / Size;
1428   int64_t Rem = StepVal % Size;
1429   if (Rem)
1430     return std::nullopt;
1431 
1432   if (!ShouldCheckWrap)
1433     return Stride;
1434 
1435   // The address calculation must not wrap. Otherwise, a dependence could be
1436   // inverted.
1437   if (isNoWrapAddRec(Ptr, AR, PSE, Lp))
1438     return Stride;
1439 
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap by definition.  If it did, the result would be poison
1442   // and any memory access dependent on it would be immediate UB
1443   // when executed.
1444   if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1445       GEP && GEP->isInBounds() && (Stride == 1 || Stride == -1))
1446     return Stride;
1447 
  // If the null pointer is undefined, then an access sequence which would
  // otherwise access it can be assumed not to wrap (unsigned).  Note that this
1450   // assumes the object in memory is aligned to the natural alignment.
1451   unsigned AddrSpace = Ty->getPointerAddressSpace();
1452   if (!NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace) &&
1453       (Stride == 1 || Stride == -1))
1454     return Stride;
1455 
1456   if (Assume) {
1457     PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1458     LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
1459                       << "LAA:   Pointer: " << *Ptr << "\n"
1460                       << "LAA:   SCEV: " << *AR << "\n"
1461                       << "LAA:   Added an overflow assumption\n");
1462     return Stride;
1463   }
1464   LLVM_DEBUG(
1465       dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1466              << *Ptr << " SCEV: " << *AR << "\n");
1467   return std::nullopt;
1468 }
1469 
1470 std::optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1471                                          Type *ElemTyB, Value *PtrB,
1472                                          const DataLayout &DL,
1473                                          ScalarEvolution &SE, bool StrictCheck,
1474                                          bool CheckType) {
1475   assert(PtrA && PtrB && "Expected non-nullptr pointers.");
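
  // The result is a distance in units of ElemTyA (elements, not bytes). E.g.
  // with both element types being i32 and PtrB addressing three i32 elements
  // past PtrA, the returned value is 3.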
1476 
1477   // Make sure that A and B are different pointers.
1478   if (PtrA == PtrB)
1479     return 0;
1480 
1481   // Make sure that the element types are the same if required.
1482   if (CheckType && ElemTyA != ElemTyB)
1483     return std::nullopt;
1484 
1485   unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1486   unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1487 
1488   // Check that the address spaces match.
1489   if (ASA != ASB)
1490     return std::nullopt;
1491   unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1492 
1493   APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1494   Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
1495   Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
1496 
1497   int Val;
1498   if (PtrA1 == PtrB1) {
1499     // Retrieve the address space again as pointer stripping now tracks through
1500     // `addrspacecast`.
1501     ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1502     ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1503     // Check that the address spaces match and that the pointers are valid.
1504     if (ASA != ASB)
1505       return std::nullopt;
1506 
1507     IdxWidth = DL.getIndexSizeInBits(ASA);
1508     OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1509     OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1510 
1511     OffsetB -= OffsetA;
1512     Val = OffsetB.getSExtValue();
1513   } else {
1514     // Otherwise compute the distance with SCEV between the base pointers.
1515     const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1516     const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1517     const auto *Diff =
1518         dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
1519     if (!Diff)
1520       return std::nullopt;
1521     Val = Diff->getAPInt().getSExtValue();
1522   }
1523   int Size = DL.getTypeStoreSize(ElemTyA);
1524   int Dist = Val / Size;
1525 
  // Ensure that the calculated distance matches the type-based one after all
  // the bitcasts have been removed from the provided pointers.
1528   if (!StrictCheck || Dist * Size == Val)
1529     return Dist;
1530   return std::nullopt;
1531 }
1532 
1533 bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
1534                            const DataLayout &DL, ScalarEvolution &SE,
1535                            SmallVectorImpl<unsigned> &SortedIndices) {
1536   assert(llvm::all_of(
1537              VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1538          "Expected list of pointer operands.");
  // Walk over the pointers, and map each of them to an offset relative to the
  // first pointer in the array.
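  // E.g. for VL = {A, A + 2, A + 1} (element offsets 0, 2 and 1), the given
  // order is not consecutive, so SortedIndices is set to {0, 2, 1}, i.e.
  // VL[0], VL[2], VL[1] is the sorted order.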
1541   Value *Ptr0 = VL[0];
1542 
1543   using DistOrdPair = std::pair<int64_t, int>;
1544   auto Compare = llvm::less_first();
1545   std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1546   Offsets.emplace(0, 0);
1547   int Cnt = 1;
1548   bool IsConsecutive = true;
1549   for (auto *Ptr : VL.drop_front()) {
1550     std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1551                                               /*StrictCheck=*/true);
1552     if (!Diff)
1553       return false;
1554 
    // Check if a pointer with the same offset has already been seen.
1556     int64_t Offset = *Diff;
1557     auto Res = Offsets.emplace(Offset, Cnt);
1558     if (!Res.second)
1559       return false;
1560     // Consecutive order if the inserted element is the last one.
1561     IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end();
1562     ++Cnt;
1563   }
1564   SortedIndices.clear();
1565   if (!IsConsecutive) {
    // Fill the SortedIndices array only if the pointers are not already in
    // consecutive order.
1567     SortedIndices.resize(VL.size());
1568     Cnt = 0;
1569     for (const std::pair<int64_t, int> &Pair : Offsets) {
1570       SortedIndices[Cnt] = Pair.second;
1571       ++Cnt;
1572     }
1573   }
1574   return true;
1575 }
1576 
1577 /// Returns true if the memory operations \p A and \p B are consecutive.
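/// E.g. two i32 loads from A[i] and A[i + 1] are consecutive: the element
/// distance between their pointers is exactly 1.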
1578 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
1579                                ScalarEvolution &SE, bool CheckType) {
1580   Value *PtrA = getLoadStorePointerOperand(A);
1581   Value *PtrB = getLoadStorePointerOperand(B);
1582   if (!PtrA || !PtrB)
1583     return false;
1584   Type *ElemTyA = getLoadStoreType(A);
1585   Type *ElemTyB = getLoadStoreType(B);
1586   std::optional<int> Diff =
1587       getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1588                       /*StrictCheck=*/true, CheckType);
1589   return Diff && *Diff == 1;
1590 }
1591 
1592 void MemoryDepChecker::addAccess(StoreInst *SI) {
1593   visitPointers(SI->getPointerOperand(), *InnermostLoop,
1594                 [this, SI](Value *Ptr) {
1595                   Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1596                   InstMap.push_back(SI);
1597                   ++AccessIdx;
1598                 });
1599 }
1600 
1601 void MemoryDepChecker::addAccess(LoadInst *LI) {
1602   visitPointers(LI->getPointerOperand(), *InnermostLoop,
1603                 [this, LI](Value *Ptr) {
1604                   Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1605                   InstMap.push_back(LI);
1606                   ++AccessIdx;
1607                 });
1608 }
1609 
1610 MemoryDepChecker::VectorizationSafetyStatus
1611 MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
1612   switch (Type) {
1613   case NoDep:
1614   case Forward:
1615   case BackwardVectorizable:
1616     return VectorizationSafetyStatus::Safe;
1617 
1618   case Unknown:
1619     return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
1620   case ForwardButPreventsForwarding:
1621   case Backward:
1622   case BackwardVectorizableButPreventsForwarding:
1623     return VectorizationSafetyStatus::Unsafe;
1624   }
1625   llvm_unreachable("unexpected DepType!");
1626 }
1627 
1628 bool MemoryDepChecker::Dependence::isBackward() const {
1629   switch (Type) {
1630   case NoDep:
1631   case Forward:
1632   case ForwardButPreventsForwarding:
1633   case Unknown:
1634     return false;
1635 
1636   case BackwardVectorizable:
1637   case Backward:
1638   case BackwardVectorizableButPreventsForwarding:
1639     return true;
1640   }
1641   llvm_unreachable("unexpected DepType!");
1642 }
1643 
1644 bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
1645   return isBackward() || Type == Unknown;
1646 }
1647 
1648 bool MemoryDepChecker::Dependence::isForward() const {
1649   switch (Type) {
1650   case Forward:
1651   case ForwardButPreventsForwarding:
1652     return true;
1653 
1654   case NoDep:
1655   case Unknown:
1656   case BackwardVectorizable:
1657   case Backward:
1658   case BackwardVectorizableButPreventsForwarding:
1659     return false;
1660   }
1661   llvm_unreachable("unexpected DepType!");
1662 }
1663 
1664 bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1665                                                     uint64_t TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor, store-load forwarding does not take place.
  // Positive dependences might cause trouble because vectorizing them might
  // prevent store-load forwarding, making vectorized code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  //   The stores to a[i:i+1] don't align with the loads from a[i-3:i-2] and
  //   hence on your typical architecture store-load forwarding does not take
  //   place. Vectorizing in such cases does not make sense.
1674   // Store-load forwarding distance.
1675 
1676   // After this many iterations store-to-load forwarding conflicts should not
1677   // cause any slowdowns.
1678   const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1679   // Maximum vector factor.
1680   uint64_t MaxVFWithoutSLForwardIssues = std::min(
1681       VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes);
1682 
1683   // Compute the smallest VF at which the store and load would be misaligned.
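  // E.g. with TypeByteSize == 4 and Distance == 12, VF == 8 bytes is already
  // misaligned (12 % 8 != 0) with only one iteration of slack, so the maximum
  // VF without forwarding issues drops to 4 bytes and the check below reports
  // a conflict.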
1684   for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
1685        VF *= 2) {
    // If the number of vector iterations between the store and the load is
    // small, we could incur conflicts.
1688     if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1689       MaxVFWithoutSLForwardIssues = (VF >> 1);
1690       break;
1691     }
1692   }
1693 
1694   if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
1695     LLVM_DEBUG(
1696         dbgs() << "LAA: Distance " << Distance
1697                << " that could cause a store-load forwarding conflict\n");
1698     return true;
1699   }
1700 
1701   if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
1702       MaxVFWithoutSLForwardIssues !=
1703           VectorizerParams::MaxVectorWidth * TypeByteSize)
1704     MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
1705   return false;
1706 }
1707 
1708 void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1709   if (Status < S)
1710     Status = S;
1711 }
1712 
/// Given a dependence-distance \p Dist between two
/// memory accesses that have the same stride, whose absolute value is given
/// in \p Stride, and that have the same type size \p TypeByteSize,
/// in a loop whose backedge-taken count is \p BackedgeTakenCount, check if it
/// is
1717 /// possible to prove statically that the dependence distance is larger
1718 /// than the range that the accesses will travel through the execution of
1719 /// the loop. If so, return true; false otherwise. This is useful for
1720 /// example in loops such as the following (PR31098):
1721 ///     for (i = 0; i < D; ++i) {
1722 ///                = out[i];
1723 ///       out[i+D] =
1724 ///     }
1725 static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1726                                      const SCEV &BackedgeTakenCount,
1727                                      const SCEV &Dist, uint64_t Stride,
1728                                      uint64_t TypeByteSize) {
1729 
1730   // If we can prove that
1731   //      (**) |Dist| > BackedgeTakenCount * Step
1732   // where Step is the absolute stride of the memory accesses in bytes,
1733   // then there is no dependence.
1734   //
1735   // Rationale:
1736   // We basically want to check if the absolute distance (|Dist/Step|)
1737   // is >= the loop iteration count (or > BackedgeTakenCount).
1738   // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1739   // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1740   // that the dependence distance is >= VF; This is checked elsewhere.
1741   // But in some cases we can prune dependence distances early, and
1742   // even before selecting the VF, and without a runtime test, by comparing
1743   // the distance against the loop iteration count. Since the vectorized code
1744   // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1745   // also guarantees that distance >= VF.
1746   //
1747   const uint64_t ByteStride = Stride * TypeByteSize;
1748   const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
1749   const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);
1750 
1751   const SCEV *CastedDist = &Dist;
1752   const SCEV *CastedProduct = Product;
1753   uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1754   uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1755 
1756   // The dependence distance can be positive/negative, so we sign extend Dist;
1757   // The multiplication of the absolute stride in bytes and the
1758   // backedgeTakenCount is non-negative, so we zero extend Product.
1759   if (DistTypeSizeBits > ProductTypeSizeBits)
1760     CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1761   else
1762     CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1763 
1764   // Is  Dist - (BackedgeTakenCount * Step) > 0 ?
1765   // (If so, then we have proven (**) because |Dist| >= Dist)
1766   const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1767   if (SE.isKnownPositive(Minus))
1768     return true;
1769 
1770   // Second try: Is  -Dist - (BackedgeTakenCount * Step) > 0 ?
1771   // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1772   const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1773   Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1774   if (SE.isKnownPositive(Minus))
1775     return true;
1776 
1777   return false;
1778 }
1779 
1780 /// Check the dependence for two accesses with the same stride \p Stride.
/// \p Distance is the positive distance and \p TypeByteSize is the type size in
1782 /// bytes.
1783 ///
1784 /// \returns true if they are independent.
1785 static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1786                                           uint64_t TypeByteSize) {
1787   assert(Stride > 1 && "The stride must be greater than 1");
1788   assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1789   assert(Distance > 0 && "The distance must be non-zero");
1790 
  // Skip if the distance is not a multiple of the type byte size.
1792   if (Distance % TypeByteSize)
1793     return false;
1794 
1795   uint64_t ScaledDist = Distance / TypeByteSize;
1796 
  // No dependence if the scaled distance is not a multiple of the stride.
1798   // E.g.
1799   //      for (i = 0; i < 1024 ; i += 4)
1800   //        A[i+2] = A[i] + 1;
1801   //
1802   // Two accesses in memory (scaled distance is 2, stride is 4):
1803   //     | A[0] |      |      |      | A[4] |      |      |      |
1804   //     |      |      | A[2] |      |      |      | A[6] |      |
1805   //
1806   // E.g.
1807   //      for (i = 0; i < 1024 ; i += 3)
1808   //        A[i+4] = A[i] + 1;
1809   //
1810   // Two accesses in memory (scaled distance is 4, stride is 3):
1811   //     | A[0] |      |      | A[3] |      |      | A[6] |      |      |
1812   //     |      |      |      |      | A[4] |      |      | A[7] |      |
1813   return ScaledDist % Stride;
1814 }
1815 
1816 MemoryDepChecker::Dependence::DepType
1817 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
1818                               const MemAccessInfo &B, unsigned BIdx,
1819                               const DenseMap<Value *, const SCEV *> &Strides) {
  assert(AIdx < BIdx && "Must pass arguments in program order");
1821 
1822   auto [APtr, AIsWrite] = A;
1823   auto [BPtr, BIsWrite] = B;
1824   Type *ATy = getLoadStoreType(InstMap[AIdx]);
1825   Type *BTy = getLoadStoreType(InstMap[BIdx]);
1826 
1827   // Two reads are independent.
1828   if (!AIsWrite && !BIsWrite)
1829     return Dependence::NoDep;
1830 
1831   // We cannot check pointers in different address spaces.
1832   if (APtr->getType()->getPointerAddressSpace() !=
1833       BPtr->getType()->getPointerAddressSpace())
1834     return Dependence::Unknown;
1835 
1836   int64_t StrideAPtr =
1837     getPtrStride(PSE, ATy, APtr, InnermostLoop, Strides, true).value_or(0);
1838   int64_t StrideBPtr =
1839     getPtrStride(PSE, BTy, BPtr, InnermostLoop, Strides, true).value_or(0);
1840 
1841   const SCEV *Src = PSE.getSCEV(APtr);
1842   const SCEV *Sink = PSE.getSCEV(BPtr);
1843 
  // If the induction step is negative, we have to invert source and sink of
  // the dependence.
1846   if (StrideAPtr < 0) {
1847     std::swap(APtr, BPtr);
1848     std::swap(ATy, BTy);
1849     std::swap(Src, Sink);
1850     std::swap(AIsWrite, BIsWrite);
1851     std::swap(AIdx, BIdx);
1852     std::swap(StrideAPtr, StrideBPtr);
1853   }
1854 
1855   ScalarEvolution &SE = *PSE.getSE();
1856   const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
1857 
1858   LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1859                     << "(Induction step: " << StrideAPtr << ")\n");
1860   LLVM_DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
1861                     << *InstMap[BIdx] << ": " << *Dist << "\n");
1862 
1863   // Need accesses with constant stride. We don't want to vectorize
1864   // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
1865   // the address space.
  if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr) {
1867     LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1868     return Dependence::Unknown;
1869   }
1870 
1871   auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
1872   uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
1873   bool HasSameSize =
1874       DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
1875   uint64_t Stride = std::abs(StrideAPtr);
1876 
1877   if (!isa<SCEVCouldNotCompute>(Dist) && HasSameSize &&
1878       isSafeDependenceDistance(DL, SE, *(PSE.getBackedgeTakenCount()), *Dist,
1879                                Stride, TypeByteSize))
1880     return Dependence::NoDep;
1881 
1882   const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
1883   if (!C) {
1884     LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
1885     FoundNonConstantDistanceDependence = true;
1886     return Dependence::Unknown;
1887   }
1888 
1889   const APInt &Val = C->getAPInt();
1890   int64_t Distance = Val.getSExtValue();
1891 
1892   // Attempt to prove strided accesses independent.
1893   if (std::abs(Distance) > 0 && Stride > 1 && HasSameSize &&
1894       areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) {
1895     LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
1896     return Dependence::NoDep;
1897   }
1898 
1899   // Negative distances are not plausible dependencies.
1900   if (Val.isNegative()) {
1901     bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
1902     if (IsTrueDataDependence && EnableForwardingConflictDetection &&
1903         (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
1904          !HasSameSize)) {
1905       LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
1906       return Dependence::ForwardButPreventsForwarding;
1907     }
1908 
1909     LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
1910     return Dependence::Forward;
1911   }
1912 
1913   // Write to the same location with the same size.
1914   if (Val == 0) {
1915     if (HasSameSize)
1916       return Dependence::Forward;
1917     LLVM_DEBUG(
1918         dbgs() << "LAA: Zero dependence difference but different type sizes\n");
1919     return Dependence::Unknown;
1920   }
1921 
1922   assert(Val.isStrictlyPositive() && "Expect a positive value");
1923 
1924   if (!HasSameSize) {
1925     LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
1926                          "different type sizes\n");
1927     return Dependence::Unknown;
1928   }
1929 
1930   // Bail out early if passed-in parameters make vectorization not feasible.
1931   unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
1932                            VectorizerParams::VectorizationFactor : 1);
1933   unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
1934                            VectorizerParams::VectorizationInterleave : 1);
1935   // The minimum number of iterations for a vectorized/unrolled version.
1936   unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
1937 
  // It's not vectorizable if the distance is smaller than the minimum distance
  // needed for a vectorized/unrolled version. Vectorizing one iteration in
  // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
  // TypeByteSize (no need to add the trailing gap distance).
1942   //
1943   // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
1944   //      foo(int *A) {
1945   //        int *B = (int *)((char *)A + 14);
1946   //        for (i = 0 ; i < 1024 ; i += 2)
1947   //          B[i] = A[i] + 1;
1948   //      }
1949   //
1950   // Two accesses in memory (stride is 2):
1951   //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
1952   //                              | B[0] |      | B[2] |      | B[4] |
1953   //
  // Distance needed for vectorizing iterations except the last iteration:
  // 4 * 2 * (MinNumIter - 1). Distance needed for the last iteration: 4.
1956   // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
1957   //
1958   // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
1959   // 12, which is less than distance.
1960   //
1961   // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
1962   // the minimum distance needed is 28, which is greater than distance. It is
1963   // not safe to do vectorization.
1964   uint64_t MinDistanceNeeded =
1965       TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
1966   if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
1967     LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance "
1968                       << Distance << '\n');
1969     return Dependence::Backward;
1970   }
1971 
1972   // Unsafe if the minimum distance needed is greater than max safe distance.
1973   if (MinDistanceNeeded > MaxSafeDepDistBytes) {
1974     LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
1975                       << MinDistanceNeeded << " size in bytes\n");
1976     return Dependence::Backward;
1977   }
1978 
1979   // Positive distance bigger than max vectorization factor.
  // FIXME: Should use max factor instead of max distance in bytes, which
  // cannot handle different types.
1982   // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
1983   //      void foo (int *A, char *B) {
1984   //        for (unsigned i = 0; i < 1024; i++) {
1985   //          A[i+2] = A[i] + 1;
1986   //          B[i+2] = B[i] + 1;
1987   //        }
1988   //      }
1989   //
  // This case is currently unsafe according to the max safe distance. If we
  // analyze the two accesses on array B, the max safe dependence distance
  // is 2. Then we analyze the accesses on array A, whose minimum distance
  // needed is 8, which is greater than 2, so vectorization is forbidden. But
  // actually both A and B could be vectorized with a factor of 2.
1995   MaxSafeDepDistBytes =
1996       std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes);
1997 
1998   bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
1999   if (IsTrueDataDependence && EnableForwardingConflictDetection &&
2000       couldPreventStoreLoadForward(Distance, TypeByteSize))
2001     return Dependence::BackwardVectorizableButPreventsForwarding;
2002 
2003   uint64_t MaxVF = MaxSafeDepDistBytes / (TypeByteSize * Stride);
2004   LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
2005                     << " with max VF = " << MaxVF << '\n');
2006   uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
2007   MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2008   return Dependence::BackwardVectorizable;
2009 }
2010 
2011 bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
2012                                    MemAccessInfoList &CheckDeps,
2013                                    const DenseMap<Value *, const SCEV *> &Strides) {
2014 
2015   MaxSafeDepDistBytes = -1;
2016   SmallPtrSet<MemAccessInfo, 8> Visited;
2017   for (MemAccessInfo CurAccess : CheckDeps) {
2018     if (Visited.count(CurAccess))
2019       continue;
2020 
2021     // Get the relevant memory access set.
2022     EquivalenceClasses<MemAccessInfo>::iterator I =
2023       AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
2024 
2025     // Check accesses within this set.
2026     EquivalenceClasses<MemAccessInfo>::member_iterator AI =
2027         AccessSets.member_begin(I);
2028     EquivalenceClasses<MemAccessInfo>::member_iterator AE =
2029         AccessSets.member_end();
2030 
2031     // Check every access pair.
2032     while (AI != AE) {
2033       Visited.insert(*AI);
2034       bool AIIsWrite = AI->getInt();
      // Check loads only against subsequent members of the equivalence class,
      // but check stores also against other stores to the same address within
      // the class.
2037       EquivalenceClasses<MemAccessInfo>::member_iterator OI =
2038           (AIIsWrite ? AI : std::next(AI));
2039       while (OI != AE) {
2040         // Check every accessing instruction pair in program order.
2041         for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
2042              I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
          // Scan all accesses of the other member, but only the subsequent
          // accesses of the same member.
2045           for (std::vector<unsigned>::iterator
2046                    I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2047                    I2E = (OI == AI ? I1E : Accesses[*OI].end());
2048                I2 != I2E; ++I2) {
2049             auto A = std::make_pair(&*AI, *I1);
2050             auto B = std::make_pair(&*OI, *I2);
2051 
2052             assert(*I1 != *I2);
2053             if (*I1 > *I2)
2054               std::swap(A, B);
2055 
2056             Dependence::DepType Type =
2057                 isDependent(*A.first, A.second, *B.first, B.second, Strides);
2058             mergeInStatus(Dependence::isSafeForVectorization(Type));
2059 
2060             // Gather dependences unless we accumulated MaxDependences
2061             // dependences.  In that case return as soon as we find the first
2062             // unsafe dependence.  This puts a limit on this quadratic
2063             // algorithm.
2064             if (RecordDependences) {
2065               if (Type != Dependence::NoDep)
2066                 Dependences.push_back(Dependence(A.second, B.second, Type));
2067 
2068               if (Dependences.size() >= MaxDependences) {
2069                 RecordDependences = false;
2070                 Dependences.clear();
2071                 LLVM_DEBUG(dbgs()
2072                            << "Too many dependences, stopped recording\n");
2073               }
2074             }
2075             if (!RecordDependences && !isSafeForVectorization())
2076               return false;
2077           }
2078         ++OI;
2079       }
2080       AI++;
2081     }
2082   }
2083 
2084   LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2085   return isSafeForVectorization();
2086 }
2087 
2088 SmallVector<Instruction *, 4>
2089 MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
2090   MemAccessInfo Access(Ptr, isWrite);
2091   auto &IndexVector = Accesses.find(Access)->second;
2092 
2093   SmallVector<Instruction *, 4> Insts;
  transform(IndexVector, std::back_inserter(Insts),
            [&](unsigned Idx) { return this->InstMap[Idx]; });
2097   return Insts;
2098 }
2099 
2100 const char *MemoryDepChecker::Dependence::DepName[] = {
2101     "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
2102     "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};
2103 
2104 void MemoryDepChecker::Dependence::print(
2105     raw_ostream &OS, unsigned Depth,
2106     const SmallVectorImpl<Instruction *> &Instrs) const {
2107   OS.indent(Depth) << DepName[Type] << ":\n";
2108   OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2109   OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2110 }
2111 
2112 bool LoopAccessInfo::canAnalyzeLoop() {
2113   // We need to have a loop header.
2114   LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
2115                     << TheLoop->getHeader()->getParent()->getName() << ": "
2116                     << TheLoop->getHeader()->getName() << '\n');
2117 
2118   // We can only analyze innermost loops.
2119   if (!TheLoop->isInnermost()) {
2120     LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2121     recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2122     return false;
2123   }
2124 
2125   // We must have a single backedge.
2126   if (TheLoop->getNumBackEdges() != 1) {
2127     LLVM_DEBUG(
2128         dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2129     recordAnalysis("CFGNotUnderstood")
2130         << "loop control flow is not understood by analyzer";
2131     return false;
2132   }
2133 
2134   // ScalarEvolution needs to be able to find the exit count.
2135   const SCEV *ExitCount = PSE->getBackedgeTakenCount();
2136   if (isa<SCEVCouldNotCompute>(ExitCount)) {
2137     recordAnalysis("CantComputeNumberOfIterations")
2138         << "could not determine number of loop iterations";
2139     LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2140     return false;
2141   }
2142 
2143   return true;
2144 }
2145 
2146 void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
2147                                  const TargetLibraryInfo *TLI,
2148                                  DominatorTree *DT) {
2149   // Holds the Load and Store instructions.
2150   SmallVector<LoadInst *, 16> Loads;
2151   SmallVector<StoreInst *, 16> Stores;
2152 
2153   // Holds all the different accesses in the loop.
2154   unsigned NumReads = 0;
2155   unsigned NumReadWrites = 0;
2156 
2157   bool HasComplexMemInst = false;
2158 
2159   // A runtime check is only legal to insert if there are no convergent calls.
2160   HasConvergentOp = false;
2161 
2162   PtrRtChecking->Pointers.clear();
2163   PtrRtChecking->Need = false;
2164 
2165   const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2166 
2167   const bool EnableMemAccessVersioningOfLoop =
2168       EnableMemAccessVersioning &&
2169       !TheLoop->getHeader()->getParent()->hasOptSize();
2170 
2171   // Traverse blocks in fixed RPOT order, regardless of their storage in the
2172   // loop info, as it may be arbitrary.
2173   LoopBlocksRPO RPOT(TheLoop);
2174   RPOT.perform(LI);
2175   for (BasicBlock *BB : RPOT) {
2176     // Scan the BB and collect legal loads and stores. Also detect any
2177     // convergent instructions.
2178     for (Instruction &I : *BB) {
2179       if (auto *Call = dyn_cast<CallBase>(&I)) {
2180         if (Call->isConvergent())
2181           HasConvergentOp = true;
2182       }
2183 
      // If both a non-vectorizable memory instruction and a convergent
      // operation have been found in this loop, there is no reason to continue
      // the search.
2186       if (HasComplexMemInst && HasConvergentOp) {
2187         CanVecMem = false;
2188         return;
2189       }
2190 
2191       // Avoid hitting recordAnalysis multiple times.
2192       if (HasComplexMemInst)
2193         continue;
2194 
2195       // If this is a load, save it. If this instruction can read from memory
2196       // but is not a load, then we quit. Notice that we don't handle function
2197       // calls that read or write.
2198       if (I.mayReadFromMemory()) {
2199         // Many math library functions read the rounding mode. We will only
2200         // vectorize a loop if it contains known function calls that don't set
2201         // the flag. Therefore, it is safe to ignore this read from memory.
2202         auto *Call = dyn_cast<CallInst>(&I);
2203         if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2204           continue;
2205 
2206         // If the function has an explicit vectorized counterpart, we can safely
2207         // assume that it can be vectorized.
2208         if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2209             !VFDatabase::getMappings(*Call).empty())
2210           continue;
2211 
2212         auto *Ld = dyn_cast<LoadInst>(&I);
2213         if (!Ld) {
2214           recordAnalysis("CantVectorizeInstruction", Ld)
2215             << "instruction cannot be vectorized";
2216           HasComplexMemInst = true;
2217           continue;
2218         }
2219         if (!Ld->isSimple() && !IsAnnotatedParallel) {
2220           recordAnalysis("NonSimpleLoad", Ld)
2221               << "read with atomic ordering or volatile read";
2222           LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2223           HasComplexMemInst = true;
2224           continue;
2225         }
2226         NumLoads++;
2227         Loads.push_back(Ld);
2228         DepChecker->addAccess(Ld);
2229         if (EnableMemAccessVersioningOfLoop)
2230           collectStridedAccess(Ld);
2231         continue;
2232       }
2233 
2234       // Save 'store' instructions. Abort if other instructions write to memory.
2235       if (I.mayWriteToMemory()) {
2236         auto *St = dyn_cast<StoreInst>(&I);
2237         if (!St) {
2238           recordAnalysis("CantVectorizeInstruction", St)
2239               << "instruction cannot be vectorized";
2240           HasComplexMemInst = true;
2241           continue;
2242         }
2243         if (!St->isSimple() && !IsAnnotatedParallel) {
2244           recordAnalysis("NonSimpleStore", St)
2245               << "write with atomic ordering or volatile write";
2246           LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2247           HasComplexMemInst = true;
2248           continue;
2249         }
2250         NumStores++;
2251         Stores.push_back(St);
2252         DepChecker->addAccess(St);
2253         if (EnableMemAccessVersioningOfLoop)
2254           collectStridedAccess(St);
2255       }
2256     } // Next instr.
2257   } // Next block.
2258 
2259   if (HasComplexMemInst) {
2260     CanVecMem = false;
2261     return;
2262   }
2263 
2264   // Now we have two lists that hold the loads and the stores.
2265   // Next, we find the pointers that they use.
2266 
2267   // Check if we see any stores. If there are no stores, then we don't
2268   // care if the pointers are *restrict*.
2269   if (!Stores.size()) {
2270     LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2271     CanVecMem = true;
2272     return;
2273   }
2274 
2275   MemoryDepChecker::DepCandidates DependentAccesses;
2276   AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE);
2277 
2278   // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2279   // multiple times on the same object. If the ptr is accessed twice, once
2280   // for read and once for write, it will only appear once (on the write
2281   // list). This is okay, since we are going to check for conflicts between
2282   // writes and between reads and writes, but not between reads and reads.
2283   SmallSet<std::pair<Value *, Type *>, 16> Seen;
2284 
2285   // Record uniform store addresses to identify if we have multiple stores
2286   // to the same address.
2287   SmallPtrSet<Value *, 16> UniformStores;
2288 
2289   for (StoreInst *ST : Stores) {
2290     Value *Ptr = ST->getPointerOperand();
2291 
2292     if (isInvariant(Ptr)) {
2293       // Record store instructions to loop invariant addresses
2294       StoresToInvariantAddresses.push_back(ST);
2295       HasDependenceInvolvingLoopInvariantAddress |=
2296           !UniformStores.insert(Ptr).second;
2297     }
2298 
    // If we did *not* see this pointer before, insert it into the read-write
    // list. At this phase it is only a 'write' list.
2301     Type *AccessTy = getLoadStoreType(ST);
2302     if (Seen.insert({Ptr, AccessTy}).second) {
2303       ++NumReadWrites;
2304 
2305       MemoryLocation Loc = MemoryLocation::get(ST);
2306       // The TBAA metadata could have a control dependency on the predication
2307       // condition, so we cannot rely on it when determining whether or not we
2308       // need runtime pointer checks.
2309       if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2310         Loc.AATags.TBAA = nullptr;
2311 
2312       visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2313                     [&Accesses, AccessTy, Loc](Value *Ptr) {
2314                       MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2315                       Accesses.addStore(NewLoc, AccessTy);
2316                     });
2317     }
2318   }
2319 
2320   if (IsAnnotatedParallel) {
2321     LLVM_DEBUG(
2322         dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2323                << "checks.\n");
2324     CanVecMem = true;
2325     return;
2326   }
2327 
2328   for (LoadInst *LD : Loads) {
2329     Value *Ptr = LD->getPointerOperand();
2330     // If we did *not* see this pointer before, insert it to the
2331     // read list. If we *did* see it before, then it is already in
2332     // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x, because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    // If the index is unknown (for example A[B[i]]) then we may
2336     // read a few words, modify, and write a few words, and some of the
2337     // words may be written to the same address.
2338     bool IsReadOnlyPtr = false;
2339     Type *AccessTy = getLoadStoreType(LD);
2340     if (Seen.insert({Ptr, AccessTy}).second ||
2341         !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides).value_or(0)) {
2342       ++NumReads;
2343       IsReadOnlyPtr = true;
2344     }
2345 
2346     // See if there is an unsafe dependency between a load to a uniform address and
2347     // store to the same uniform address.
2348     if (UniformStores.count(Ptr)) {
2349       LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2350                            "load and uniform store to the same address!\n");
2351       HasDependenceInvolvingLoopInvariantAddress = true;
2352     }
2353 
2354     MemoryLocation Loc = MemoryLocation::get(LD);
2355     // The TBAA metadata could have a control dependency on the predication
2356     // condition, so we cannot rely on it when determining whether or not we
2357     // need runtime pointer checks.
2358     if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2359       Loc.AATags.TBAA = nullptr;
2360 
2361     visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2362                   [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2363                     MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2364                     Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2365                   });
2366   }
2367 
2368   // If we write (or read-write) to a single destination and there are no
  // other reads in this loop, then it is safe to vectorize.
2370   if (NumReadWrites == 1 && NumReads == 0) {
2371     LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2372     CanVecMem = true;
2373     return;
2374   }
2375 
2376   // Build dependence sets and check whether we need a runtime pointer bounds
2377   // check.
2378   Accesses.buildDependenceSets();
2379 
2380   // Find pointers with computable bounds. We are going to use this information
2381   // to place a runtime bound check.
2382   Value *UncomputablePtr = nullptr;
2383   bool CanDoRTIfNeeded =
2384       Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
2385                                SymbolicStrides, UncomputablePtr, false);
2386   if (!CanDoRTIfNeeded) {
2387     auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2388     recordAnalysis("CantIdentifyArrayBounds", I)
2389         << "cannot identify array bounds";
2390     LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2391                       << "the array bounds.\n");
2392     CanVecMem = false;
2393     return;
2394   }
2395 
2396   LLVM_DEBUG(
2397     dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2398 
2399   CanVecMem = true;
2400   if (Accesses.isDependencyCheckNeeded()) {
2401     LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2402     CanVecMem = DepChecker->areDepsSafe(
2403         DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
2404     MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes();
2405 
2406     if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
2407       LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2408 
2409       // Clear the dependency checks. We assume they are not needed.
2410       Accesses.resetDepChecks(*DepChecker);
2411 
2412       PtrRtChecking->reset();
2413       PtrRtChecking->Need = true;
2414 
2415       auto *SE = PSE->getSE();
2416       UncomputablePtr = nullptr;
2417       CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
2418           *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);
2419 
2420       // Check that we found the bounds for the pointer.
2421       if (!CanDoRTIfNeeded) {
2422         auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2423         recordAnalysis("CantCheckMemDepsAtRunTime", I)
2424             << "cannot check memory dependencies at runtime";
2425         LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2426         CanVecMem = false;
2427         return;
2428       }
2429 
2430       CanVecMem = true;
2431     }
2432   }
2433 
2434   if (HasConvergentOp) {
2435     recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2436       << "cannot add control dependency to convergent operation";
2437     LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2438                          "would be needed with a convergent operation\n");
2439     CanVecMem = false;
2440     return;
2441   }
2442 
2443   if (CanVecMem)
2444     LLVM_DEBUG(
2445         dbgs() << "LAA: No unsafe dependent memory operations in loop.  We"
2446                << (PtrRtChecking->Need ? "" : " don't")
2447                << " need runtime memory checks.\n");
2448   else
2449     emitUnsafeDependenceRemark();
2450 }
2451 
2452 void LoopAccessInfo::emitUnsafeDependenceRemark() {
2453   auto Deps = getDepChecker().getDependences();
2454   if (!Deps)
2455     return;
2456   auto Found = llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
2457     return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
2458            MemoryDepChecker::VectorizationSafetyStatus::Safe;
2459   });
2460   if (Found == Deps->end())
2461     return;
2462   MemoryDepChecker::Dependence Dep = *Found;
2463 
2464   LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2465 
2466   // Emit remark for first unsafe dependence
2467   OptimizationRemarkAnalysis &R =
2468       recordAnalysis("UnsafeDep", Dep.getDestination(*this))
2469       << "unsafe dependent memory operations in loop. Use "
2470          "#pragma loop distribute(enable) to allow loop distribution "
2471          "to attempt to isolate the offending operations into a separate "
2472          "loop";
2473 
2474   switch (Dep.Type) {
2475   case MemoryDepChecker::Dependence::NoDep:
2476   case MemoryDepChecker::Dependence::Forward:
2477   case MemoryDepChecker::Dependence::BackwardVectorizable:
2478     llvm_unreachable("Unexpected dependence");
2479   case MemoryDepChecker::Dependence::Backward:
2480     R << "\nBackward loop carried data dependence.";
2481     break;
2482   case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
2483     R << "\nForward loop carried data dependence that prevents "
2484          "store-to-load forwarding.";
2485     break;
2486   case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
2487     R << "\nBackward loop carried data dependence that prevents "
2488          "store-to-load forwarding.";
2489     break;
2490   case MemoryDepChecker::Dependence::Unknown:
2491     R << "\nUnknown data dependence.";
2492     break;
2493   }
2494 
2495   if (Instruction *I = Dep.getSource(*this)) {
2496     DebugLoc SourceLoc = I->getDebugLoc();
2497     if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
2498       SourceLoc = DD->getDebugLoc();
2499     if (SourceLoc)
2500       R << " Memory location is the same as accessed at "
2501         << ore::NV("Location", SourceLoc);
2502   }
2503 }
2504 
2505 bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
2506                                            DominatorTree *DT)  {
2507   assert(TheLoop->contains(BB) && "Unknown block used");
2508 
2509   // Blocks that do not dominate the latch need predication.
2510   BasicBlock* Latch = TheLoop->getLoopLatch();
2511   return !DT->dominates(BB, Latch);
2512 }
2513 
2514 OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
2515                                                            Instruction *I) {
2516   assert(!Report && "Multiple reports generated");
2517 
2518   Value *CodeRegion = TheLoop->getHeader();
2519   DebugLoc DL = TheLoop->getStartLoc();
2520 
2521   if (I) {
2522     CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
2525     if (I->getDebugLoc())
2526       DL = I->getDebugLoc();
2527   }
2528 
2529   Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
2530                                                    CodeRegion);
2531   return *Report;
2532 }
2533 
2534 bool LoopAccessInfo::isInvariant(Value *V) const {
2535   auto *SE = PSE->getSE();
2536   // TODO: Is this really what we want? Even without FP SCEV, we may want some
2537   // trivially loop-invariant FP values to be considered invariant.
2538   if (!SE->isSCEVable(V->getType()))
2539     return false;
2540   const SCEV *S = SE->getSCEV(V);
2541   return SE->isLoopInvariant(S, TheLoop);
2542 }
2543 
2544 /// Find the operand of the GEP that should be checked for consecutive
2545 /// stores. This ignores trailing indices that have no effect on the final
2546 /// pointer.
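/// E.g. in "getelementptr [1 x i32], ptr %p, i64 %i, i64 0" the trailing zero
/// index does not change the final address, so operand 1 (%i) is the operand
/// to check.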
2547 static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) {
2548   const DataLayout &DL = Gep->getModule()->getDataLayout();
2549   unsigned LastOperand = Gep->getNumOperands() - 1;
2550   TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());
2551 
2552   // Walk backwards and try to peel off zeros.
2553   while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
2554     // Find the type we're currently indexing into.
2555     gep_type_iterator GEPTI = gep_type_begin(Gep);
2556     std::advance(GEPTI, LastOperand - 2);
2557 
2558     // If it's a type with the same allocation size as the result of the GEP we
2559     // can peel off the zero index.
2560     if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
2561       break;
2562     --LastOperand;
2563   }
2564 
2565   return LastOperand;
2566 }
2567 
2568 /// If the argument is a GEP, then returns the operand identified by
2569 /// getGEPInductionOperand. However, if there is some other non-loop-invariant
2570 /// operand, it returns that instead.
2571 static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2572   GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2573   if (!GEP)
2574     return Ptr;
2575 
2576   unsigned InductionOperand = getGEPInductionOperand(GEP);
2577 
2578   // Check that all of the gep indices are uniform except for our induction
2579   // operand.
2580   for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
2581     if (i != InductionOperand &&
2582         !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
2583       return Ptr;
2584   return GEP->getOperand(InductionOperand);
2585 }
2586 
2587 /// If a value has only one user that is a CastInst, return it.
2588 static Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
2589   Value *UniqueCast = nullptr;
2590   for (User *U : Ptr->users()) {
2591     CastInst *CI = dyn_cast<CastInst>(U);
2592     if (CI && CI->getType() == Ty) {
2593       if (!UniqueCast)
2594         UniqueCast = CI;
2595       else
2596         return nullptr;
2597     }
2598   }
2599   return UniqueCast;
2600 }
2601 
2602 /// Get the stride of a pointer access in a loop. Looks for symbolic
2603 /// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
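/// E.g. for an access "A[i * Stride]" with a loop-invariant value Stride, this
/// returns the SCEV for Stride; accesses with a constant stride (such as
/// A[2 * i]) return nullptr.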
2604 static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2605   auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2606   if (!PtrTy || PtrTy->isAggregateType())
2607     return nullptr;
2608 
  // Try to remove a gep instruction to make the pointer (actually the index at
  // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
  // the pointer, otherwise we are analyzing the index.
2612   Value *OrigPtr = Ptr;
2613 
2614   // The size of the pointer access.
2615   int64_t PtrAccessSize = 1;
2616 
2617   Ptr = stripGetElementPtr(Ptr, SE, Lp);
2618   const SCEV *V = SE->getSCEV(Ptr);
2619 
2620   if (Ptr != OrigPtr)
2621     // Strip off casts.
2622     while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
2623       V = C->getOperand();
2624 
2625   const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
2626   if (!S)
2627     return nullptr;
2628 
2629   // If the pointer is invariant then there is no stride and it makes no
2630   // sense to add it here.
2631   if (Lp != S->getLoop())
2632     return nullptr;
2633 
2634   V = S->getStepRecurrence(*SE);
2635   if (!V)
2636     return nullptr;
2637 
2638   // Strip off the size of access multiplication if we are still analyzing the
2639   // pointer.
2640   if (OrigPtr == Ptr) {
2641     if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
2642       if (M->getOperand(0)->getSCEVType() != scConstant)
2643         return nullptr;
2644 
2645       const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();
2646 
2647       // Huge step value - give up.
2648       if (APStepVal.getBitWidth() > 64)
2649         return nullptr;
2650 
2651       int64_t StepVal = APStepVal.getSExtValue();
2652       if (PtrAccessSize != StepVal)
2653         return nullptr;
2654       V = M->getOperand(1);
2655     }
2656   }

  // Note that the restrictions after this loop invariant check are only
  // profitability restrictions.
  if (!SE->isLoopInvariant(V, Lp))
    return nullptr;

  // Look for the loop invariant symbolic value.
  const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
  if (!U) {
    const auto *C = dyn_cast<SCEVIntegralCastExpr>(V);
    if (!C)
      return nullptr;
    U = dyn_cast<SCEVUnknown>(C->getOperand());
    if (!U)
      return nullptr;

    // Match legacy behavior - this is not needed for correctness.
    if (!getUniqueCastUse(U->getValue(), Lp, V->getType()))
      return nullptr;
  }

  return V;
}

void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
  Value *Ptr = getLoadStorePointerOperand(MemAccess);
  if (!Ptr)
    return;

  // Note: getStrideFromPointer is a *profitability* heuristic.  We
  // could broaden the scope of values returned here - to anything
  // which happens to be loop invariant and contributes to the
  // computation of an interesting IV - but we chose not to as we
  // don't have a cost model here, and broadening the scope exposes
  // far too many unprofitable cases.
  const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
  if (!StrideExpr)
    return;

  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
                       "versioning:");
  LLVM_DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");

  if (!SpeculateUnitStride) {
    LLVM_DEBUG(dbgs() << "  Chose not to due to -laa-speculate-unit-stride\n");
    return;
  }

  // Avoid adding the "Stride == 1" predicate when we know that
  // Stride >= Trip-Count. Such a predicate will effectively optimize a single
  // or zero iteration loop, as Trip-Count <= Stride == 1.
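  // For example (illustrative): with Stride == 4 and a backedge-taken count of
  // 2 (trip count 3), the Stride == 1 predicate would restrict the loop to at
  // most one iteration, so versioning is pointless.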
  //
  // TODO: We are currently not making a very informed decision on when it is
  // beneficial to apply stride versioning. It might make more sense that the
  // users of this analysis (such as the vectorizer) will trigger it, based on
  // their specific cost considerations; For example, in cases where stride
  // versioning does not help resolving memory accesses/dependences, the
  // vectorizer should evaluate the cost of the runtime test, and the benefit
  // of various possible stride specializations, considering the alternatives
  // of using gather/scatters (if available).

  const SCEV *BETakenCount = PSE->getBackedgeTakenCount();

  // Match the types so we can compare the stride and the BETakenCount.
  // The Stride can be positive/negative, so we sign extend Stride;
  // The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
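  // For example, if Stride has type i32 and BETakenCount has type i64, Stride
  // is sign-extended to i64 before forming Stride - BETakenCount below.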
  const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
  uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
  uint64_t BETypeSizeBits = DL.getTypeSizeInBits(BETakenCount->getType());
  const SCEV *CastedStride = StrideExpr;
  const SCEV *CastedBECount = BETakenCount;
  ScalarEvolution *SE = PSE->getSE();
  if (BETypeSizeBits >= StrideTypeSizeBits)
    CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
  else
    CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
  const SCEV *StrideMinusBETaken =
      SE->getMinusSCEV(CastedStride, CastedBECount);
  // Since TripCount == BackEdgeTakenCount + 1, checking:
  // "Stride >= TripCount" is equivalent to checking:
  // Stride - BETakenCount > 0
  if (SE->isKnownPositive(StrideMinusBETaken)) {
    LLVM_DEBUG(
        dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
                  "Stride==1 predicate will imply that the loop executes "
                  "at most once.\n");
    return;
  }
  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");

  // Strip back off the integer cast, and check that our result is a
  // SCEVUnknown as we expect.
  const SCEV *StrideBase = StrideExpr;
  if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
    StrideBase = C->getOperand();
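  // Record the symbolic stride so that users of this analysis can version the
  // loop with a Stride == 1 runtime check.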
  SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
}

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const TargetLibraryInfo *TLI, AAResults *AA,
                               DominatorTree *DT, LoopInfo *LI)
    : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
      PtrRtChecking(nullptr),
      DepChecker(std::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L) {
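  // RuntimePointerChecking is built from *DepChecker, so it is constructed
  // here in the body, after DepChecker has been initialized.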
  PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
  if (canAnalyzeLoop()) {
    analyzeLoop(AA, LI, TLI, DT);
  }
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    OS.indent(Depth) << "Memory dependences are safe";
    if (MaxSafeDepDistBytes != -1ULL)
      OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes
         << " bytes";
    if (PtrRtChecking->Need)
      OS << " with run-time checks";
    OS << "\n";
  }

  if (HasConvergentOp)
    OS.indent(Depth) << "Has convergent operation in loop\n";

  if (Report)
    OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";

  if (auto *Dependences = DepChecker->getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (const auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking->print(OS, Depth);
  OS << "\n";

  OS.indent(Depth) << "Non vectorizable stores to invariant address were "
                   << (HasDependenceInvolvingLoopInvariantAddress ? "" : "not ")
                   << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE->getPredicate().print(OS, Depth);

  OS << "\n";

  OS.indent(Depth) << "Expressions re-written:\n";
  PSE->print(OS, Depth);
}

const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L) {
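  // Construct the LoopAccessInfo lazily on the first query for a loop;
  // subsequent queries return the cached result.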
  auto I = LoopAccessInfoMap.insert({&L, nullptr});

  if (I.second)
    I.first->second =
        std::make_unique<LoopAccessInfo>(&L, &SE, TLI, &AA, &DT, &LI);

  return *I.first->second;
}

bool LoopAccessInfoManager::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Check whether our analysis is preserved.
  auto PAC = PA.getChecker<LoopAccessAnalysis>();
  if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
    // If not, give up now.
    return true;

  // Check whether the analyses we depend on became invalid for any reason.
  // Skip checking TargetLibraryAnalysis as it is immutable and can't become
  // invalid.
  return Inv.invalidate<AAManager>(F, PA) ||
         Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA);
}

LoopAccessInfoManager LoopAccessAnalysis::run(Function &F,
                                              FunctionAnalysisManager &FAM) {
  auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
  auto &AA = FAM.getResult<AAManager>(F);
  auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  auto &LI = FAM.getResult<LoopAnalysis>(F);
  auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
  return LoopAccessInfoManager(SE, AA, DT, LI, &TLI);
}

AnalysisKey LoopAccessAnalysis::Key;