//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <set>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
VectorizationFactor("force-vector-width", cl::Hidden,
                    cl::desc("Sets the SIMD width. Zero is autoselect."),
                    cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
VectorizationInterleave("force-vector-interleave", cl::Hidden,
                        cl::desc("Sets the vectorization interleave count. "
                                 "Zero is autoselect."),
                        cl::location(
                            VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;
/// The maximum number of comparisons allowed while merging memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///    if (Stride1 == 1 && Stride2 == 1) {
///      for (i = 0; i < N; i+=4)
///       A[i:i+3] += ...
///    } else
///      ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection. This option can
/// be disabled for correctness testing.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}

Value *llvm::stripIntegerCast(Value *V) {
  if (auto *CI = dyn_cast<CastInst>(V))
    if (CI->getOperand(0)->getType()->isIntegerTy())
      return CI->getOperand(0);
  return V;
}

const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                            const ValueToValueMap &PtrToStride,
                                            Value *Ptr, Value *OrigPtr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  ValueToValueMap::const_iterator SI =
      PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
  if (SI == PtrToStride.end())
    // For a non-symbolic stride, just return the original expression.
    return OrigSCEV;

  Value *StrideVal = stripIntegerCast(SI->second);

  ScalarEvolution *SE = PSE.getSE();
  const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal));
  const auto *CT =
    static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType()));

  PSE.addPredicate(*SE->getEqualPredicate(U, CT));
  auto *Expr = PSE.getSCEV(Ptr);

  LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
                    << " by: " << *Expr << "\n");
  return Expr;
}
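
// Illustrative example (not tied to any particular IR): for a pointer whose
// SCEV is {%base,+,(4 * %stride)} and an entry in PtrToStride mapping it to
// %stride, the predicate "%stride == 1" is added to PSE and the expression
// returned for the pointer becomes {%base,+,4}.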

RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
    unsigned Index, RuntimePointerChecking &RtCheck)
    : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
      AddressSpace(RtCheck.Pointers[Index]
                       .PointerValue->getType()
                       ->getPointerAddressSpace()) {
  Members.push_back(Index);
}

/// Calculate Start and End points of memory access.
/// Let's assume A is the first access and B is a memory access on N-th loop
/// iteration. Then B is calculated as:
///   B = A + Step*N .
/// Step value may be positive or negative.
/// N is a calculated back-edge taken count:
///     N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
/// Start and End points are calculated in the following way:
/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
/// where SizeOfElt is the size of single memory access in bytes.
///
/// There is no conflict when the intervals are disjoint:
/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
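///
/// For example (illustrative values): with A = %base, Step = -4, N = 9 and a
/// 4-byte element, B = %base - 36, so Start = %base - 36 and End = %base + 4.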
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    const ValueToValueMap &Strides,
                                    PredicatedScalarEvolution &PSE) {
  // Get the stride replaced scev.
  const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
  ScalarEvolution *SE = PSE.getSE();

  const SCEV *ScStart;
  const SCEV *ScEnd;

  if (SE->isLoopInvariant(Sc, Lp)) {
    ScStart = ScEnd = Sc;
  } else {
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
    assert(AR && "Invalid addrec expression");
    const SCEV *Ex = PSE.getBackedgeTakenCount();

    ScStart = AR->getStart();
    ScEnd = AR->evaluateAtIteration(Ex, *SE);
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    } else {
      // Fallback case: the step is not constant, but we can still
      // get the upper and lower bounds of the interval by using min/max
      // expressions.
      ScStart = SE->getUMinExpr(ScStart, ScEnd);
      ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
    }
  }
  // Add the size of the pointed element to ScEnd.
  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  Type *IdxTy = DL.getIndexType(Ptr->getType());
  const SCEV *EltSizeSCEV =
      SE->getStoreSizeOfExpr(IdxTy, Ptr->getType()->getPointerElementType());
  ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);

  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
}

SmallVector<RuntimePointerCheck, 4>
RuntimePointerChecking::generateChecks() const {
  SmallVector<RuntimePointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ))
        Checks.push_back(std::make_pair(&CGI, &CGJ));
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}

bool RuntimePointerChecking::needsChecking(
    const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
  for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
    for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
      if (needsChecking(M.Members[I], N.Members[J]))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  const SCEV *Diff = SE->getMinusSCEV(J, I);
  const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);

  if (!C)
    return nullptr;
  if (C->getValue()->isNegative())
    return J;
  return I;
}
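
// Illustrative example: with I = %a and J = %a + 16 the difference is the
// constant 16, which is non-negative, so I is returned as the minimum; when
// the difference does not fold to a SCEVConstant, nullptr is returned.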

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index,
                                         RuntimePointerChecking &RtCheck) {
  return addPointer(
      Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
      RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
      *RtCheck.SE);
}

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
                                         const SCEV *End, unsigned AS,
                                         ScalarEvolution &SE) {
  assert(AddressSpace == AS &&
         "all pointers in a checking group must be in the same address space");

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
  const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, &SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  return true;
}
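
// Illustrative example: a group with bounds [%a, %a + 64) can absorb a
// pointer with range [%a + 32, %a + 96) because both differences are
// constant; the group bounds are then widened to [%a, %a + 96).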

void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from dependency candidates equivalence classes
  // because:
  //    - We know that pointers in the same equivalence class share
  //      the same underlying object and therefore there is a chance
  //      that we can compare pointers
  //    - We wouldn't be able to merge two pointers for which we need
  //      to emit a memcheck. The classes in DepCands are already
  //      conveniently built such that no two pointers in the same
  //      class need checking against each other.

  // We use the following (greedy) algorithm to construct the groups
  // For every pointer in the equivalence class:
  //   For each existing group:
  //   - if the difference between this pointer and the min/max bounds
  //     of the group is a constant, then make the pointer part of the
  //     group and update the min/max bounds of that group as required.

  CheckingGroups.clear();

  // If we need to check two pointers to the same underlying object
  // with a non-constant difference, we shouldn't perform any pointer
  // grouping with those pointers. This is because we can easily get
  // into cases where the resulting check would return false, even when
  // the accesses are safe.
  //
  // The following example shows this:
  // for (i = 0; i < 1000; ++i)
  //   a[5000 + i * m] = a[i] + a[i + 9000]
  //
  // Here grouping gives a check of (5000, 5000 + 1000 * m) against
  // (0, 10000) which is always false. However, if m is 1, there is no
  // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
  // us to perform an accurate check in this case.
  //
  // The above case requires that we have an UnknownDependence between
  // accesses to the same underlying object. This cannot happen unless
  // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
  // is also false. In this case we will use the fallback path and create
  // separate checking groups for all pointers.

  // If we don't have the dependency partitions, construct a new
  // checking pointer group for each pointer. This is also required
  // for correctness, because in this case we can have checking between
  // pointers to the same underlying object.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, unsigned> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue] = Index;

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We've seen this pointer before, and therefore already processed
    // its equivalence class.
    if (Seen.count(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);

    SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order within an equivalence class member is only dependent on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      auto PointerI = PositionMap.find(MI->getPointer());
      assert(PointerI != PositionMap.end() &&
             "pointer in equivalence class not found in PositionMap");
      unsigned Pointer = PointerI->second;
      bool Merged = false;
      // Mark this pointer as seen.
      Seen.insert(Pointer);

      // Go through all the existing sets and see if we can find one
      // which can include this pointer.
      for (RuntimeCheckingPtrGroup &Group : Groups) {
        // Don't perform more than a certain amount of comparisons.
        // This should limit the cost of grouping the pointers to something
        // reasonable.  If we do end up hitting this threshold, the algorithm
        // will create separate groups for all remaining pointers.
        if (TotalComparisons > MemoryCheckMergeThreshold)
          break;

        TotalComparisons++;

        if (Group.addPointer(Pointer, *this)) {
          Merged = true;
          break;
        }
      }

      if (!Merged)
        // We couldn't add this pointer to any existing set or the threshold
        // for the number of comparisons has been reached. Create a new group
        // to hold the current pointer.
        Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    llvm::copy(Groups, std::back_inserter(CheckingGroups));
  }
}

bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}

bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
  const PointerInfo &PointerI = Pointers[I];
  const PointerInfo &PointerJ = Pointers[J];

  // No need to check if two readonly pointers intersect.
  if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
    return false;

  // Only need to check pointers between two different dependency sets.
  if (PointerI.DependencySetId == PointerJ.DependencySetId)
    return false;

  // Only need to check pointers in the same alias set.
  if (PointerI.AliasSetId != PointerJ.AliasSetId)
    return false;

  return true;
}

void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &Check : Checks) {
    const auto &First = Check.first->Members, &Second = Check.second->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";

    OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
    for (unsigned K = 0; K < First.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";

    OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
    for (unsigned K = 0; K < Second.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {

  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    const auto &CG = CheckingGroups[I];

    OS.indent(Depth + 2) << "Group " << &CG << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned J = 0; J < CG.Members.size(); ++J) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
                           << "\n";
    }
  }
}

namespace {

/// Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;

  AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE)
      : TheLoop(TheLoop), AST(*AA), LI(LI), DepCands(DA),
        IsRTCheckAnalysisNeeded(false), PSE(PSE) {}

  /// Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, LocationSize::beforeOrAfterPointer(), Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, false));
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// Register a store.
  void addStore(MemoryLocation &Loc) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, LocationSize::beforeOrAfterPointer(), Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, true));
  }

  /// Check if we can emit a run-time no-alias check for \p Access.
  ///
  /// Returns true if we can emit a run-time no-alias check for \p Access.
  /// If we can check this access, this also adds it to a dependence set and
  /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
  /// we will attempt to use additional run-time checks in order to get
  /// the bounds of the pointer.
  bool createCheckForAccess(RuntimePointerChecking &RtCheck,
                            MemAccessInfo Access,
                            const ValueToValueMap &Strides,
                            DenseMap<Value *, unsigned> &DepSetId,
                            Loop *TheLoop, unsigned &RunningDepId,
                            unsigned ASId, bool ShouldCheckStride,
                            bool Assume);

  /// Check whether we can check the pointers at runtime for
  /// non-intersection.
  ///
  /// Returns true if we need no check or if we do and we can generate them
  /// (i.e. the pointers have computable bounds).
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop, const ValueToValueMap &Strides,
                       bool ShouldCheckWrap = false);

  /// Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis without
  /// dependency checking (i.e. FoundNonConstantDistanceDependence).
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  /// We decided that no dependence analysis would be used.  Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearDependences();
  }

  MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }

private:
  typedef SetVector<MemAccessInfo> PtrAccessSet;

  /// Go over all memory accesses and check whether runtime pointer checks
  /// are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Set of all accesses.
  PtrAccessSet Accesses;

  /// The loop being checked.
  const Loop *TheLoop;

  /// List of accesses that need a further dependence check.
  MemAccessInfoList CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value*, 16> ReadOnlyPtr;

  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// Initial processing of memory accesses determined that we may need
  /// to add memchecks.  Perform the analysis to determine the necessary checks.
  ///
  /// Note that this is different from isDependencyCheckNeeded.  When we retry
  /// memcheck analysis without dependency checking
  /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
  /// cleared while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;
};

} // end anonymous namespace

/// Check whether a pointer can participate in a runtime bounds check.
/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
/// by adding run-time checks (overflow checks) if necessary.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE,
                                const ValueToValueMap &Strides, Value *Ptr,
                                Loop *L, bool Assume) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);

  // The bounds for a loop-invariant pointer are trivial.
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);

  if (!AR && Assume)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR)
    return false;

  return AR->isAffine();
}

/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const ValueToValueMap &Strides, Value *Ptr, Loop *L) {
  const SCEV *PtrScev = PSE.getSCEV(Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  int64_t Stride = getPtrStride(PSE, Ptr, L, Strides);
  if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;

  return false;
}

bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
                                          MemAccessInfo Access,
                                          const ValueToValueMap &StridesMap,
                                          DenseMap<Value *, unsigned> &DepSetId,
                                          Loop *TheLoop, unsigned &RunningDepId,
                                          unsigned ASId, bool ShouldCheckWrap,
                                          bool Assume) {
  Value *Ptr = Access.getPointer();

  if (!hasComputableBounds(PSE, StridesMap, Ptr, TheLoop, Assume))
    return false;

  // When we run after a failing dependency check we have to make sure
  // we don't have wrapping pointers.
  if (ShouldCheckWrap && !isNoWrap(PSE, StridesMap, Ptr, TheLoop)) {
    auto *Expr = PSE.getSCEV(Ptr);
    if (!Assume || !isa<SCEVAddRecExpr>(Expr))
      return false;
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
  }

  // The id of the dependence set.
  unsigned DepId;

  if (isDependencyCheckNeeded()) {
    Value *Leader = DepCands.getLeaderValue(Access).getPointer();
    unsigned &LeaderId = DepSetId[Leader];
    if (!LeaderId)
      LeaderId = RunningDepId++;
    DepId = LeaderId;
  } else
    // Each access has its own dependence set.
    DepId = RunningDepId++;

  bool IsWrite = Access.getInt();
  RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE);
  LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');

  return true;
}

bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
                                     ScalarEvolution *SE, Loop *TheLoop,
                                     const ValueToValueMap &StridesMap,
                                     bool ShouldCheckWrap) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool MayNeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded) return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 0;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;
    bool CanDoAliasSetRT = true;
    ++ASId;

    // We assign a consecutive id to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    SmallVector<MemAccessInfo, 4> Retries;

    // First, count how many write and read accesses are in the alias set. Also
    // collect MemAccessInfos for later.
    SmallVector<MemAccessInfo, 4> AccessInfos;
    for (const auto &A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;
      AccessInfos.emplace_back(Ptr, IsWrite);
    }

    // We do not need runtime checks for this alias set if there are no writes,
    // or if there is a single write and no reads.
    if (NumWritePtrChecks == 0 ||
        (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
      assert((AS.size() <= 1 ||
              all_of(AS,
                     [this](auto AC) {
                       MemAccessInfo AccessWrite(AC.getValue(), true);
                       return DepCands.findValue(AccessWrite) == DepCands.end();
                     })) &&
             "Can only skip updating CanDoRT below, if all entries in AS "
             "are reads or there is at most 1 entry");
      continue;
    }

    for (auto &Access : AccessInfos) {
      if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId, TheLoop,
                                RunningDepId, ASId, ShouldCheckWrap, false)) {
        LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
                          << *Access.getPointer() << '\n');
        Retries.push_back(Access);
        CanDoAliasSetRT = false;
      }
    }

    // Note that this function computes CanDoRT and MayNeedRTCheck
    // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
    // we have a pointer for which we couldn't find the bounds but we don't
    // actually need to emit any checks so it does not matter.
    //
    // We need runtime checks for this alias set, if there are at least 2
    // dependence sets (in which case RunningDepId > 2) or if we need to re-try
    // any bound checks (because in that case the number of dependence sets is
    // incomplete).
    bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();

    // We need to perform run-time alias checks, but some pointers had bounds
    // that couldn't be checked.
    if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
      // Reset the CanDoSetRt flag and retry all accesses that have failed.
      // We know that we need these checks, so we can now be more aggressive
      // and add further checks if required (overflow checks).
      CanDoAliasSetRT = true;
      for (auto Access : Retries)
        if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId,
                                  TheLoop, RunningDepId, ASId,
                                  ShouldCheckWrap, /*Assume=*/true)) {
          CanDoAliasSetRT = false;
          break;
        }
    }

    CanDoRT &= CanDoAliasSetRT;
    MayNeedRTCheck |= NeedsAliasSetRTCheck;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are disjoint.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        LLVM_DEBUG(
            dbgs() << "LAA: Runtime check would require comparison between"
                      " different address spaces\n");
        return false;
      }
    }
  }

  if (MayNeedRTCheck && CanDoRT)
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
                    << " pointer comparisons.\n");

  // If we can do run-time checks, but there are no checks, no runtime checks
  // are needed. This can happen when all pointers point to the same underlying
  // object for example.
  RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;

  bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}

void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  LLVM_DEBUG(dbgs() << "  AST: "; AST.dump());
  LLVM_DEBUG(dbgs() << "LAA:   Accesses(" << Accesses.size() << "):\n");
  LLVM_DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.getPointer() << " (" <<
                (A.getInt() ? "write" : (ReadOnlyPtr.count(A.getPointer()) ?
                                         "read-only" : "read")) << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (const auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally, so the iteration order here is deterministic
    // (matching the original instruction order within each set).

    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessSet DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;

      for (const auto &AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (const auto &AC : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            DeferredAccesses.insert(Access);
            continue;
          }

          // If this is a write, check other reads and writes for conflicts. If
          // this is a read, only check other writes for conflicts (but only if
          // there is no other write to the ptr - this is an optimization to
          // catch "a[i] = a[i] + " without having to do a dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.push_back(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<const Value *, 16> ValueVector;
          ValueVector TempObjects;

          getUnderlyingObjects(Ptr, TempObjects, LI);
          LLVM_DEBUG(dbgs()
                     << "Underlying objects for pointer " << *Ptr << "\n");
          for (const Value *UnderlyingObj : TempObjects) {
            // nullptr never aliases; don't join sets for pointers that have
            // "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj) &&
                !NullPointerIsDefined(
                    TheLoop->getHeader()->getParent(),
                    UnderlyingObj->getType()->getPointerAddressSpace()))
              continue;

            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            LLVM_DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}

static bool isInBoundsGep(Value *Ptr) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return GEP->isInBounds();
  return false;
}

/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           PredicatedScalarEvolution &PSE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values that
  // are derived from a non-wrapping induction variable because non-wrapping
  // could be flow-sensitive.
  //
  // Look through the potentially overflowing instruction to try to prove
  // non-wrapping for the *specific* value of Ptr.

  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (Value *Index : GEP->indices())
    if (!isa<ConstantInt>(Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = Index;
    }
  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed.  It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
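  // Example (illustrative): for "%idx = add nsw i32 %iv, 1" used as the GEP
  // index, proving that the SCEV of %iv is an NSW AddRec on this loop shows
  // that %idx cannot wrap either.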
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume the other operand is constant so that the AddRec can be
        // found easily.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = PSE.getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}

/// Check whether the access through \p Ptr has a constant stride.
int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
                           const Loop *Lp, const ValueToValueMap &StridesMap,
                           bool Assume, bool ShouldCheckWrap) {
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  // Make sure that the pointer does not point to aggregate types.
  auto *PtrTy = cast<PointerType>(Ty);
  if (PtrTy->getElementType()->isAggregateType()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type"
                      << *Ptr << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                      << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                      << *Ptr << " SCEV: " << *AR << "\n");
    return 0;
  }

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap per definition. The unit stride requirement is checked later.
  // A getelementptr without an inbounds attribute but with unit stride would
  // have to access the pointer value "0", which is undefined behavior in
  // address space 0, therefore we can also vectorize this case.
  bool IsInBoundsGEP = isInBoundsGep(Ptr);
  bool IsNoWrapAddRec = !ShouldCheckWrap ||
    PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) ||
    isNoWrapAddRec(Ptr, AR, PSE, Lp);
  if (!IsNoWrapAddRec && !IsInBoundsGEP &&
      NullPointerIsDefined(Lp->getHeader()->getParent(),
                           PtrTy->getAddressSpace())) {
    if (Assume) {
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
      IsNoWrapAddRec = true;
      LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n"
                        << "LAA:   Pointer: " << *Ptr << "\n"
                        << "LAA:   SCEV: " << *AR << "\n"
                        << "LAA:   Added an overflow assumption\n");
    } else {
      LLVM_DEBUG(
          dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
                 << *Ptr << " SCEV: " << *AR << "\n");
      return 0;
    }
  }

  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
                      << " SCEV: " << *AR << "\n");
    return 0;
  }

  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return 0;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return 0;
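
  // Example (illustrative): an i32 access (Size = 4) advancing by 8 bytes per
  // iteration (StepVal = 8) yields Stride = 2, while StepVal = 6 would leave
  // a remainder and is rejected above.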

  // If the SCEV could wrap but we have an inbounds gep with a unit stride, we
  // know we can't "wrap around the address space". In case of address space
  // zero we know that this won't happen without triggering undefined behavior.
  if (!IsNoWrapAddRec && Stride != 1 && Stride != -1 &&
      (IsInBoundsGEP || !NullPointerIsDefined(Lp->getHeader()->getParent(),
                                              PtrTy->getAddressSpace()))) {
    if (Assume) {
      // We can avoid this case by adding a run-time check.
      LLVM_DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either "
                        << "inbounds or in address space 0 may wrap:\n"
                        << "LAA:   Pointer: " << *Ptr << "\n"
                        << "LAA:   SCEV: " << *AR << "\n"
                        << "LAA:   Added an overflow assumption\n");
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    } else
      return 0;
  }

  return Stride;
}

Optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB,
                                    Value *PtrB, const DataLayout &DL,
                                    ScalarEvolution &SE, bool StrictCheck,
                                    bool CheckType) {
  assert(PtrA && PtrB && "Expected non-nullptr pointers.");
  assert(cast<PointerType>(PtrA->getType())
             ->isOpaqueOrPointeeTypeMatches(ElemTyA) && "Wrong PtrA type");
  assert(cast<PointerType>(PtrB->getType())
             ->isOpaqueOrPointeeTypeMatches(ElemTyB) && "Wrong PtrB type");

  // If A and B are the same pointer, their distance is zero.
  if (PtrA == PtrB)
    return 0;

  // Make sure that the element types are the same if required.
  if (CheckType && ElemTyA != ElemTyB)
    return None;

  unsigned ASA = PtrA->getType()->getPointerAddressSpace();
  unsigned ASB = PtrB->getType()->getPointerAddressSpace();

  // Check that the address spaces match.
  if (ASA != ASB)
    return None;
  unsigned IdxWidth = DL.getIndexSizeInBits(ASA);

  APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
  Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  int Val;
  if (PtrA1 == PtrB1) {
    // Retrieve the address space again as pointer stripping now tracks through
    // `addrspacecast`.
    ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
    ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
    // Check that the address spaces match and that the pointers are valid.
    if (ASA != ASB)
      return None;

    IdxWidth = DL.getIndexSizeInBits(ASA);
    OffsetA = OffsetA.sextOrTrunc(IdxWidth);
    OffsetB = OffsetB.sextOrTrunc(IdxWidth);

    OffsetB -= OffsetA;
    Val = OffsetB.getSExtValue();
  } else {
    // Otherwise compute the distance with SCEV between the base pointers.
    const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
    const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
    const auto *Diff =
        dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
    if (!Diff)
      return None;
    Val = Diff->getAPInt().getSExtValue();
  }
  int Size = DL.getTypeStoreSize(ElemTyA);
  int Dist = Val / Size;

  // Ensure that the calculated distance matches the type-based one after
  // removing all the bitcasts from the provided pointers.
  if (!StrictCheck || Dist * Size == Val)
    return Dist;
  return None;
}

bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
                           const DataLayout &DL, ScalarEvolution &SE,
                           SmallVectorImpl<unsigned> &SortedIndices) {
  assert(llvm::all_of(
             VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
         "Expected list of pointer operands.");
  // Walk over the pointers, and map each of them to an offset relative to
  // the first pointer in the array.
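  // Example (illustrative): for VL = {%p + 8, %p, %p + 4} with 4-byte
  // elements, the offsets relative to VL[0] are {0, -2, -1}, so the walk
  // below is non-consecutive and SortedIndices becomes {1, 2, 0}.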
  Value *Ptr0 = VL[0];

  using DistOrdPair = std::pair<int64_t, int>;
  auto Compare = [](const DistOrdPair &L, const DistOrdPair &R) {
    return L.first < R.first;
  };
  std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
  Offsets.emplace(0, 0);
  int Cnt = 1;
  bool IsConsecutive = true;
  for (auto *Ptr : VL.drop_front()) {
    Optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
                                         /*StrictCheck=*/true);
    if (!Diff)
      return false;

    // Bail out if a pointer with the same offset has already been seen.
    int64_t Offset = *Diff;
    auto Res = Offsets.emplace(Offset, Cnt);
    if (!Res.second)
      return false;
    // Consecutive order if the inserted element is the last one.
    IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end();
    ++Cnt;
  }
  SortedIndices.clear();
  if (!IsConsecutive) {
    // Fill SortedIndices array only if it is non-consecutive.
    SortedIndices.resize(VL.size());
    Cnt = 0;
    for (const std::pair<int64_t, int> &Pair : Offsets) {
      SortedIndices[Cnt] = Pair.second;
      ++Cnt;
    }
  }
  return true;
}

/// Returns true if the memory operations \p A and \p B are consecutive.
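/// For example (illustrative), an i32 load from %p followed by an i32 load
/// from %p + 4 is consecutive: getPointersDiff returns a distance of one
/// element.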
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                               ScalarEvolution &SE, bool CheckType) {
  Value *PtrA = getLoadStorePointerOperand(A);
  Value *PtrB = getLoadStorePointerOperand(B);
  if (!PtrA || !PtrB)
    return false;
  Type *ElemTyA = getLoadStoreType(A);
  Type *ElemTyB = getLoadStoreType(B);
  Optional<int> Diff = getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
                                       /*StrictCheck=*/true, CheckType);
  return Diff && *Diff == 1;
}

MemoryDepChecker::VectorizationSafetyStatus
MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return VectorizationSafetyStatus::Safe;

  case Unknown:
    return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return VectorizationSafetyStatus::Unsafe;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
  case Unknown:
    return false;

  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
  return isBackward() || Type == Unknown;
}

bool MemoryDepChecker::Dependence::isForward() const {
  switch (Type) {
  case Forward:
  case ForwardButPreventsForwarding:
    return true;

  case NoDep:
  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
                                                    uint64_t TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor, store-load forwarding does not take place.
  // Positive dependences might cause trouble because vectorizing them might
  // prevent store-load forwarding, making vectorized code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  //   The stores to a[i:i+1] don't align with the loads from a[i-3:i-2] and
  //   hence on your typical architecture store-load forwarding does not take
  //   place. Vectorizing in such cases does not make sense.
  // Store-load forwarding distance.

  // After this many iterations store-to-load forwarding conflicts should not
  // cause any slowdowns.
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  uint64_t MaxVFWithoutSLForwardIssues = std::min(
      VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes);

  // Compute the smallest VF at which the store and load would be misaligned.
  for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
       VF *= 2) {
    // If the number of vector iterations between the store and the load is
    // small, we could incur conflicts.
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (VF >> 1);
      break;
    }
  }

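  // A hypothetical worked example: with TypeByteSize = 4 and Distance = 12
  // (and assuming the initial cap above is larger), the first candidate VF is
  // 8 bytes; 12 % 8 != 0 and 12 / 8 = 1 < 32, so MaxVFWithoutSLForwardIssues
  // drops to 4, which is below 2 * TypeByteSize, and the check below reports
  // a conflict.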
  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    LLVM_DEBUG(
        dbgs() << "LAA: Distance " << Distance
               << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}

void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
  if (Status < S)
    Status = S;
}

/// Given a non-constant (unknown) dependence-distance \p Dist between two
/// memory accesses, that have the same stride whose absolute value is given
/// in \p Stride, and that have the same type size \p TypeByteSize,
/// in a loop whose backedge-taken count is \p BackedgeTakenCount, check if it
/// is possible to prove statically that the dependence distance is larger
/// than the range that the accesses will travel through the execution of
/// the loop. If so, return true; false otherwise. This is useful for
/// example in loops such as the following (PR31098):
///     for (i = 0; i < D; ++i) {
///                = out[i];
///       out[i+D] =
///     }
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
                                     const SCEV &BackedgeTakenCount,
                                     const SCEV &Dist, uint64_t Stride,
                                     uint64_t TypeByteSize) {

  // If we can prove that
  //      (**) |Dist| > BackedgeTakenCount * Step
  // where Step is the absolute stride of the memory accesses in bytes,
  // then there is no dependence.
  //
  // Rationale:
  // We basically want to check if the absolute distance (|Dist/Step|)
  // is >= the loop iteration count (or > BackedgeTakenCount).
  // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
  // Section 4.2.1); Note that for vectorization it is sufficient to prove
  // that the dependence distance is >= VF; this is checked elsewhere.
  // But in some cases we can prune unknown dependence distances early, and
  // even before selecting the VF, and without a runtime test, by comparing
  // the distance against the loop iteration count. Since the vectorized code
  // will be executed only if LoopCount >= VF, proving distance >= LoopCount
  // also guarantees that distance >= VF.
  //
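  // A hypothetical worked example for the loop above: with D = 100, 4-byte
  // elements and unit stride, Dist is 400 bytes, Step is 4, and
  // BackedgeTakenCount is 99; 400 - 99 * 4 = 4 > 0, so (**) holds and the
  // accesses are independent.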
  const uint64_t ByteStride = Stride * TypeByteSize;
  const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
  const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);

  const SCEV *CastedDist = &Dist;
  const SCEV *CastedProduct = Product;
  uint64_t DistTypeSize = DL.getTypeAllocSize(Dist.getType());
  uint64_t ProductTypeSize = DL.getTypeAllocSize(Product->getType());

  // The dependence distance can be positive/negative, so we sign extend Dist;
  // The multiplication of the absolute stride in bytes and the
  // backedgeTakenCount is non-negative, so we zero extend Product.
  if (DistTypeSize > ProductTypeSize)
    CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
  else
    CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());

  // Is Dist - (BackedgeTakenCount * Step) > 0 ?
  // (If so, then we have proven (**) because |Dist| >= Dist)
  const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
  if (SE.isKnownPositive(Minus))
    return true;

  // Second try: Is -Dist - (BackedgeTakenCount * Step) > 0 ?
  // (If so, then we have proven (**) because |Dist| >= -1*Dist)
  const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
  Minus = SE.getMinusSCEV(NegDist, CastedProduct);
  if (SE.isKnownPositive(Minus))
    return true;

  return false;
}

/// Check the dependence for two accesses with the same stride \p Stride.
/// \p Distance is the positive distance and \p TypeByteSize is type size in
/// bytes.
///
/// \returns true if they are independent.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
                                          uint64_t TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in bytes must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not a multiple of the type byte size.
  if (Distance % TypeByteSize)
    return false;

  uint64_t ScaledDist = Distance / TypeByteSize;

  // No dependence if the scaled distance is not a multiple of the stride.
  // E.g.
  //      for (i = 0; i < 1024 ; i += 4)
  //        A[i+2] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 2, stride is 4):
  //     | A[0] |      |      |      | A[4] |      |      |      |
  //     |      |      | A[2] |      |      |      | A[6] |      |
  //
  // E.g.
  //      for (i = 0; i < 1024 ; i += 3)
  //        A[i+4] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 4, stride is 3):
  //     | A[0] |      |      | A[3] |      |      | A[6] |      |      |
  //     |      |      |      |      | A[4] |      |      | A[7] |      |
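  //
  // In the first example above, with 4-byte elements the byte distance is 8,
  // ScaledDist is 8 / 4 = 2, and 2 % 4 != 0, so the accesses never overlap
  // and the function returns true.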
  return ScaledDist % Stride;
}

MemoryDepChecker::Dependence::DepType
MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
                              const MemAccessInfo &B, unsigned BIdx,
                              const ValueToValueMap &Strides) {
  assert(AIdx < BIdx && "Must pass arguments in program order");

  Value *APtr = A.getPointer();
  Value *BPtr = B.getPointer();
  bool AIsWrite = A.getInt();
  bool BIsWrite = B.getInt();

  // Two reads are independent.
  if (!AIsWrite && !BIsWrite)
    return Dependence::NoDep;

  // We cannot check pointers in different address spaces.
  if (APtr->getType()->getPointerAddressSpace() !=
      BPtr->getType()->getPointerAddressSpace())
    return Dependence::Unknown;

  int64_t StrideAPtr = getPtrStride(PSE, APtr, InnermostLoop, Strides, true);
  int64_t StrideBPtr = getPtrStride(PSE, BPtr, InnermostLoop, Strides, true);

  const SCEV *Src = PSE.getSCEV(APtr);
  const SCEV *Sink = PSE.getSCEV(BPtr);

  // If the induction step is negative, we have to invert source and sink of
  // the dependence.
  if (StrideAPtr < 0) {
    std::swap(APtr, BPtr);
    std::swap(Src, Sink);
    std::swap(AIsWrite, BIsWrite);
    std::swap(AIdx, BIdx);
    std::swap(StrideAPtr, StrideBPtr);
  }

  const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src);

  LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << " Sink Scev: " << *Sink
                    << " (Induction step: " << StrideAPtr << ")\n");
  LLVM_DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
                    << *InstMap[BIdx] << ": " << *Dist << "\n");
1513 
1514   // Need accesses with constant stride. We don't want to vectorize
1515   // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
1516   // the address space.
1517   if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr){
1518     LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1519     return Dependence::Unknown;
1520   }

  Type *ATy = APtr->getType()->getPointerElementType();
  Type *BTy = BPtr->getType()->getPointerElementType();
  auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
  uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
  uint64_t Stride = std::abs(StrideAPtr);
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
  if (!C) {
    if (!isa<SCEVCouldNotCompute>(Dist) &&
        TypeByteSize == DL.getTypeAllocSize(BTy) &&
        isSafeDependenceDistance(DL, *(PSE.getSE()),
                                 *(PSE.getBackedgeTakenCount()), *Dist, Stride,
                                 TypeByteSize))
      return Dependence::NoDep;

    LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
    FoundNonConstantDistanceDependence = true;
    return Dependence::Unknown;
  }

  const APInt &Val = C->getAPInt();
  int64_t Distance = Val.getSExtValue();

  // Attempt to prove strided accesses independent.
  if (std::abs(Distance) > 0 && Stride > 1 && ATy == BTy &&
      areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) {
    LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
    return Dependence::NoDep;
  }

  // Negative distances are not plausible dependencies.
  if (Val.isNegative()) {
    bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
    if (IsTrueDataDependence && EnableForwardingConflictDetection &&
        (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
         ATy != BTy)) {
      LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
      return Dependence::ForwardButPreventsForwarding;
    }

    LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
    return Dependence::Forward;
  }

  // Write to the same location with the same size.
  // Could be improved to assert type sizes are the same (i32 == float, etc).
  if (Val == 0) {
    if (ATy == BTy)
      return Dependence::Forward;
    LLVM_DEBUG(
        dbgs() << "LAA: Zero dependence difference but different types\n");
    return Dependence::Unknown;
  }

  assert(Val.isStrictlyPositive() && "Expect a positive value");

  if (ATy != BTy) {
    LLVM_DEBUG(
        dbgs()
        << "LAA: ReadWrite-Write positive dependency with different types\n");
    return Dependence::Unknown;
  }

  // Bail out early if passed-in parameters make vectorization not feasible.
  unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
                           VectorizerParams::VectorizationFactor : 1);
  unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
                           VectorizerParams::VectorizationInterleave : 1);
  // The minimum number of iterations for a vectorized/unrolled version.
  unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
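  // For example, with -force-vector-width=4 and -force-vector-interleave=2,
  // MinNumIter is max(4 * 2, 2U) = 8.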

  // It's not vectorizable if the distance is smaller than the minimum distance
  // needed for a vectorized/unrolled version. Vectorizing one iteration in
  // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
  // TypeByteSize (no need to add the last gap distance).
  //
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      foo(int *A) {
  //        int *B = (int *)((char *)A + 14);
  //        for (i = 0 ; i < 1024 ; i += 2)
  //          B[i] = A[i] + 1;
  //      }
  //
  // Two accesses in memory (stride is 2):
  //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
  //                              | B[0] |      | B[2] |      | B[4] |
  //
  // The distance needed to vectorize all iterations except the last is
  // 4 * 2 * (MinNumIter - 1); the distance needed for the last iteration is 4.
  // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
  //
  // If MinNumIter is 2, the loop is vectorizable as the minimum distance
  // needed is 12, which is less than the distance of 14.
  //
  // If MinNumIter is 4 (say a user forces the vectorization factor to be 4),
  // the minimum distance needed is 28, which is greater than the distance of
  // 14. It is not safe to vectorize in that case.
  uint64_t MinDistanceNeeded =
      TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
    LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance "
                      << Distance << '\n');
    return Dependence::Backward;
  }

  // Unsafe if the minimum distance needed is greater than max safe distance.
  if (MinDistanceNeeded > MaxSafeDepDistBytes) {
    LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
                      << MinDistanceNeeded << " bytes\n");
    return Dependence::Backward;
  }

  // Positive distance bigger than max vectorization factor.
  // FIXME: Should use max factor instead of max distance in bytes, which
  // cannot handle different types.
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      void foo (int *A, char *B) {
  //        for (unsigned i = 0; i < 1024; i++) {
  //          A[i+2] = A[i] + 1;
  //          B[i+2] = B[i] + 1;
  //        }
  //      }
  //
  // This case is currently unsafe according to the max safe distance. If we
  // analyze the accesses on array B first, the max safe dependence distance
  // becomes 2. When we then analyze the accesses on array A, the minimum
  // distance needed is 8, which is greater than 2, so vectorization is
  // forbidden. But actually both A and B could be vectorized with a VF of 2.
  MaxSafeDepDistBytes =
      std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes);

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  if (IsTrueDataDependence && EnableForwardingConflictDetection &&
      couldPreventStoreLoadForward(Distance, TypeByteSize))
    return Dependence::BackwardVectorizableButPreventsForwarding;

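  // A hypothetical worked example: with MaxSafeDepDistBytes = 16, a 4-byte
  // element type and unit stride, MaxVF is 16 / (4 * 1) = 4 elements, i.e. a
  // maximum safe vector width of 128 bits.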
  uint64_t MaxVF = MaxSafeDepDistBytes / (TypeByteSize * Stride);
  LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
                    << " with max VF = " << MaxVF << '\n');
  uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
  MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
  return Dependence::BackwardVectorizable;
}

bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
                                   MemAccessInfoList &CheckDeps,
                                   const ValueToValueMap &Strides) {

  MaxSafeDepDistBytes = -1;
  SmallPtrSet<MemAccessInfo, 8> Visited;
  for (MemAccessInfo CurAccess : CheckDeps) {
    if (Visited.count(CurAccess))
      continue;

    // Get the relevant memory access set.
    EquivalenceClasses<MemAccessInfo>::iterator I =
        AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));

    // Check accesses within this set.
    EquivalenceClasses<MemAccessInfo>::member_iterator AI =
        AccessSets.member_begin(I);
    EquivalenceClasses<MemAccessInfo>::member_iterator AE =
        AccessSets.member_end();

    // Check every access pair.
    while (AI != AE) {
      Visited.insert(*AI);
      bool AIIsWrite = AI->getInt();
      // Check loads only against the next equivalence class, but check stores
      // also against other stores in the same equivalence class - to the same
      // address.
      EquivalenceClasses<MemAccessInfo>::member_iterator OI =
          (AIIsWrite ? AI : std::next(AI));
      while (OI != AE) {
        // Check every accessing instruction pair in program order.
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
             I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
          // Scan all accesses of the other equivalence class, but only the
          // subsequent accesses of the same equivalence class.
          for (std::vector<unsigned>::iterator
                   I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
                   I2E = (OI == AI ? I1E : Accesses[*OI].end());
               I2 != I2E; ++I2) {
            auto A = std::make_pair(&*AI, *I1);
            auto B = std::make_pair(&*OI, *I2);

            assert(*I1 != *I2);
            if (*I1 > *I2)
              std::swap(A, B);

            Dependence::DepType Type =
                isDependent(*A.first, A.second, *B.first, B.second, Strides);
            mergeInStatus(Dependence::isSafeForVectorization(Type));

            // Gather dependences unless we accumulated MaxDependences
            // dependences.  In that case return as soon as we find the first
            // unsafe dependence.  This puts a limit on this quadratic
            // algorithm.
            if (RecordDependences) {
              if (Type != Dependence::NoDep)
                Dependences.push_back(Dependence(A.second, B.second, Type));

              if (Dependences.size() >= MaxDependences) {
                RecordDependences = false;
                Dependences.clear();
                LLVM_DEBUG(dbgs()
                           << "Too many dependences, stopped recording\n");
              }
            }
            if (!RecordDependences && !isSafeForVectorization())
              return false;
          }
        ++OI;
      }
      AI++;
    }
  }

  LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
  return isSafeForVectorization();
}

SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
  MemAccessInfo Access(Ptr, isWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  transform(IndexVector, std::back_inserter(Insts),
            [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}

const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
    "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};

void MemoryDepChecker::Dependence::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<Instruction *> &Instrs) const {
  OS.indent(Depth) << DepName[Type] << ":\n";
  OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
  OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
}

bool LoopAccessInfo::canAnalyzeLoop() {
  // We need to have a loop header.
  LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
                    << TheLoop->getHeader()->getParent()->getName() << ": "
                    << TheLoop->getHeader()->getName() << '\n');

  // We can only analyze innermost loops.
  if (!TheLoop->isInnermost()) {
    LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE->getBackedgeTakenCount();
  if (isa<SCEVCouldNotCompute>(ExitCount)) {
    recordAnalysis("CantComputeNumberOfIterations")
        << "could not determine number of loop iterations";
    LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}

void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
                                 const TargetLibraryInfo *TLI,
                                 DominatorTree *DT) {
  typedef SmallPtrSet<Value*, 16> ValueSet;

  // Holds the Load and Store instructions.
  SmallVector<LoadInst *, 16> Loads;
  SmallVector<StoreInst *, 16> Stores;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  bool HasComplexMemInst = false;

  // A runtime check is only legal to insert if there are no convergent calls.
  HasConvergentOp = false;

  PtrRtChecking->Pointers.clear();
  PtrRtChecking->Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  const bool EnableMemAccessVersioningOfLoop =
      EnableMemAccessVersioning &&
      !TheLoop->getHeader()->getParent()->hasOptSize();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the BB and collect legal loads and stores. Also detect any
    // convergent instructions.
    for (Instruction &I : *BB) {
      if (auto *Call = dyn_cast<CallBase>(&I)) {
        if (Call->isConvergent())
          HasConvergentOp = true;
      }

      // If we have found both a non-vectorizable memory instruction and a
      // convergent operation in this loop, there is no reason to continue
      // the search.
      if (HasComplexMemInst && HasConvergentOp) {
        CanVecMem = false;
        return;
      }

      // Avoid hitting recordAnalysis multiple times.
      if (HasComplexMemInst)
        continue;

      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, then we quit. Notice that we don't handle function
      // calls that read or write.
      if (I.mayReadFromMemory()) {
        // Many math library functions read the rounding mode. We will only
        // vectorize a loop if it contains known function calls that don't set
        // the flag. Therefore, it is safe to ignore this read from memory.
        auto *Call = dyn_cast<CallInst>(&I);
        if (Call && getVectorIntrinsicIDForCall(Call, TLI))
          continue;

        // If the function has an explicit vectorized counterpart, we can safely
        // assume that it can be vectorized.
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            !VFDatabase::getMappings(*Call).empty())
          continue;

        auto *Ld = dyn_cast<LoadInst>(&I);
        if (!Ld) {
          recordAnalysis("CantVectorizeInstruction", Ld)
              << "instruction cannot be vectorized";
          HasComplexMemInst = true;
          continue;
        }
        if (!Ld->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleLoad", Ld)
              << "read with atomic ordering or volatile read";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          HasComplexMemInst = true;
          continue;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker->addAccess(Ld);
        if (EnableMemAccessVersioningOfLoop)
          collectStridedAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to
      // memory.
      if (I.mayWriteToMemory()) {
        auto *St = dyn_cast<StoreInst>(&I);
        if (!St) {
          recordAnalysis("CantVectorizeInstruction", St)
              << "instruction cannot be vectorized";
          HasComplexMemInst = true;
          continue;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleStore", St)
              << "write with atomic ordering or volatile write";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          HasComplexMemInst = true;
          continue;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker->addAccess(St);
        if (EnableMemAccessVersioningOfLoop)
          collectStridedAccess(St);
      }
    } // Next instr.
  } // Next block.

  if (HasComplexMemInst) {
    CanVecMem = false;
    return;
  }

  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (Stores.empty()) {
    LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    CanVecMem = true;
    return;
  }

  MemoryDepChecker::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE);

  // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list). This is okay, since we are going to check for conflicts between
  // writes and between reads and writes, but not between reads and reads.
  ValueSet Seen;

  // Record uniform store addresses to identify if we have multiple stores
  // to the same address.
  ValueSet UniformStores;

  for (StoreInst *ST : Stores) {
    Value *Ptr = ST->getPointerOperand();

    if (isUniform(Ptr))
      HasDependenceInvolvingLoopInvariantAddress |=
          !UniformStores.insert(Ptr).second;

    // If we did *not* see this pointer before, insert it into the read-write
    // list. At this phase it is only a 'write' list.
    if (Seen.insert(Ptr).second) {
      ++NumReadWrites;

      MemoryLocation Loc = MemoryLocation::get(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      Accesses.addStore(Loc);
    }
  }

  if (IsAnnotatedParallel) {
    LLVM_DEBUG(
        dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
               << "checks.\n");
    CanVecMem = true;
    return;
  }

  for (LoadInst *LD : Loads) {
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it into the
    // read list. If we *did* see it before, then it is already in
    // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x, because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    // If the address of i is unknown (for example A[B[i]]) then we may
    // read a few words, modify, and write a few words, and some of the
    // words may be written to the same address.
    bool IsReadOnlyPtr = false;
    if (Seen.insert(Ptr).second ||
        !getPtrStride(*PSE, Ptr, TheLoop, SymbolicStrides)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    // See if there is an unsafe dependency between a load from a uniform
    // address and a store to the same uniform address.
    if (UniformStores.count(Ptr)) {
      LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
                           "load and uniform store to the same address!\n");
      HasDependenceInvolvingLoopInvariantAddress = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    Accesses.addLoad(Loc, IsReadOnlyPtr);
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    CanVecMem = true;
    return;
  }

  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(),
                                                  TheLoop, SymbolicStrides);
  if (!CanDoRTIfNeeded) {
    recordAnalysis("CantIdentifyArrayBounds") << "cannot identify array bounds";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                      << "the array bounds.\n");
    CanVecMem = false;
    return;
  }

  LLVM_DEBUG(dbgs() << "LAA: May be able to perform a memory runtime check if "
                       "needed.\n");

  CanVecMem = true;
  if (Accesses.isDependencyCheckNeeded()) {
    LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    CanVecMem = DepChecker->areDepsSafe(
        DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
    MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes();

    if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
      LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(*DepChecker);

      PtrRtChecking->reset();
      PtrRtChecking->Need = true;

      auto *SE = PSE->getSE();
      CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, SE, TheLoop,
                                                 SymbolicStrides, true);

      // Check that we found the bounds for the pointer.
      if (!CanDoRTIfNeeded) {
        recordAnalysis("CantCheckMemDepsAtRunTime")
            << "cannot check memory dependencies at runtime";
        LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        CanVecMem = false;
        return;
      }

      CanVecMem = true;
    }
  }

  if (HasConvergentOp) {
    recordAnalysis("CantInsertRuntimeCheckWithConvergent")
        << "cannot add control dependency to convergent operation";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
                         "would be needed with a convergent operation\n");
    CanVecMem = false;
    return;
  }

  if (CanVecMem)
    LLVM_DEBUG(
        dbgs() << "LAA: No unsafe dependent memory operations in loop.  We"
               << (PtrRtChecking->Need ? "" : " don't")
               << " need runtime memory checks.\n");
  else {
    recordAnalysis("UnsafeMemDep")
        << "unsafe dependent memory operations in loop. Use "
           "#pragma loop distribute(enable) to allow loop distribution "
           "to attempt to isolate the offending operations into a separate "
           "loop";
    LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
  }
}

bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
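  // (A hypothetical example: the "then" block of an if nested in the loop
  // body executes only when its condition holds, so it does not dominate the
  // latch and needs predication.)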
  BasicBlock *Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}

OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
                                                           Instruction *I) {
  assert(!Report && "Multiple reports generated");

  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName,
                                                        DL, CodeRegion);
  return *Report;
}

bool LoopAccessInfo::isUniform(Value *V) const {
  auto *SE = PSE->getSE();
  // Since we rely on SCEV for uniformity, if the type is not SCEVable, it is
  // never considered uniform.
  // TODO: Is this really what we want? Even without FP SCEV, we may want some
  // trivially loop-invariant FP values to be considered uniform.
  if (!SE->isSCEVable(V->getType()))
    return false;
  return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
}

void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
  Value *Ptr = getLoadStorePointerOperand(MemAccess);
  if (!Ptr)
    return;

  Value *Stride = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
  if (!Stride)
    return;

  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
                       "versioning:");
  LLVM_DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *Stride << "\n");

  // Avoid adding the "Stride == 1" predicate when we know that
  // Stride >= Trip-Count. Such a predicate would effectively optimize a
  // single- or zero-iteration loop, since Stride == 1 would then imply
  // Trip-Count <= 1.
  //
  // TODO: We are currently not making a very informed decision on when it is
  // beneficial to apply stride versioning. It might make more sense that the
  // users of this analysis (such as the vectorizer) will trigger it, based on
  // their specific cost considerations; for example, in cases where stride
  // versioning does not help resolving memory accesses/dependences, the
  // vectorizer should evaluate the cost of the runtime test, and the benefit
  // of various possible stride specializations, considering the alternatives
  // of using gather/scatters (if available).

  const SCEV *StrideExpr = PSE->getSCEV(Stride);
  const SCEV *BETakenCount = PSE->getBackedgeTakenCount();

  // Match the types so we can compare the stride and the BETakenCount.
  // The Stride can be positive/negative, so we sign extend Stride;
  // The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
  const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
  uint64_t StrideTypeSize = DL.getTypeAllocSize(StrideExpr->getType());
  uint64_t BETypeSize = DL.getTypeAllocSize(BETakenCount->getType());
  const SCEV *CastedStride = StrideExpr;
  const SCEV *CastedBECount = BETakenCount;
  ScalarEvolution *SE = PSE->getSE();
  if (BETypeSize >= StrideTypeSize)
    CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
  else
    CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
  const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
  // Since TripCount == BackEdgeTakenCount + 1, checking:
  // "Stride >= TripCount" is equivalent to checking:
  // Stride - BETakenCount > 0
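  //
  // A hypothetical example: if BETakenCount is 99 (trip count 100) and Stride
  // is known to be at least 100, Stride - BETakenCount is positive, and
  // specializing the loop for Stride == 1 would cover at most one iteration.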
  if (SE->isKnownPositive(StrideMinusBETaken)) {
    LLVM_DEBUG(
        dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
                  "Stride==1 predicate will imply that the loop executes "
                  "at most once.\n");
    return;
  }
  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");

  SymbolicStrides[Ptr] = Stride;
  StrideSet.insert(Stride);
}

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const TargetLibraryInfo *TLI, AAResults *AA,
                               DominatorTree *DT, LoopInfo *LI)
    : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
      PtrRtChecking(std::make_unique<RuntimePointerChecking>(SE)),
      DepChecker(std::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L),
      NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1), CanVecMem(false),
      HasConvergentOp(false),
      HasDependenceInvolvingLoopInvariantAddress(false) {
  if (canAnalyzeLoop())
    analyzeLoop(AA, LI, TLI, DT);
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    OS.indent(Depth) << "Memory dependences are safe";
    if (MaxSafeDepDistBytes != -1ULL)
      OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes
         << " bytes";
    if (PtrRtChecking->Need)
      OS << " with run-time checks";
    OS << "\n";
  }

  if (HasConvergentOp)
    OS.indent(Depth) << "Has convergent operation in loop\n";

  if (Report)
    OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";

  if (auto *Dependences = DepChecker->getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";
  // List the pairs of accesses that need run-time checks to prove independence.
  PtrRtChecking->print(OS, Depth);
  OS << "\n";

  OS.indent(Depth) << "Non vectorizable stores to invariant address were "
                   << (HasDependenceInvolvingLoopInvariantAddress ? "" : "not ")
                   << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE->getUnionPredicate().print(OS, Depth);

  OS << "\n";

  OS.indent(Depth) << "Expressions re-written:\n";
  PSE->print(OS, Depth);
}

LoopAccessLegacyAnalysis::LoopAccessLegacyAnalysis() : FunctionPass(ID) {
  initializeLoopAccessLegacyAnalysisPass(*PassRegistry::getPassRegistry());
}

const LoopAccessInfo &LoopAccessLegacyAnalysis::getInfo(Loop *L) {
  auto &LAI = LoopAccessInfoMap[L];

  if (!LAI)
    LAI = std::make_unique<LoopAccessInfo>(L, SE, TLI, AA, DT, LI);

  return *LAI.get();
}

void LoopAccessLegacyAnalysis::print(raw_ostream &OS, const Module *M) const {
  LoopAccessLegacyAnalysis &LAA = *const_cast<LoopAccessLegacyAnalysis *>(this);

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop)) {
      OS.indent(2) << L->getHeader()->getName() << ":\n";
      auto &LAI = LAA.getInfo(L);
      LAI.print(OS, 4);
    }
}

bool LoopAccessLegacyAnalysis::runOnFunction(Function &F) {
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

  return false;
}

void LoopAccessLegacyAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();

  AU.setPreservesAll();
}

char LoopAccessLegacyAnalysis::ID = 0;
static const char laa_name[] = "Loop Access Analysis";
#define LAA_NAME "loop-accesses"

INITIALIZE_PASS_BEGIN(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)

AnalysisKey LoopAccessAnalysis::Key;

LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM,
                                       LoopStandardAnalysisResults &AR) {
  return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI);
}

namespace llvm {

  Pass *createLAAPass() {
    return new LoopAccessLegacyAnalysis();
  }

} // end namespace llvm