//===- llvm/Analysis/LoopAccessAnalysis.h -----------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interface for the loop memory dependence framework that
// was originally developed for the Loop Vectorizer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LOOPACCESSANALYSIS_H
#define LLVM_ANALYSIS_LOOPACCESSANALYSIS_H

#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {

class Value;
class DataLayout;
class ScalarEvolution;
class Loop;
class SCEV;
class SCEVUnionPredicate;
class LoopAccessInfo;
class OptimizationRemarkEmitter;

/// Collection of parameters shared between the Loop Vectorizer and the
/// Loop Access Analysis.
struct VectorizerParams {
  /// Maximum SIMD width.
  static const unsigned MaxVectorWidth;

  /// VF as overridden by the user.
  static unsigned VectorizationFactor;
  /// Interleave factor as overridden by the user.
  static unsigned VectorizationInterleave;
  /// True if force-vector-interleave was specified by the user.
  static bool isInterleaveForced();

  /// When performing memory disambiguation checks at runtime do not
  /// make more than this number of comparisons.
  static unsigned RuntimeMemoryCheckThreshold;
};

/// Checks memory dependences among accesses to the same underlying
/// object to determine whether vectorization is legal or not (and at
/// which vectorization factor).
///
/// Note: This class will compute a conservative dependence for accesses to
/// different underlying pointers. Clients, such as the loop vectorizer, will
/// sometimes deal with these potential dependencies by emitting runtime checks.
///
/// We use the ScalarEvolution framework to symbolically evaluate pairs of
/// access functions. Since we currently don't restructure the loop we can rely
/// on the program order of memory accesses to determine their safety.
/// At the moment we will only deem accesses as safe for:
///  * A negative constant distance assuming program order.
///
///      Safe: tmp = a[i + 1];              OR    a[i + 1] = x;
///            a[i] = tmp;                        y = a[i];
///
///   The latter case is safe because later checks guarantee that there can't
///   be a cycle through a phi node (that is, we check that "x" and "y" are not
///   the same variable: a header phi can only be an induction or a reduction,
///   a reduction can't have a memory sink, an induction can't have a memory
///   source). This is important and must not be violated (or we have to
///   resort to checking for cycles through memory).
///
///  * A positive constant distance assuming program order that is bigger
///    than the biggest memory access.
///
///      tmp = a[i]                         OR    b[i] = x
///      a[i+2] = tmp                             y = b[i+2];
///
///   Safe distance: 2 x sizeof(a[0]), and 2 x sizeof(b[0]), respectively.
///
///  * Zero distances and all accesses have the same size.
///
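/// A minimal usage sketch (hypothetical driver code; in-tree this class is
/// driven by LoopAccessInfo rather than used directly, and AccessSets,
/// CheckDeps and Strides are assumed to be populated by the caller):
/// \code
///   MemoryDepChecker DepChecker(PSE, TheLoop);
///   // Register loads and stores in program order.
///   for (BasicBlock *BB : TheLoop->blocks())
///     for (Instruction &I : *BB) {
///       if (auto *LD = dyn_cast<LoadInst>(&I))
///         DepChecker.addAccess(LD);
///       else if (auto *ST = dyn_cast<StoreInst>(&I))
///         DepChecker.addAccess(ST);
///     }
///   bool Safe = DepChecker.areDepsSafe(AccessSets, CheckDeps, Strides);
/// \endcode
///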
class MemoryDepChecker {
public:
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
  /// Set of potential dependent memory accesses.
  typedef EquivalenceClasses<MemAccessInfo> DepCandidates;

  /// Dependence between memory access instructions.
  struct Dependence {
    /// The type of the dependence.
    enum DepType {
      // No dependence.
      NoDep,
      // We couldn't determine the direction or the distance.
      Unknown,
      // Lexically forward.
      //
      // FIXME: If we only have loop-independent forward dependences (e.g. a
      // read and write of A[i]), LAA will locally deem the dependence "safe"
      // without querying the MemoryDepChecker. Therefore we can miss
      // enumerating loop-independent forward dependences in
      // getDependences. Note that as soon as there are different
      // indices used to access the same array, the MemoryDepChecker *is*
      // queried and the dependence list is complete.
      Forward,
      // Forward, but if vectorized, is likely to prevent store-to-load
      // forwarding.
      ForwardButPreventsForwarding,
      // Lexically backward.
      Backward,
      // Backward, but the distance allows a vectorization factor of
      // MaxSafeDepDistBytes.
      BackwardVectorizable,
      // Same, but may prevent store-to-load forwarding.
      BackwardVectorizableButPreventsForwarding
    };

    /// String version of the types.
    static const char *DepName[];

    /// Index of the source of the dependence in the InstMap vector.
    unsigned Source;
    /// Index of the destination of the dependence in the InstMap vector.
    unsigned Destination;
    /// The type of the dependence.
    DepType Type;

    Dependence(unsigned Source, unsigned Destination, DepType Type)
        : Source(Source), Destination(Destination), Type(Type) {}

    /// Return the source instruction of the dependence.
    Instruction *getSource(const LoopAccessInfo &LAI) const;
    /// Return the destination instruction of the dependence.
    Instruction *getDestination(const LoopAccessInfo &LAI) const;

    /// Dependence types that don't prevent vectorization.
    static bool isSafeForVectorization(DepType Type);

    /// Lexically forward dependence.
    bool isForward() const;
    /// Lexically backward dependence.
    bool isBackward() const;

    /// May be a lexically backward dependence type (includes Unknown).
    bool isPossiblyBackward() const;

    /// Print the dependence. \p Instrs is used to map the instruction
    /// indices to instructions.
    void print(raw_ostream &OS, unsigned Depth,
               const SmallVectorImpl<Instruction *> &Instrs) const;
  };

  MemoryDepChecker(PredicatedScalarEvolution &PSE, const Loop *L)
      : PSE(PSE), InnermostLoop(L), AccessIdx(0), MaxSafeRegisterWidth(-1U),
        ShouldRetryWithRuntimeCheck(false), SafeForVectorization(true),
        RecordDependences(true) {}

  /// Register the location (instructions are given increasing numbers)
  /// of a write access.
  void addAccess(StoreInst *SI) {
    Value *Ptr = SI->getPointerOperand();
    Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
    InstMap.push_back(SI);
    ++AccessIdx;
  }

  /// Register the location (instructions are given increasing numbers)
  /// of a read access.
  void addAccess(LoadInst *LI) {
    Value *Ptr = LI->getPointerOperand();
    Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
    InstMap.push_back(LI);
    ++AccessIdx;
  }

  /// Check whether the dependencies between the accesses are safe.
  ///
  /// Only checks sets with elements in \p CheckDeps.
  bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
                   const ValueToValueMap &Strides);

  /// No memory dependence was encountered that would inhibit
  /// vectorization.
  bool isSafeForVectorization() const { return SafeForVectorization; }

  /// The maximum number of bytes of a vector register we can vectorize
  /// the accesses safely with.
  uint64_t getMaxSafeDepDistBytes() { return MaxSafeDepDistBytes; }

  /// Return the number of elements that are safe to operate on
  /// simultaneously, multiplied by the size of the element in bits.
  uint64_t getMaxSafeRegisterWidth() const { return MaxSafeRegisterWidth; }

  /// In some cases, when the dependency check fails, we can still
  /// vectorize the loop with a dynamic array access check.
  bool shouldRetryWithRuntimeCheck() { return ShouldRetryWithRuntimeCheck; }

  /// Returns the memory dependences. If null is returned we exceeded
  /// the MaxDependences threshold and this information is not
  /// available.
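  ///
  /// A short sketch of how the result might be consumed (assuming dbgs() from
  /// llvm/Support/Debug.h is available for printing):
  /// \code
  ///   if (const auto *Deps = DepChecker.getDependences())
  ///     for (const MemoryDepChecker::Dependence &Dep : *Deps)
  ///       Dep.print(dbgs(), /*Depth=*/2, DepChecker.getMemoryInstructions());
  /// \endcode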
  const SmallVectorImpl<Dependence> *getDependences() const {
    return RecordDependences ? &Dependences : nullptr;
  }

  void clearDependences() { Dependences.clear(); }

  /// The vector of memory access instructions. The indices are used as
  /// instruction identifiers in the Dependence class.
  const SmallVectorImpl<Instruction *> &getMemoryInstructions() const {
    return InstMap;
  }

  /// Generate a mapping between the memory instructions and their
  /// indices according to program order.
  DenseMap<Instruction *, unsigned> generateInstructionOrderMap() const {
    DenseMap<Instruction *, unsigned> OrderMap;

    for (unsigned I = 0; I < InstMap.size(); ++I)
      OrderMap[InstMap[I]] = I;

    return OrderMap;
  }

  /// Find the set of instructions that read or write via \p Ptr.
  SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
                                                         bool isWrite) const;

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks, and
  /// applies dynamic knowledge to simplify SCEV expressions and convert them
  /// to a more usable form. We need this in case assumptions about SCEV
  /// expressions need to be made in order to avoid unknown dependences. For
  /// example we might assume a unit stride for a pointer in order to prove
  /// that a memory access is strided and doesn't wrap.
  PredicatedScalarEvolution &PSE;
  const Loop *InnermostLoop;

  /// Maps access locations (ptr, read/write) to program order.
  DenseMap<MemAccessInfo, std::vector<unsigned> > Accesses;

  /// Memory access instructions in program order.
  SmallVector<Instruction *, 16> InstMap;

  /// The program order index to be used for the next instruction.
  unsigned AccessIdx;

  // We can access this many bytes in parallel safely.
  uint64_t MaxSafeDepDistBytes;

  /// Number of elements (from consecutive iterations) that are safe to
  /// operate on simultaneously, multiplied by the size of the element in bits.
  /// The size of the element is taken from the memory access that is most
  /// restrictive.
  uint64_t MaxSafeRegisterWidth;

  /// If we see a non-constant dependence distance we can still try to
  /// vectorize this loop with runtime checks.
  bool ShouldRetryWithRuntimeCheck;

  /// No memory dependence was encountered that would inhibit
  /// vectorization.
  bool SafeForVectorization;

  /// True if Dependences reflects the dependences in the
  /// loop. If false we exceeded MaxDependences and
  /// Dependences is invalid.
  bool RecordDependences;

  /// Memory dependences collected during the analysis. Only valid if
  /// RecordDependences is true.
  SmallVector<Dependence, 8> Dependences;

  /// Check whether there is a plausible dependence between the two
  /// accesses.
  ///
  /// Access \p A must happen before \p B in program order. The two indices
  /// identify the index into the program order map.
  ///
  /// This function checks whether there is a plausible dependence (or the
  /// absence of such can't be proved) between the two accesses. If there is a
  /// plausible dependence but the dependence distance is bigger than one
  /// element access, it records this distance in \p MaxSafeDepDistBytes (if
  /// this distance is smaller than any other distance encountered so far).
  /// Otherwise, this function returns true signaling a possible dependence.
  Dependence::DepType isDependent(const MemAccessInfo &A, unsigned AIdx,
                                  const MemAccessInfo &B, unsigned BIdx,
                                  const ValueToValueMap &Strides);

  /// Check whether the data dependence could prevent store-load
  /// forwarding.
  ///
  /// \return false if we shouldn't vectorize at all or avoid larger
  /// vectorization factors by limiting MaxSafeDepDistBytes.
  bool couldPreventStoreLoadForward(uint64_t Distance, uint64_t TypeByteSize);
};

/// Holds information about the memory runtime legality checks to verify
/// that a group of pointers do not overlap.
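///
/// A rough usage sketch (the in-tree driver is LoopAccessInfo; names such as
/// TheLoop, Ptr, Strides and DepCands below are placeholders supplied by the
/// caller):
/// \code
///   RuntimePointerChecking RtCheck(SE);
///   RtCheck.insert(TheLoop, Ptr, /*WritePtr=*/true, /*DepSetId=*/0,
///                  /*ASId=*/0, Strides, PSE);
///   // ... insert the remaining pointers ...
///   RtCheck.generateChecks(DepCands, /*UseDependencies=*/true);
///   for (const auto &Check : RtCheck.getChecks())
///     ; // emit a memcheck between Check.first and Check.second
/// \endcode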
class RuntimePointerChecking {
public:
  struct PointerInfo {
    /// Holds the pointer value that we need to check.
    TrackingVH<Value> PointerValue;
    /// Holds the smallest byte address accessed by the pointer throughout all
    /// iterations of the loop.
    const SCEV *Start;
    /// Holds the largest byte address accessed by the pointer throughout all
    /// iterations of the loop, plus 1.
    const SCEV *End;
    /// Holds the information if this pointer is used for writing to memory.
    bool IsWritePtr;
    /// Holds the id of the set of pointers that could be dependent because of
    /// a shared underlying object.
    unsigned DependencySetId;
    /// Holds the id of the disjoint alias set to which this pointer belongs.
    unsigned AliasSetId;
    /// SCEV for the access.
    const SCEV *Expr;

    PointerInfo(Value *PointerValue, const SCEV *Start, const SCEV *End,
                bool IsWritePtr, unsigned DependencySetId, unsigned AliasSetId,
                const SCEV *Expr)
        : PointerValue(PointerValue), Start(Start), End(End),
          IsWritePtr(IsWritePtr), DependencySetId(DependencySetId),
          AliasSetId(AliasSetId), Expr(Expr) {}
  };

  RuntimePointerChecking(ScalarEvolution *SE) : Need(false), SE(SE) {}

  /// Reset the state of the pointer runtime information.
  void reset() {
    Need = false;
    Pointers.clear();
    Checks.clear();
  }

  /// Insert a pointer and calculate the start and end SCEVs.
  /// We need \p PSE in order to compute the SCEV expression of the pointer
  /// according to the assumptions that we've made during the analysis.
  /// The method might also version the pointer stride according to \p Strides,
  /// and add new predicates to \p PSE.
  void insert(Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId,
              unsigned ASId, const ValueToValueMap &Strides,
              PredicatedScalarEvolution &PSE);

  /// No run-time memory checking is necessary.
  bool empty() const { return Pointers.empty(); }

  /// A grouping of pointers. A single memcheck is required between
  /// two groups.
  struct CheckingPtrGroup {
    /// Create a new pointer checking group containing a single
    /// pointer, with index \p Index in RtCheck.
    CheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
        : RtCheck(RtCheck), High(RtCheck.Pointers[Index].End),
          Low(RtCheck.Pointers[Index].Start) {
      Members.push_back(Index);
    }

    /// Tries to add the pointer recorded in RtCheck at index
    /// \p Index to this pointer checking group. We can only add a pointer
    /// to a checking group if we will still be able to get
    /// the upper and lower bounds of the check. Returns true in case
    /// of success, false otherwise.
    bool addPointer(unsigned Index);

    /// Constitutes the context of this pointer checking group. For each
    /// pointer that is a member of this group we will retain the index
    /// at which it appears in RtCheck.
    RuntimePointerChecking &RtCheck;
    /// The SCEV expression which represents the upper bound of all the
    /// pointers in this group.
    const SCEV *High;
    /// The SCEV expression which represents the lower bound of all the
    /// pointers in this group.
    const SCEV *Low;
    /// Indices of all the pointers that constitute this grouping.
    SmallVector<unsigned, 2> Members;
  };

  /// A memcheck which is made up of a pair of grouped pointers.
  ///
  /// These *have* to be const for now, since checks are generated from
  /// CheckingPtrGroups in LAI::addRuntimeChecks which is a const member
  /// function. FIXME: once check-generation is moved inside this class (after
  /// the PtrPartition hack is removed), we could drop const.
  typedef std::pair<const CheckingPtrGroup *, const CheckingPtrGroup *>
      PointerCheck;

  /// Generate the checks and store them. This also performs the grouping
  /// of pointers to reduce the number of memchecks necessary.
  void generateChecks(MemoryDepChecker::DepCandidates &DepCands,
                      bool UseDependencies);

  /// Returns the checks that generateChecks created.
  const SmallVector<PointerCheck, 4> &getChecks() const { return Checks; }

  /// Decide if we need to add a check between two groups of pointers,
  /// according to needsChecking.
  bool needsChecking(const CheckingPtrGroup &M,
                     const CheckingPtrGroup &N) const;

  /// Returns the number of run-time checks required according to
  /// needsChecking.
  unsigned getNumberOfChecks() const { return Checks.size(); }

  /// Print the list of run-time memory checks necessary.
  void print(raw_ostream &OS, unsigned Depth = 0) const;

  /// Print \p Checks.
  void printChecks(raw_ostream &OS,
                   const SmallVectorImpl<PointerCheck> &Checks,
                   unsigned Depth = 0) const;

  /// This flag indicates if we need to add the runtime check.
  bool Need;

  /// Information about the pointers that may require checking.
  SmallVector<PointerInfo, 2> Pointers;

  /// Holds a partitioning of pointers into "check groups".
  SmallVector<CheckingPtrGroup, 2> CheckingGroups;

  /// Check if pointers are in the same partition.
  ///
  /// \p PtrToPartition contains the partition number for pointers (-1 if the
  /// pointer belongs to multiple partitions).
  static bool
  arePointersInSamePartition(const SmallVectorImpl<int> &PtrToPartition,
                             unsigned PtrIdx1, unsigned PtrIdx2);

  /// Decide whether we need to issue a run-time check for the pointers at
  /// indices \p I and \p J to prove their independence.
  bool needsChecking(unsigned I, unsigned J) const;

  /// Return PointerInfo for pointer at index \p PtrIdx.
  const PointerInfo &getPointerInfo(unsigned PtrIdx) const {
    return Pointers[PtrIdx];
  }

private:
  /// Groups pointers such that a single memcheck is required
  /// between two different groups. This will clear the CheckingGroups vector
  /// and re-compute it. We will only group dependencies if \p UseDependencies
  /// is true, otherwise we will create a separate group for each pointer.
  void groupChecks(MemoryDepChecker::DepCandidates &DepCands,
                   bool UseDependencies);

  /// Generate the checks and return them.
  SmallVector<PointerCheck, 4> generateChecks() const;

  /// Holds a pointer to the ScalarEvolution analysis.
  ScalarEvolution *SE;

  /// Set of run-time checks required to establish independence of
  /// otherwise may-aliasing pointers in the loop.
  SmallVector<PointerCheck, 4> Checks;
};

/// Drive the analysis of memory accesses in the loop.
///
/// This class is responsible for analyzing the memory accesses of a loop. It
/// collects the accesses and then its main helper, the AccessAnalysis class,
/// finds and categorizes the dependences in buildDependenceSets.
///
/// For memory dependences that can be analyzed at compile time, it determines
/// whether the dependence is part of a cycle inhibiting vectorization. This
/// work is delegated to the MemoryDepChecker class.
///
/// For memory dependences that cannot be determined at compile time, it
/// generates run-time checks to prove independence. This is done by
/// AccessAnalysis::canCheckPtrAtRT and the checks are maintained by the
/// RuntimePointerCheck class.
///
/// If pointers can wrap or can't be expressed as affine AddRec expressions by
/// ScalarEvolution, we will generate run-time checks by emitting a
/// SCEVUnionPredicate.
///
/// Checks for both memory dependences and the SCEV predicates contained in the
/// PSE must be emitted in order for the results of this analysis to be valid.
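///
/// A hedged sketch of how a client might consume the result (InsertPt and the
/// analysis pointers are illustrative names provided by the caller):
/// \code
///   LoopAccessInfo LAI(L, SE, TLI, AA, DT, LI);
///   if (LAI.canVectorizeMemory()) {
///     if (LAI.getNumRuntimePointerChecks() > 0) {
///       // The runtime checks (and the SCEV predicates in LAI.getPSE()) must
///       // be emitted for the "no dependence" result to hold.
///       auto FirstLastInst = LAI.addRuntimeChecks(InsertPt);
///       (void)FirstLastInst;
///     }
///   }
/// \endcode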
class LoopAccessInfo {
public:
  LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetLibraryInfo *TLI,
                 AliasAnalysis *AA, DominatorTree *DT, LoopInfo *LI);

  /// Return true if we can analyze the memory accesses in the loop and there
  /// are no memory dependence cycles.
  bool canVectorizeMemory() const { return CanVecMem; }

  const RuntimePointerChecking *getRuntimePointerChecking() const {
    return PtrRtChecking.get();
  }

  /// Number of memchecks required to prove independence of otherwise
  /// may-alias pointers.
  unsigned getNumRuntimePointerChecks() const {
    return PtrRtChecking->getNumberOfChecks();
  }

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                    DominatorTree *DT);

  /// Returns true if the value V is uniform within the loop.
  bool isUniform(Value *V) const;

  uint64_t getMaxSafeDepDistBytes() const { return MaxSafeDepDistBytes; }
  unsigned getNumStores() const { return NumStores; }
  unsigned getNumLoads() const { return NumLoads; }

  /// Add code that checks at runtime if the accessed arrays overlap.
  ///
  /// Returns a pair of instructions where the first element is the first
  /// instruction generated in possibly a sequence of instructions and the
  /// second value is the final comparator value or NULL if no check is needed.
  std::pair<Instruction *, Instruction *>
  addRuntimeChecks(Instruction *Loc) const;

  /// Generate the instructions for the checks in \p PointerChecks.
  ///
  /// Returns a pair of instructions where the first element is the first
  /// instruction generated in possibly a sequence of instructions and the
  /// second value is the final comparator value or NULL if no check is needed.
  std::pair<Instruction *, Instruction *>
  addRuntimeChecks(Instruction *Loc,
                   const SmallVectorImpl<RuntimePointerChecking::PointerCheck>
                       &PointerChecks) const;

  /// The diagnostics report generated for the analysis, e.g. why we
  /// couldn't analyze the loop.
  const OptimizationRemarkAnalysis *getReport() const { return Report.get(); }

  /// The Memory Dependence Checker which can determine the
  /// loop-independent and loop-carried dependences between memory accesses.
  const MemoryDepChecker &getDepChecker() const { return *DepChecker; }

  /// Return the list of instructions that use \p Ptr to read or write
  /// memory.
  SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
                                                         bool isWrite) const {
    return DepChecker->getInstructionsForAccess(Ptr, isWrite);
  }

  /// If an access has a symbolic stride, this maps the pointer value to
  /// the stride symbol.
  const ValueToValueMap &getSymbolicStrides() const { return SymbolicStrides; }

  /// Pointer has a symbolic stride.
  bool hasStride(Value *V) const { return StrideSet.count(V); }

  /// Print the information about the memory accesses in the loop.
  void print(raw_ostream &OS, unsigned Depth = 0) const;

  /// Checks existence of a store to an invariant address inside the loop.
  /// If the loop has any store to an invariant address, then it returns true,
  /// else it returns false.
  bool hasStoreToLoopInvariantAddress() const {
    return StoreToLoopInvariantAddress;
  }

  /// Used to add runtime SCEV checks. Simplifies SCEV expressions and converts
  /// them to a more usable form. All SCEV expressions during the analysis
  /// should be re-written (and therefore simplified) according to PSE.
  /// A user of LoopAccessAnalysis will need to emit the runtime checks
  /// associated with this predicate.
  const PredicatedScalarEvolution &getPSE() const { return *PSE; }

private:
  /// Analyze the loop.
  void analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
                   const TargetLibraryInfo *TLI, DominatorTree *DT);

  /// Check if the structure of the loop allows it to be analyzed by this
  /// pass.
  bool canAnalyzeLoop();

  /// Save the analysis remark.
  ///
  /// LAA does not directly emit the remarks. Instead it stores them, so that
  /// the client can retrieve and present them as its own analysis
  /// (e.g. -Rpass-analysis=loop-vectorize).
  OptimizationRemarkAnalysis &recordAnalysis(StringRef RemarkName,
                                             Instruction *Instr = nullptr);

  /// Collect memory accesses with loop invariant strides.
  ///
  /// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
  /// invariant.
  void collectStridedAccess(Value *LoadOrStoreInst);

  std::unique_ptr<PredicatedScalarEvolution> PSE;

  /// We need to check that all of the pointers in this list are disjoint
  /// at runtime. Using std::unique_ptr to make using the move ctor simpler.
  std::unique_ptr<RuntimePointerChecking> PtrRtChecking;

  /// The Memory Dependence Checker which can determine the
  /// loop-independent and loop-carried dependences between memory accesses.
  std::unique_ptr<MemoryDepChecker> DepChecker;

  Loop *TheLoop;

  unsigned NumLoads;
  unsigned NumStores;

  uint64_t MaxSafeDepDistBytes;

  /// Cache the result of analyzeLoop.
  bool CanVecMem;

  /// Indicator for storing to uniform addresses.
  /// If a loop has a write to a loop-invariant address then this should be
  /// true.
  bool StoreToLoopInvariantAddress;

  /// The diagnostics report generated for the analysis, e.g. why we
  /// couldn't analyze the loop.
  std::unique_ptr<OptimizationRemarkAnalysis> Report;

  /// If an access has a symbolic stride, this maps the pointer value to
  /// the stride symbol.
  ValueToValueMap SymbolicStrides;

  /// Set of symbolic stride values.
  SmallPtrSet<Value *, 8> StrideSet;
};

Value *stripIntegerCast(Value *V);

/// Return the SCEV corresponding to a pointer with the symbolic stride
/// replaced with constant one, assuming the SCEV predicate associated with
/// \p PSE is true.
///
/// If necessary this method will version the stride of the pointer according
/// to \p PtrToStride and therefore add further predicates to \p PSE.
///
/// If \p OrigPtr is not null, use it to look up the stride value instead of \p
/// Ptr. \p PtrToStride provides the mapping between the pointer value and its
/// stride as collected by LoopVectorizationLegality::collectStridedAccess.
const SCEV *replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                      const ValueToValueMap &PtrToStride,
                                      Value *Ptr, Value *OrigPtr = nullptr);

/// If the pointer has a constant stride return it in units of its
/// element size. Otherwise return zero.
///
/// Ensure that it does not wrap in the address space, assuming the predicate
/// associated with \p PSE is true.
///
/// If necessary this method will version the stride of the pointer according
/// to \p PtrToStride and therefore add further predicates to \p PSE.
/// The \p Assume parameter indicates if we are allowed to make additional
/// run-time assumptions.
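///
/// A small usage sketch (Ptr, TheLoop and Strides are placeholders supplied
/// by the caller):
/// \code
///   int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides);
///   if (Stride == 1) {
///     // Consecutive forward access: a candidate for a wide load/store.
///   } else if (Stride == 0) {
///     // Non-constant (or unanalyzable) stride.
///   }
/// \endcode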
int64_t getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr, const Loop *Lp,
                     const ValueToValueMap &StridesMap = ValueToValueMap(),
                     bool Assume = false, bool ShouldCheckWrap = true);

/// Attempt to sort the pointers in \p VL and return the sorted indices
/// in \p SortedIndices, if reordering is required.
///
/// Returns 'true' if sorting is legal, otherwise returns 'false'.
///
/// For example, for a given \p VL of memory accesses in program order, a[i+4],
/// a[i+0], a[i+1] and a[i+7], this function will sort the accesses as a[i+0],
/// a[i+1], a[i+4], a[i+7] and save the mask for the actual memory accesses in
/// program order in \p SortedIndices as <1,2,0,3>.
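///
/// A brief sketch of a possible call site (VL, DL and SE are placeholders for
/// the caller's pointer list and analyses):
/// \code
///   SmallVector<unsigned, 4> SortedIndices;
///   if (sortPtrAccesses(VL, DL, SE, SortedIndices)) {
///     // SortedIndices[I] gives, for the I-th pointer in address order, its
///     // position in the original (program-order) VL.
///   }
/// \endcode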
bool sortPtrAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
                     ScalarEvolution &SE,
                     SmallVectorImpl<unsigned> &SortedIndices);

/// Returns true if the memory operations \p A and \p B are consecutive.
/// This is a simple API that does not depend on the analysis pass.
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                         ScalarEvolution &SE, bool CheckType = true);

/// This analysis provides dependence information for the memory accesses
/// of a loop.
///
/// It runs the analysis for a loop on demand. This can be initiated by
/// querying the loop access info via LAA::getInfo. getInfo returns a
/// LoopAccessInfo object. See this class for the specifics of what information
/// is provided.
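///
/// A plausible use from another legacy pass (assuming LoopAccessLegacyAnalysis
/// was requested in that pass's getAnalysisUsage):
/// \code
///   auto &LAA = getAnalysis<LoopAccessLegacyAnalysis>();
///   const LoopAccessInfo &LAI = LAA.getInfo(L);
/// \endcode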
class LoopAccessLegacyAnalysis : public FunctionPass {
public:
  static char ID;

  LoopAccessLegacyAnalysis() : FunctionPass(ID) {
    initializeLoopAccessLegacyAnalysisPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Query the result of the loop access information for the loop \p L.
  ///
  /// If there is no cached result available run the analysis.
  const LoopAccessInfo &getInfo(Loop *L);

  void releaseMemory() override {
    // Invalidate the cache when the pass is freed.
    LoopAccessInfoMap.clear();
  }

  /// Print the result of the analysis when invoked with -analyze.
  void print(raw_ostream &OS, const Module *M = nullptr) const override;

private:
  /// The cache.
  DenseMap<Loop *, std::unique_ptr<LoopAccessInfo>> LoopAccessInfoMap;

  // The used analysis passes.
  ScalarEvolution *SE;
  const TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  DominatorTree *DT;
  LoopInfo *LI;
};

/// This analysis provides dependence information for the memory
/// accesses of a loop.
///
/// It runs the analysis for a loop on demand. This can be initiated by
/// querying the loop access info via AM.getResult<LoopAccessAnalysis>.
/// getResult returns a LoopAccessInfo object. See this class for the
/// specifics of what information is provided.
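///
/// A sketch of the expected new pass manager usage, from inside a loop pass's
/// run method:
/// \code
///   PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
///                         LoopStandardAnalysisResults &AR, LPMUpdater &U) {
///     const LoopAccessInfo &LAI = AM.getResult<LoopAccessAnalysis>(L, AR);
///     // ... use LAI ...
///   }
/// \endcode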
class LoopAccessAnalysis
    : public AnalysisInfoMixin<LoopAccessAnalysis> {
  friend AnalysisInfoMixin<LoopAccessAnalysis>;
  static AnalysisKey Key;

public:
  typedef LoopAccessInfo Result;

  Result run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR);
};

inline Instruction *MemoryDepChecker::Dependence::getSource(
    const LoopAccessInfo &LAI) const {
  return LAI.getDepChecker().getMemoryInstructions()[Source];
}

inline Instruction *MemoryDepChecker::Dependence::getDestination(
    const LoopAccessInfo &LAI) const {
  return LAI.getDepChecker().getMemoryInstructions()[Destination];
}

} // End llvm namespace

#endif