//===- LoopLoadElimination.cpp - Loop Load Elimination Pass ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a loop-aware load elimination pass.
//
// It uses LoopAccessAnalysis to identify loop-carried dependences with a
// distance of one between stores and loads. These form the candidates for the
// transformation. The source value of each store is then propagated to the
// user of the corresponding load. This makes the load dead.
//
// The pass can also version the loop and add memchecks in order to prove that
// may-aliasing stores can't change the value in memory before it's read by the
// load.
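//
// For example (illustrative only), in a loop such as
//
//   for (unsigned i = 0; i < N; ++i) {
//     A[i + 1] = B[i];   // store
//     C[i] = A[i];       // loads the value stored one iteration earlier
//   }
//
// the load of A[i] can be replaced by a PHI that forwards the value stored to
// A[i + 1] in the previous iteration, with an initial load of A[0] hoisted to
// the preheader.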
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopLoadElimination.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <forward_list>
#include <set>
#include <tuple>
#include <utility>

using namespace llvm;

#define LLE_OPTION "loop-load-elim"
#define DEBUG_TYPE LLE_OPTION

static cl::opt<unsigned> CheckPerElim(
    "runtime-check-per-loop-load-elim", cl::Hidden,
    cl::desc("Max number of memchecks allowed per eliminated load on average"),
    cl::init(1));

static cl::opt<unsigned> LoadElimSCEVCheckThreshold(
    "loop-load-elimination-scev-check-threshold", cl::init(8), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed for Loop "
             "Load Elimination"));

STATISTIC(NumLoopLoadEliminted, "Number of loads eliminated by LLE");

namespace {

/// Represent a store-to-forwarding candidate.
struct StoreToLoadForwardingCandidate {
  LoadInst *Load;
  StoreInst *Store;

  StoreToLoadForwardingCandidate(LoadInst *Load, StoreInst *Store)
      : Load(Load), Store(Store) {}

  /// Return true if the dependence from the store to the load has a
  /// distance of one. E.g. A[i+1] = A[i]
  bool isDependenceDistanceOfOne(PredicatedScalarEvolution &PSE,
                                 Loop *L) const {
    Value *LoadPtr = Load->getPointerOperand();
    Value *StorePtr = Store->getPointerOperand();
    Type *LoadPtrType = LoadPtr->getType();
    Type *LoadType = LoadPtrType->getPointerElementType();

    assert(LoadPtrType->getPointerAddressSpace() ==
               StorePtr->getType()->getPointerAddressSpace() &&
           LoadType == StorePtr->getType()->getPointerElementType() &&
           "Should be a known dependence");

    // Currently we only support accesses with unit stride. FIXME: we should be
    // able to handle non-unit stride as well as long as the stride is equal to
    // the dependence distance.
    if (getPtrStride(PSE, LoadPtr, L) != 1 ||
        getPtrStride(PSE, StorePtr, L) != 1)
      return false;

    auto &DL = Load->getParent()->getModule()->getDataLayout();
    unsigned TypeByteSize = DL.getTypeAllocSize(const_cast<Type *>(LoadType));

    auto *LoadPtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(LoadPtr));
    auto *StorePtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(StorePtr));

    // We don't need to check non-wrapping here because forward/backward
    // dependence wouldn't be valid if these weren't monotonic accesses.
    auto *Dist = cast<SCEVConstant>(
        PSE.getSE()->getMinusSCEV(StorePtrSCEV, LoadPtrSCEV));
    const APInt &Val = Dist->getAPInt();
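    // A store-to-load distance of exactly one element (TypeByteSize bytes)
    // means the location stored to in iteration i is the one loaded from in
    // iteration i+1.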
    return Val == TypeByteSize;
  }

  Value *getLoadPtr() const { return Load->getPointerOperand(); }

#ifndef NDEBUG
  friend raw_ostream &operator<<(raw_ostream &OS,
                                 const StoreToLoadForwardingCandidate &Cand) {
    OS << *Cand.Store << " -->\n";
    OS.indent(2) << *Cand.Load << "\n";
    return OS;
  }
#endif
};

} // end anonymous namespace

/// Check if the store dominates all latches, so as long as there is no
/// intervening store this value will be loaded in the next iteration.
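/// (A store that dominates every latch executes on every path that reaches
/// the backedge, so the forwarded value is the one written in the current
/// iteration.)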
static bool doesStoreDominatesAllLatches(BasicBlock *StoreBlock, Loop *L,
                                         DominatorTree *DT) {
  SmallVector<BasicBlock *, 8> Latches;
  L->getLoopLatches(Latches);
  return llvm::all_of(Latches, [&](const BasicBlock *Latch) {
    return DT->dominates(StoreBlock, Latch);
  });
}

/// Return true if the load is not executed on all paths in the loop.
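/// Conservatively, only loads placed in the header are treated as
/// unconditional, since the header is the one block guaranteed to execute on
/// every iteration.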
static bool isLoadConditional(LoadInst *Load, Loop *L) {
  return Load->getParent() != L->getHeader();
}

namespace {

/// The per-loop class that does most of the work.
class LoadEliminationForLoop {
public:
  LoadEliminationForLoop(Loop *L, LoopInfo *LI, const LoopAccessInfo &LAI,
                         DominatorTree *DT, BlockFrequencyInfo *BFI,
                         ProfileSummaryInfo *PSI)
      : L(L), LI(LI), LAI(LAI), DT(DT), BFI(BFI), PSI(PSI), PSE(LAI.getPSE()) {}

  /// Look through the loop-carried and loop-independent dependences in
  /// this loop and find store->load dependences.
  ///
  /// Note that no candidate is returned if LAA has failed to analyze the loop
  /// (e.g. if it's not bottom-tested, contains volatile memops, etc.)
  std::forward_list<StoreToLoadForwardingCandidate>
  findStoreToLoadDependences(const LoopAccessInfo &LAI) {
    std::forward_list<StoreToLoadForwardingCandidate> Candidates;

    const auto *Deps = LAI.getDepChecker().getDependences();
    if (!Deps)
      return Candidates;

    // Find store->load dependences (consequently true dep). Both lexically
    // forward and backward dependences qualify. Disqualify loads that have
    // other unknown dependences.

    SmallPtrSet<Instruction *, 4> LoadsWithUnknownDepedence;

    for (const auto &Dep : *Deps) {
      Instruction *Source = Dep.getSource(LAI);
      Instruction *Destination = Dep.getDestination(LAI);

      if (Dep.Type == MemoryDepChecker::Dependence::Unknown) {
        if (isa<LoadInst>(Source))
          LoadsWithUnknownDepedence.insert(Source);
        if (isa<LoadInst>(Destination))
          LoadsWithUnknownDepedence.insert(Destination);
        continue;
      }

      if (Dep.isBackward())
        // Note that the designations source and destination follow the program
        // order, i.e. source is always first. (The direction is given by the
        // DepType.)
        std::swap(Source, Destination);
      else
        assert(Dep.isForward() && "Needs to be a forward dependence");

      auto *Store = dyn_cast<StoreInst>(Source);
      if (!Store)
        continue;
      auto *Load = dyn_cast<LoadInst>(Destination);
      if (!Load)
        continue;

      // Only propagate the value if they are of the same type.
      if (Store->getPointerOperandType() != Load->getPointerOperandType())
        continue;

      Candidates.emplace_front(Load, Store);
    }

    if (!LoadsWithUnknownDepedence.empty())
      Candidates.remove_if([&](const StoreToLoadForwardingCandidate &C) {
        return LoadsWithUnknownDepedence.count(C.Load);
      });

    return Candidates;
  }

  /// Return the index of the instruction according to program order.
  unsigned getInstrIndex(Instruction *Inst) {
    auto I = InstOrder.find(Inst);
    assert(I != InstOrder.end() && "No index for instruction");
    return I->second;
  }

  /// If a load has multiple candidates associated (i.e. different
  /// stores), it means that it could be forwarding from multiple stores
  /// depending on control flow. Remove these candidates.
  ///
  /// Here, we rely on LAA to include the relevant loop-independent dependences.
  /// LAA is known to omit these in the very simple case when the read and the
  /// write within an alias set always take place using the *same* pointer.
  ///
  /// However, we know that this is not the case here, i.e. we can rely on LAA
  /// to provide us with loop-independent dependences for the cases we're
  /// interested in. Consider the case for example where a loop-independent
  /// dependence S1->S2 invalidates the forwarding S3->S2.
  ///
  ///   A[i]   = ...   (S1)
  ///   ...    = A[i]  (S2)
  ///   A[i+1] = ...   (S3)
  ///
  /// LAA will perform dependence analysis here because there are two
  /// *different* pointers involved in the same alias set (&A[i] and &A[i+1]).
  void removeDependencesFromMultipleStores(
      std::forward_list<StoreToLoadForwardingCandidate> &Candidates) {
    // If the mapped candidate is nullptr, it means that multiple stores
    // forward to this load.
    using LoadToSingleCandT =
        DenseMap<LoadInst *, const StoreToLoadForwardingCandidate *>;
    LoadToSingleCandT LoadToSingleCand;

    for (const auto &Cand : Candidates) {
      bool NewElt;
      LoadToSingleCandT::iterator Iter;

      std::tie(Iter, NewElt) =
          LoadToSingleCand.insert(std::make_pair(Cand.Load, &Cand));
      if (!NewElt) {
        const StoreToLoadForwardingCandidate *&OtherCand = Iter->second;
        // Already multiple stores forward to this load.
        if (OtherCand == nullptr)
          continue;

        // Handle the very basic case when the two stores are in the same block
        // so deciding which one forwards is easy. The later one forwards as
        // long as they both have a dependence distance of one to the load.
        if (Cand.Store->getParent() == OtherCand->Store->getParent() &&
            Cand.isDependenceDistanceOfOne(PSE, L) &&
            OtherCand->isDependenceDistanceOfOne(PSE, L)) {
          // They are in the same block, the later one will forward to the
          // load.
          if (getInstrIndex(OtherCand->Store) < getInstrIndex(Cand.Store))
            OtherCand = &Cand;
        } else
          OtherCand = nullptr;
      }
    }

    Candidates.remove_if([&](const StoreToLoadForwardingCandidate &Cand) {
      if (LoadToSingleCand[Cand.Load] != &Cand) {
        LLVM_DEBUG(
            dbgs() << "Removing from candidates: \n"
                   << Cand
                   << " The load may have multiple stores forwarding to "
                   << "it\n");
        return true;
      }
      return false;
    });
  }

  /// Given two pointer operations by their RuntimePointerChecking
  /// indices, return true if they require an alias check.
  ///
  /// We need a check if one is a pointer for a candidate load and the other is
  /// a pointer for a possibly intervening store.
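  /// (Checks between two candidate-load pointers, or between two intervening
  /// stores, are not needed for the forwarding to be correct and are filtered
  /// out here.)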
  bool needsChecking(unsigned PtrIdx1, unsigned PtrIdx2,
                     const SmallPtrSet<Value *, 4> &PtrsWrittenOnFwdingPath,
                     const std::set<Value *> &CandLoadPtrs) {
    Value *Ptr1 =
        LAI.getRuntimePointerChecking()->getPointerInfo(PtrIdx1).PointerValue;
    Value *Ptr2 =
        LAI.getRuntimePointerChecking()->getPointerInfo(PtrIdx2).PointerValue;
    return ((PtrsWrittenOnFwdingPath.count(Ptr1) && CandLoadPtrs.count(Ptr2)) ||
            (PtrsWrittenOnFwdingPath.count(Ptr2) && CandLoadPtrs.count(Ptr1)));
  }

  /// Return pointers that are possibly written to on the path from a
  /// forwarding store to a load.
  ///
  /// These pointers need to be alias-checked against the forwarding candidates.
  SmallPtrSet<Value *, 4> findPointersWrittenOnForwardingPath(
      const SmallVectorImpl<StoreToLoadForwardingCandidate> &Candidates) {
    // From FirstStore to LastLoad neither of the elimination candidate loads
    // should overlap with any of the stores.
    //
    // E.g.:
    //
    //   st1 C[i]
    //   ld1 B[i] <-------,
    //   ld0 A[i] <----,  |            * LastLoad
    //   ...           |  |
    //   st2 E[i]      |  |
    //   st3 B[i+1] -- | -'            * FirstStore
    //   st0 A[i+1] ---'
    //   st4 D[i]
    //
    // st0 forwards to ld0 if the accesses in st4 and st1 don't overlap with
    // ld0.

    LoadInst *LastLoad =
        std::max_element(Candidates.begin(), Candidates.end(),
                         [&](const StoreToLoadForwardingCandidate &A,
                             const StoreToLoadForwardingCandidate &B) {
                           return getInstrIndex(A.Load) < getInstrIndex(B.Load);
                         })
            ->Load;
    StoreInst *FirstStore =
        std::min_element(Candidates.begin(), Candidates.end(),
                         [&](const StoreToLoadForwardingCandidate &A,
                             const StoreToLoadForwardingCandidate &B) {
                           return getInstrIndex(A.Store) <
                                  getInstrIndex(B.Store);
                         })
            ->Store;

    // We're looking for stores after the first forwarding store until the end
    // of the loop, then from the beginning of the loop until the last
    // forwarded-to load. Collect the pointers for these stores.
    SmallPtrSet<Value *, 4> PtrsWrittenOnFwdingPath;

    auto InsertStorePtr = [&](Instruction *I) {
      if (auto *S = dyn_cast<StoreInst>(I))
        PtrsWrittenOnFwdingPath.insert(S->getPointerOperand());
    };
    const auto &MemInstrs = LAI.getDepChecker().getMemoryInstructions();
    std::for_each(MemInstrs.begin() + getInstrIndex(FirstStore) + 1,
                  MemInstrs.end(), InsertStorePtr);
    std::for_each(MemInstrs.begin(), &MemInstrs[getInstrIndex(LastLoad)],
                  InsertStorePtr);

    return PtrsWrittenOnFwdingPath;
  }

  /// Determine the pointer alias checks to prove that there are no
  /// intervening stores.
  SmallVector<RuntimePointerCheck, 4> collectMemchecks(
      const SmallVectorImpl<StoreToLoadForwardingCandidate> &Candidates) {

    SmallPtrSet<Value *, 4> PtrsWrittenOnFwdingPath =
        findPointersWrittenOnForwardingPath(Candidates);

    // Collect the pointers of the candidate loads.
    // FIXME: SmallPtrSet does not work with std::inserter.
    std::set<Value *> CandLoadPtrs;
    transform(Candidates,
              std::inserter(CandLoadPtrs, CandLoadPtrs.begin()),
              std::mem_fn(&StoreToLoadForwardingCandidate::getLoadPtr));

    const auto &AllChecks = LAI.getRuntimePointerChecking()->getChecks();
    SmallVector<RuntimePointerCheck, 4> Checks;

    copy_if(AllChecks, std::back_inserter(Checks),
            [&](const RuntimePointerCheck &Check) {
              for (auto PtrIdx1 : Check.first->Members)
                for (auto PtrIdx2 : Check.second->Members)
                  if (needsChecking(PtrIdx1, PtrIdx2, PtrsWrittenOnFwdingPath,
                                    CandLoadPtrs))
                    return true;
              return false;
            });

    LLVM_DEBUG(dbgs() << "\nPointer Checks (count: " << Checks.size()
                      << "):\n");
    LLVM_DEBUG(LAI.getRuntimePointerChecking()->printChecks(dbgs(), Checks));

    return Checks;
  }

  /// Perform the transformation for a candidate.
  void
  propagateStoredValueToLoadUsers(const StoreToLoadForwardingCandidate &Cand,
                                  SCEVExpander &SEE) {
    // loop:
    //      %x = load %gep_i
    //         = ... %x
    //      store %y, %gep_i_plus_1
    //
    // =>
    //
    // ph:
    //      %x.initial = load %gep_0
    // loop:
    //      %x.storeforward = phi [%x.initial, %ph] [%y, %loop]
    //      %x = load %gep_i            <---- now dead
    //         = ... %x.storeforward
    //      store %y, %gep_i_plus_1

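    // Note: hoisting the initial load into the preheader is only legal because
    // candidates with conditional loads were rejected earlier (see
    // isLoadConditional); otherwise we could access memory that the original
    // loop never touched.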
    Value *Ptr = Cand.Load->getPointerOperand();
    auto *PtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(Ptr));
    auto *PH = L->getLoopPreheader();
    assert(PH && "Preheader should exist!");
    Value *InitialPtr = SEE.expandCodeFor(PtrSCEV->getStart(), Ptr->getType(),
                                          PH->getTerminator());
    Value *Initial = new LoadInst(
        Cand.Load->getType(), InitialPtr, "load_initial",
        /* isVolatile */ false, Cand.Load->getAlign(), PH->getTerminator());

    PHINode *PHI = PHINode::Create(Initial->getType(), 2, "store_forwarded",
                                   &L->getHeader()->front());
    PHI->addIncoming(Initial, PH);
    PHI->addIncoming(Cand.Store->getOperand(0), L->getLoopLatch());

    Cand.Load->replaceAllUsesWith(PHI);
  }

  /// Top-level driver for each loop: find store->load forwarding
  /// candidates, add run-time checks and perform transformation.
  bool processLoop() {
    LLVM_DEBUG(dbgs() << "\nIn \"" << L->getHeader()->getParent()->getName()
                      << "\" checking " << *L << "\n");

    // Look for store-to-load forwarding cases across the
    // backedge. E.g.:
    //
    // loop:
    //      %x = load %gep_i
    //         = ... %x
    //      store %y, %gep_i_plus_1
    //
    // =>
    //
    // ph:
    //      %x.initial = load %gep_0
    // loop:
    //      %x.storeforward = phi [%x.initial, %ph] [%y, %loop]
    //      %x = load %gep_i            <---- now dead
    //         = ... %x.storeforward
    //      store %y, %gep_i_plus_1

    // First start with store->load dependences.
    auto StoreToLoadDependences = findStoreToLoadDependences(LAI);
    if (StoreToLoadDependences.empty())
      return false;

    // Generate an index for each load and store according to the original
    // program order. This will be used later.
    InstOrder = LAI.getDepChecker().generateInstructionOrderMap();

    // To keep things simple for now, remove those where the load is
    // potentially fed by multiple stores.
    removeDependencesFromMultipleStores(StoreToLoadDependences);
    if (StoreToLoadDependences.empty())
      return false;

    // Filter the candidates further.
    SmallVector<StoreToLoadForwardingCandidate, 4> Candidates;
    unsigned NumForwarding = 0;
    for (const StoreToLoadForwardingCandidate &Cand : StoreToLoadDependences) {
      LLVM_DEBUG(dbgs() << "Candidate " << Cand);

      // Make sure that the stored value is available everywhere in the loop in
      // the next iteration.
      if (!doesStoreDominatesAllLatches(Cand.Store->getParent(), L, DT))
        continue;

      // If the load is conditional we can't hoist its 0-iteration instance to
      // the preheader because that would make it unconditional. Thus we would
      // access a memory location that the original loop did not access.
      if (isLoadConditional(Cand.Load, L))
        continue;

      // Check whether the SCEV difference is the same as the induction step,
      // thus we load the value in the next iteration.
      if (!Cand.isDependenceDistanceOfOne(PSE, L))
        continue;

      ++NumForwarding;
      LLVM_DEBUG(
          dbgs()
          << NumForwarding
          << ". Valid store-to-load forwarding across the loop backedge\n");
      Candidates.push_back(Cand);
    }
    if (Candidates.empty())
      return false;

    // Check intervening may-alias stores. These need runtime checks for alias
    // disambiguation.
    SmallVector<RuntimePointerCheck, 4> Checks = collectMemchecks(Candidates);

    // Too many checks are likely to outweigh the benefits of forwarding.
    if (Checks.size() > Candidates.size() * CheckPerElim) {
      LLVM_DEBUG(dbgs() << "Too many run-time checks needed.\n");
      return false;
    }

    if (LAI.getPSE().getUnionPredicate().getComplexity() >
        LoadElimSCEVCheckThreshold) {
      LLVM_DEBUG(dbgs() << "Too many SCEV run-time checks needed.\n");
      return false;
    }

    if (!L->isLoopSimplifyForm()) {
      LLVM_DEBUG(dbgs() << "Loop is not in loop-simplify form\n");
      return false;
    }

    if (!Checks.empty() || !LAI.getPSE().getUnionPredicate().isAlwaysTrue()) {
      if (LAI.hasConvergentOp()) {
        LLVM_DEBUG(dbgs() << "Versioning is needed but not allowed with "
                             "convergent calls\n");
        return false;
      }

      auto *HeaderBB = L->getHeader();
      auto *F = HeaderBB->getParent();
      bool OptForSize = F->hasOptSize() ||
                        llvm::shouldOptimizeForSize(HeaderBB, PSI, BFI,
                                                    PGSOQueryType::IRPass);
      if (OptForSize) {
        LLVM_DEBUG(
            dbgs() << "Versioning is needed but not allowed when optimizing "
                      "for size.\n");
        return false;
      }

      // Point of no-return, start the transformation. First, version the loop
      // if necessary.

      LoopVersioning LV(LAI, L, LI, DT, PSE.getSE(), false);
      LV.setAliasChecks(std::move(Checks));
      LV.setSCEVChecks(LAI.getPSE().getUnionPredicate());
      LV.versionLoop();
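
      // After versioning, the candidate loads and stores still refer to
      // instructions in 'L', the version of the loop guarded by the runtime
      // checks; the fallback copy is left untransformed.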
    }

    // Next, propagate the value stored by the store to the users of the load.
    // Also for the first iteration, generate the initial value of the load.
    SCEVExpander SEE(*PSE.getSE(), L->getHeader()->getModule()->getDataLayout(),
                     "storeforward");
    for (const auto &Cand : Candidates)
      propagateStoredValueToLoadUsers(Cand, SEE);
    NumLoopLoadEliminted += NumForwarding;

    return true;
  }

private:
  Loop *L;

  /// Maps the load/store instructions to their index according to
  /// program order.
  DenseMap<Instruction *, unsigned> InstOrder;

  // Analyses used.
  LoopInfo *LI;
  const LoopAccessInfo &LAI;
  DominatorTree *DT;
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;
  PredicatedScalarEvolution PSE;
};

} // end anonymous namespace

static bool
eliminateLoadsAcrossLoops(Function &F, LoopInfo &LI, DominatorTree &DT,
                          BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
                          function_ref<const LoopAccessInfo &(Loop &)> GetLAI) {
  // Build up a worklist of inner-loops to transform to avoid iterator
  // invalidation.
  // FIXME: This logic comes from other passes that actually change the loop
  // nest structure. It isn't clear this is necessary (or useful) for a pass
  // which merely optimizes the use of loads in a loop.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *TopLevelLoop : LI)
    for (Loop *L : depth_first(TopLevelLoop))
      // We only handle inner-most loops.
      if (L->empty())
        Worklist.push_back(L);

  // Now walk the identified inner loops.
  bool Changed = false;
  for (Loop *L : Worklist) {
    // The actual work is performed by LoadEliminationForLoop.
    LoadEliminationForLoop LEL(L, &LI, GetLAI(*L), &DT, BFI, PSI);
    Changed |= LEL.processLoop();
  }
  return Changed;
}

namespace {

/// The pass. Most of the work is delegated to the per-loop
/// LoadEliminationForLoop class.
class LoopLoadElimination : public FunctionPass {
public:
  static char ID;

  LoopLoadElimination() : FunctionPass(ID) {
    initializeLoopLoadEliminationPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto &LAA = getAnalysis<LoopAccessLegacyAnalysis>();
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
    auto *BFI = (PSI && PSI->hasProfileSummary()) ?
                &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
                nullptr;

    // Process each loop nest in the function.
    return eliminateLoadsAcrossLoops(
        F, LI, DT, BFI, PSI,
        [&LAA](Loop &L) -> const LoopAccessInfo & { return LAA.getInfo(&L); });
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequiredID(LoopSimplifyID);
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char LoopLoadElimination::ID;

static const char LLE_name[] = "Loop Load Elimination";

INITIALIZE_PASS_BEGIN(LoopLoadElimination, LLE_OPTION, LLE_name, false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
INITIALIZE_PASS_END(LoopLoadElimination, LLE_OPTION, LLE_name, false, false)

FunctionPass *llvm::createLoopLoadEliminationPass() {
  return new LoopLoadElimination();
}

PreservedAnalyses LoopLoadEliminationPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  auto *PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  auto *BFI = (PSI && PSI->hasProfileSummary()) ?
              &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  bool Changed = eliminateLoadsAcrossLoops(
      F, LI, DT, BFI, PSI, [&](Loop &L) -> const LoopAccessInfo & {
        LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, MSSA};
        return LAM.getResult<LoopAccessAnalysis>(L, AR);
      });

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  return PA;
}