1 //===-- LoopPredication.cpp - Guard based loop predication pass -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // The LoopPredication pass tries to convert loop variant range checks to loop
10 // invariant by widening checks across loop iterations. For example, it will
11 // convert
12 //
13 // for (i = 0; i < n; i++) {
14 // guard(i < len);
15 // ...
16 // }
17 //
18 // to
19 //
20 // for (i = 0; i < n; i++) {
21 // guard(n - 1 < len);
22 // ...
23 // }
24 //
25 // After this transformation the condition of the guard is loop invariant, so
26 // loop-unswitch can later unswitch the loop by this condition which basically
27 // predicates the loop by the widened condition:
28 //
29 // if (n - 1 < len)
30 // for (i = 0; i < n; i++) {
31 // ...
32 // }
33 // else
34 // deoptimize
35 //
36 // It's tempting to rely on SCEV here, but it has proven to be problematic.
37 // Generally the facts SCEV provides about the increment step of add
38 // recurrences are true if the backedge of the loop is taken, which implicitly
39 // assumes that the guard doesn't fail. Using these facts to optimize the
40 // guard results in a circular logic where the guard is optimized under the
41 // assumption that it never fails.
42 //
43 // For example, in the loop below the induction variable will be marked as nuw
44 // based on the guard. Based on nuw, the guard predicate will be considered
45 // monotonic. Given a monotonic condition it's tempting to replace the induction
46 // variable in the condition with its value on the last iteration. But this
47 // transformation is not correct, e.g. e = 4, b = 5 breaks the loop.
48 //
49 // for (int i = b; i != e; i++)
50 // guard(i u< len)
51 //
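// (An illustrative trace of the failure: with b = 5 and e = 4 the loop visits
// i = 5, 6, ..., wraps around, and only stops once i reaches 4, so the guard
// must fail as soon as i == len. Substituting the value of i on the last
// iteration, e - 1 = 3, would yield guard(3 u< len), which can remain true
// even though some executed iteration violates the original check.)
//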
52 // One of the ways to reason about this problem is to use an inductive proof
53 // approach. Given the loop:
54 //
55 // if (B(0)) {
56 // do {
57 // I = PHI(0, I.INC)
58 // I.INC = I + Step
59 // guard(G(I));
60 // } while (B(I));
61 // }
62 //
63 // where B(x) and G(x) are predicates that map integers to booleans, we want a
64 // loop invariant expression M such that the following program has the same semantics
65 // as the above:
66 //
67 // if (B(0)) {
68 // do {
69 // I = PHI(0, I.INC)
70 // I.INC = I + Step
71 // guard(G(0) && M);
72 // } while (B(I));
73 // }
74 //
75 // One solution for M is M = forall X . (G(X) && B(X)) => G(X + Step)
76 //
77 // Informal proof that the transformation above is correct:
78 //
79 // By the definition of guards we can rewrite the guard condition to:
80 // G(I) && G(0) && M
81 //
82 // Let's prove that for each iteration of the loop:
83 // G(0) && M => G(I)
84 // With that, the guard condition above can be simplified to G(0) && M.
85 //
86 // Induction base.
87 // G(0) && M => G(0)
88 //
89 // Induction step. Assuming G(0) && M => G(I) on the current iteration, let's
90 // prove it for the next iteration's value I + Step:
91 //
92 // B(I) is true because it's the backedge condition.
93 // G(I) is true because the backedge is guarded by this condition.
94 //
95 // So M = forall X . (G(X) && B(X)) => G(X + Step) implies G(I + Step).
96 //
97 // Note that we can use anything stronger than M, i.e. any condition which
98 // implies M.
99 //
100 // When Step = 1 (i.e. a forward iterating loop), the transformation is supported
101 // when:
102 // * The loop has a single latch with the condition of the form:
103 // B(X) = latchStart + X <pred> latchLimit,
104 // where <pred> is u<, u<=, s<, or s<=.
105 // * The guard condition is of the form
106 // G(X) = guardStart + X u< guardLimit
107 //
108 // For the ult latch comparison case M is:
109 // forall X . guardStart + X u< guardLimit && latchStart + X u< latchLimit =>
110 // guardStart + X + 1 u< guardLimit
111 //
112 // The only way the antecedent can be true and the consequent can be false is
113 // if
114 // X == guardLimit - 1 - guardStart
115 // (and guardLimit is non-zero, but we won't use this latter fact).
116 // If X == guardLimit - 1 - guardStart then the second half of the antecedent is
117 // latchStart + guardLimit - 1 - guardStart u< latchLimit
118 // and its negation is
119 // latchStart + guardLimit - 1 - guardStart u>= latchLimit
120 //
121 // In other words, if
122 // latchLimit u<= latchStart + guardLimit - 1 - guardStart
123 // then:
124 // (the ranges below are written in ConstantRange notation, where [A, B) is the
125 // set for (I = A; I != B; I++ /*maywrap*/) yield(I);)
126 //
127 // forall X . guardStart + X u< guardLimit &&
128 // latchStart + X u< latchLimit =>
129 // guardStart + X + 1 u< guardLimit
130 // == forall X . guardStart + X u< guardLimit &&
131 // latchStart + X u< latchStart + guardLimit - 1 - guardStart =>
132 // guardStart + X + 1 u< guardLimit
133 // == forall X . (guardStart + X) in [0, guardLimit) &&
134 // (latchStart + X) in [0, latchStart + guardLimit - 1 - guardStart) =>
135 // (guardStart + X + 1) in [0, guardLimit)
136 // == forall X . X in [-guardStart, guardLimit - guardStart) &&
137 // X in [-latchStart, guardLimit - 1 - guardStart) =>
138 // X in [-guardStart - 1, guardLimit - guardStart - 1)
139 // == true
140 //
141 // So the widened condition is:
142 // guardStart u< guardLimit &&
143 // latchStart + guardLimit - 1 - guardStart u>= latchLimit
144 // Similarly for ule condition the widened condition is:
145 // guardStart u< guardLimit &&
146 // latchStart + guardLimit - 1 - guardStart u> latchLimit
147 // For slt condition the widened condition is:
148 // guardStart u< guardLimit &&
149 // latchStart + guardLimit - 1 - guardStart s>= latchLimit
150 // For sle condition the widened condition is:
151 // guardStart u< guardLimit &&
152 // latchStart + guardLimit - 1 - guardStart s> latchLimit
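//
// As a concrete, illustrative instance of the u< latch case: take
// G(X) = X u< len (guardStart = 0, guardLimit = len) and a latch
// B(X) = X u< n (latchStart = 0, latchLimit = n). The widened condition
// becomes
// 0 u< len && 0 + len - 1 - 0 u>= n
// i.e. len != 0 && n u<= len - 1, which says exactly that every value of X
// that can reach the guard (0 through n) satisfies X u< len.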
153 //
154 // When Step = -1 (i.e. a reverse iterating loop), the transformation is supported
155 // when:
156 // * The loop has a single latch with the condition of the form:
157 // B(X) = X <pred> latchLimit, where <pred> is u>, u>=, s>, or s>=.
158 // * The guard condition is of the form
159 // G(X) = X - 1 u< guardLimit
160 //
161 // For the ugt latch comparison case M is:
162 // forall X. X-1 u< guardLimit and X u> latchLimit => X-2 u< guardLimit
163 //
164 // The only way the antecedent can be true and the consequent can be false is if
165 // X == 1.
166 // If X == 1 then the second half of the antecedent is
167 // 1 u> latchLimit, and its negation is latchLimit u>= 1.
168 //
169 // So the widened condition is:
170 // guardStart u< guardLimit && latchLimit u>= 1.
171 // Similarly for sgt condition the widened condition is:
172 // guardStart u< guardLimit && latchLimit s>= 1.
173 // For uge condition the widened condition is:
174 // guardStart u< guardLimit && latchLimit u> 1.
175 // For sge condition the widened condition is:
176 // guardStart u< guardLimit && latchLimit s> 1.
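//
// As a concrete, illustrative instance of the u> latch case: take
// G(X) = X - 1 u< guardLimit and a latch B(X) = X u> latchLimit (so guardStart,
// the start of the range-check IV X - 1, is one below the latch IV's start).
// The widened condition
// guardStart u< guardLimit && latchLimit u>= 1
// rules out latchLimit == 0, where the final iteration would evaluate the
// guard at X == 0 and X - 1 would wrap to the maximum unsigned value.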
177 //===----------------------------------------------------------------------===//
178
179 #include "llvm/Transforms/Scalar/LoopPredication.h"
180 #include "llvm/ADT/Statistic.h"
181 #include "llvm/Analysis/AliasAnalysis.h"
182 #include "llvm/Analysis/BranchProbabilityInfo.h"
183 #include "llvm/Analysis/GuardUtils.h"
184 #include "llvm/Analysis/LoopInfo.h"
185 #include "llvm/Analysis/LoopPass.h"
186 #include "llvm/Analysis/ScalarEvolution.h"
187 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
188 #include "llvm/IR/Function.h"
189 #include "llvm/IR/GlobalValue.h"
190 #include "llvm/IR/IntrinsicInst.h"
191 #include "llvm/IR/Module.h"
192 #include "llvm/IR/PatternMatch.h"
193 #include "llvm/InitializePasses.h"
194 #include "llvm/Pass.h"
195 #include "llvm/Support/CommandLine.h"
196 #include "llvm/Support/Debug.h"
197 #include "llvm/Transforms/Scalar.h"
198 #include "llvm/Transforms/Utils/GuardUtils.h"
199 #include "llvm/Transforms/Utils/Local.h"
200 #include "llvm/Transforms/Utils/LoopUtils.h"
201 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
202
203 #define DEBUG_TYPE "loop-predication"
204
205 STATISTIC(TotalConsidered, "Number of guards considered");
206 STATISTIC(TotalWidened, "Number of checks widened");
207
208 using namespace llvm;
209
210 static cl::opt<bool> EnableIVTruncation("loop-predication-enable-iv-truncation",
211 cl::Hidden, cl::init(true));
212
213 static cl::opt<bool> EnableCountDownLoop("loop-predication-enable-count-down-loop",
214 cl::Hidden, cl::init(true));
215
216 static cl::opt<bool>
217 SkipProfitabilityChecks("loop-predication-skip-profitability-checks",
218 cl::Hidden, cl::init(false));
219
220 // This is the scale factor for the latch probability. We use this during
221 // profitability analysis to find other exiting blocks that have a much higher
222 // probability of exiting the loop than the latch block does.
223 // This value should be greater than 1 for a sane profitability check.
224 static cl::opt<float> LatchExitProbabilityScale(
225 "loop-predication-latch-probability-scale", cl::Hidden, cl::init(2.0),
226 cl::desc("scale factor for the latch probability. Value should be greater "
227 "than 1. Lower values are ignored"));
228
229 static cl::opt<bool> PredicateWidenableBranchGuards(
230 "loop-predication-predicate-widenable-branches-to-deopt", cl::Hidden,
231 cl::desc("Whether or not we should predicate guards "
232 "expressed as widenable branches to deoptimize blocks"),
233 cl::init(true));
234
235 namespace {
236 /// Represents an induction variable check:
237 /// icmp Pred, <induction variable>, <loop invariant limit>
238 struct LoopICmp {
239 ICmpInst::Predicate Pred;
240 const SCEVAddRecExpr *IV;
241 const SCEV *Limit;
242 LoopICmp(ICmpInst::Predicate Pred, const SCEVAddRecExpr *IV,
243 const SCEV *Limit)
244 : Pred(Pred), IV(IV), Limit(Limit) {}
245 LoopICmp() {}
246 void dump() {
247 dbgs() << "LoopICmp Pred = " << Pred << ", IV = " << *IV
248 << ", Limit = " << *Limit << "\n";
249 }
250 };
251
252 class LoopPredication {
253 AliasAnalysis *AA;
254 DominatorTree *DT;
255 ScalarEvolution *SE;
256 LoopInfo *LI;
257 BranchProbabilityInfo *BPI;
258
259 Loop *L;
260 const DataLayout *DL;
261 BasicBlock *Preheader;
262 LoopICmp LatchCheck;
263
264 bool isSupportedStep(const SCEV* Step);
265 Optional<LoopICmp> parseLoopICmp(ICmpInst *ICI);
266 Optional<LoopICmp> parseLoopLatchICmp();
267
268 /// Return an insertion point suitable for inserting a safe to speculate
269 /// instruction whose only user will be 'User' which has operands 'Ops'. A
270 /// trivial result would be at the User itself, but we try to return a
271 /// loop invariant location if possible.
272 Instruction *findInsertPt(Instruction *User, ArrayRef<Value*> Ops);
273 /// Same as above, *except* that this uses the SCEV definition of invariant
274 /// which is that an expression *can be made* invariant via SCEVExpander.
275 /// Thus, this version is only suitable for finding an insert point to be
276 /// passed to SCEVExpander!
277 Instruction *findInsertPt(Instruction *User, ArrayRef<const SCEV*> Ops);
278
279 /// Return true if the value is known to produce a single fixed value across
280 /// all iterations on which it executes. Note that this does not imply
281 /// speculation safety. That must be established separately.
282 bool isLoopInvariantValue(const SCEV* S);
283
284 Value *expandCheck(SCEVExpander &Expander, Instruction *Guard,
285 ICmpInst::Predicate Pred, const SCEV *LHS,
286 const SCEV *RHS);
287
288 Optional<Value *> widenICmpRangeCheck(ICmpInst *ICI, SCEVExpander &Expander,
289 Instruction *Guard);
290 Optional<Value *> widenICmpRangeCheckIncrementingLoop(LoopICmp LatchCheck,
291 LoopICmp RangeCheck,
292 SCEVExpander &Expander,
293 Instruction *Guard);
294 Optional<Value *> widenICmpRangeCheckDecrementingLoop(LoopICmp LatchCheck,
295 LoopICmp RangeCheck,
296 SCEVExpander &Expander,
297 Instruction *Guard);
298 unsigned collectChecks(SmallVectorImpl<Value *> &Checks, Value *Condition,
299 SCEVExpander &Expander, Instruction *Guard);
300 bool widenGuardConditions(IntrinsicInst *II, SCEVExpander &Expander);
301 bool widenWidenableBranchGuardConditions(BranchInst *Guard, SCEVExpander &Expander);
302 // If the loop always exits through another block in the loop, we should not
303 // predicate based on the latch check. For example, the latch check can be a
304 // very coarse grained check and there can be more fine grained exit checks
305 // within the loop. We identify such unprofitable loops through BPI.
306 bool isLoopProfitableToPredicate();
307
308 bool predicateLoopExits(Loop *L, SCEVExpander &Rewriter);
309
310 public:
311 LoopPredication(AliasAnalysis *AA, DominatorTree *DT,
312 ScalarEvolution *SE, LoopInfo *LI,
313 BranchProbabilityInfo *BPI)
314 : AA(AA), DT(DT), SE(SE), LI(LI), BPI(BPI) {};
315 bool runOnLoop(Loop *L);
316 };
317
318 class LoopPredicationLegacyPass : public LoopPass {
319 public:
320 static char ID;
321 LoopPredicationLegacyPass() : LoopPass(ID) {
322 initializeLoopPredicationLegacyPassPass(*PassRegistry::getPassRegistry());
323 }
324
325 void getAnalysisUsage(AnalysisUsage &AU) const override {
326 AU.addRequired<BranchProbabilityInfoWrapperPass>();
327 getLoopAnalysisUsage(AU);
328 }
329
330 bool runOnLoop(Loop *L, LPPassManager &LPM) override {
331 if (skipLoop(L))
332 return false;
333 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
334 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
335 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
336 BranchProbabilityInfo &BPI =
337 getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
338 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
339 LoopPredication LP(AA, DT, SE, LI, &BPI);
340 return LP.runOnLoop(L);
341 }
342 };
343
344 char LoopPredicationLegacyPass::ID = 0;
345 } // end namespace
346
347 INITIALIZE_PASS_BEGIN(LoopPredicationLegacyPass, "loop-predication",
348 "Loop predication", false, false)
349 INITIALIZE_PASS_DEPENDENCY(BranchProbabilityInfoWrapperPass)
350 INITIALIZE_PASS_DEPENDENCY(LoopPass)
351 INITIALIZE_PASS_END(LoopPredicationLegacyPass, "loop-predication",
352 "Loop predication", false, false)
353
354 Pass *llvm::createLoopPredicationPass() {
355 return new LoopPredicationLegacyPass();
356 }
357
358 PreservedAnalyses LoopPredicationPass::run(Loop &L, LoopAnalysisManager &AM,
359 LoopStandardAnalysisResults &AR,
360 LPMUpdater &U) {
361 Function *F = L.getHeader()->getParent();
362 // For the new PM, we also can't use BranchProbabilityInfo as an analysis
363 // pass. Function analyses need to be preserved across loop transformations
364 // but BPI is not preserved, hence a newly built one is needed.
365 BranchProbabilityInfo BPI(*F, AR.LI, &AR.TLI, &AR.DT, nullptr);
366 LoopPredication LP(&AR.AA, &AR.DT, &AR.SE, &AR.LI, &BPI);
367 if (!LP.runOnLoop(&L))
368 return PreservedAnalyses::all();
369
370 return getLoopPassPreservedAnalyses();
371 }
372
373 Optional<LoopICmp>
374 LoopPredication::parseLoopICmp(ICmpInst *ICI) {
375 auto Pred = ICI->getPredicate();
376 auto *LHS = ICI->getOperand(0);
377 auto *RHS = ICI->getOperand(1);
378
379 const SCEV *LHSS = SE->getSCEV(LHS);
380 if (isa<SCEVCouldNotCompute>(LHSS))
381 return None;
382 const SCEV *RHSS = SE->getSCEV(RHS);
383 if (isa<SCEVCouldNotCompute>(RHSS))
384 return None;
385
386 // Canonicalize RHS to be loop invariant bound, LHS - a loop computable IV
387 if (SE->isLoopInvariant(LHSS, L)) {
388 std::swap(LHS, RHS);
389 std::swap(LHSS, RHSS);
390 Pred = ICmpInst::getSwappedPredicate(Pred);
391 }
392
393 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHSS);
394 if (!AR || AR->getLoop() != L)
395 return None;
396
397 return LoopICmp(Pred, AR, RHSS);
398 }
399
400 Value *LoopPredication::expandCheck(SCEVExpander &Expander,
401 Instruction *Guard,
402 ICmpInst::Predicate Pred, const SCEV *LHS,
403 const SCEV *RHS) {
404 Type *Ty = LHS->getType();
405 assert(Ty == RHS->getType() && "expandCheck operands have different types?");
406
407 if (SE->isLoopInvariant(LHS, L) && SE->isLoopInvariant(RHS, L)) {
408 IRBuilder<> Builder(Guard);
409 if (SE->isLoopEntryGuardedByCond(L, Pred, LHS, RHS))
410 return Builder.getTrue();
411 if (SE->isLoopEntryGuardedByCond(L, ICmpInst::getInversePredicate(Pred),
412 LHS, RHS))
413 return Builder.getFalse();
414 }
415
416 Value *LHSV = Expander.expandCodeFor(LHS, Ty, findInsertPt(Guard, {LHS}));
417 Value *RHSV = Expander.expandCodeFor(RHS, Ty, findInsertPt(Guard, {RHS}));
418 IRBuilder<> Builder(findInsertPt(Guard, {LHSV, RHSV}));
419 return Builder.CreateICmp(Pred, LHSV, RHSV);
420 }
421
422
423 // Returns true if it's safe to truncate the IV to RangeCheckType.
424 // When the IV type is wider than the range operand type, we can still do loop
425 // predication, by generating SCEVs for the range and latch that are of the
426 // same type. We achieve this by generating a SCEV truncate expression for the
427 // latch IV. This is done iff truncation of the IV is a safe operation,
428 // without loss of information.
429 // Another way to achieve this is by generating a wider type SCEV for the
430 // range check operand, however, this needs a more involved check that
431 // operands do not overflow. This can lead to loss of information when the
432 // range operand is of the form: add i32 %offset, %iv. We need to prove that
433 // sext(x + y) is the same as sext(x) + sext(y).
434 // This function returns true if we can safely represent the IV type in
435 // the RangeCheckType without loss of information.
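// (Illustrative: with i32 operands x = 0x7fffffff and y = 1, x + y wraps to
// 0x80000000, so sext(x + y) to i64 is negative, while sext(x) + sext(y) is
// the positive value 0x80000000; the two widened expressions differ.)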
436 static bool isSafeToTruncateWideIVType(const DataLayout &DL,
437 ScalarEvolution &SE,
438 const LoopICmp LatchCheck,
439 Type *RangeCheckType) {
440 if (!EnableIVTruncation)
441 return false;
442 assert(DL.getTypeSizeInBits(LatchCheck.IV->getType()).getFixedSize() >
443 DL.getTypeSizeInBits(RangeCheckType).getFixedSize() &&
444 "Expected latch check IV type to be larger than range check operand "
445 "type!");
446 // The start and end values of the IV should be known. This is to guarantee
447 // that truncating the wide type will not lose information.
448 auto *Limit = dyn_cast<SCEVConstant>(LatchCheck.Limit);
449 auto *Start = dyn_cast<SCEVConstant>(LatchCheck.IV->getStart());
450 if (!Limit || !Start)
451 return false;
452 // This check makes sure that the IV does not change sign during loop
453 // iterations. Consider latchType = i64, LatchStart = 5, Pred = ICMP_SGE,
454 // LatchEnd = 2, rangeCheckType = i32. If it's not a monotonic predicate, the
455 // IV wraps around, and the truncation of the IV would lose the range of
456 // iterations between 2^32 and 2^64.
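// (Conversely, e.g. latchType = i64, LatchStart = 0, LatchEnd = 1000 with a
// monotonic unsigned predicate stays within the i32 range, so the checks
// below accept the truncation.)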
457 if (!SE.getMonotonicPredicateType(LatchCheck.IV, LatchCheck.Pred))
458 return false;
459 // The active bits should be less than the bits in the RangeCheckType. This
460 // guarantees that truncating the latch check to RangeCheckType is a safe
461 // operation.
462 auto RangeCheckTypeBitSize =
463 DL.getTypeSizeInBits(RangeCheckType).getFixedSize();
464 return Start->getAPInt().getActiveBits() < RangeCheckTypeBitSize &&
465 Limit->getAPInt().getActiveBits() < RangeCheckTypeBitSize;
466 }
467
468
469 // Return a LoopICmp describing a latch check equivalent to LatchCheck but with
470 // the requested type if safe to do so. May involve the use of a new IV.
471 static Optional<LoopICmp> generateLoopLatchCheck(const DataLayout &DL,
472 ScalarEvolution &SE,
473 const LoopICmp LatchCheck,
474 Type *RangeCheckType) {
475
476 auto *LatchType = LatchCheck.IV->getType();
477 if (RangeCheckType == LatchType)
478 return LatchCheck;
479 // For now, bail out if latch type is narrower than range type.
480 if (DL.getTypeSizeInBits(LatchType).getFixedSize() <
481 DL.getTypeSizeInBits(RangeCheckType).getFixedSize())
482 return None;
483 if (!isSafeToTruncateWideIVType(DL, SE, LatchCheck, RangeCheckType))
484 return None;
485 // We can now safely identify the truncated version of the IV and limit for
486 // RangeCheckType.
487 LoopICmp NewLatchCheck;
488 NewLatchCheck.Pred = LatchCheck.Pred;
489 NewLatchCheck.IV = dyn_cast<SCEVAddRecExpr>(
490 SE.getTruncateExpr(LatchCheck.IV, RangeCheckType));
491 if (!NewLatchCheck.IV)
492 return None;
493 NewLatchCheck.Limit = SE.getTruncateExpr(LatchCheck.Limit, RangeCheckType);
494 LLVM_DEBUG(dbgs() << "IV of type: " << *LatchType
495 << "can be represented as range check type:"
496 << *RangeCheckType << "\n");
497 LLVM_DEBUG(dbgs() << "LatchCheck.IV: " << *NewLatchCheck.IV << "\n");
498 LLVM_DEBUG(dbgs() << "LatchCheck.Limit: " << *NewLatchCheck.Limit << "\n");
499 return NewLatchCheck;
500 }
501
502 bool LoopPredication::isSupportedStep(const SCEV* Step) {
503 return Step->isOne() || (Step->isAllOnesValue() && EnableCountDownLoop);
504 }
505
506 Instruction *LoopPredication::findInsertPt(Instruction *Use,
507 ArrayRef<Value*> Ops) {
508 for (Value *Op : Ops)
509 if (!L->isLoopInvariant(Op))
510 return Use;
511 return Preheader->getTerminator();
512 }
513
514 Instruction *LoopPredication::findInsertPt(Instruction *Use,
515 ArrayRef<const SCEV*> Ops) {
516 // Subtlety: SCEV considers things to be invariant if the value produced is
517 // the same across iterations. This is not the same as being able to
518 // evaluate outside the loop, which is what we actually need here.
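// For instance (illustrative): a SCEVUnknown wrapping a load that sits inside
// the loop may be loop invariant in the SCEV sense, but it typically cannot
// be expanded at the preheader terminator, so we fall back to 'Use'.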
519 for (const SCEV *Op : Ops)
520 if (!SE->isLoopInvariant(Op, L) ||
521 !isSafeToExpandAt(Op, Preheader->getTerminator(), *SE))
522 return Use;
523 return Preheader->getTerminator();
524 }
525
526 bool LoopPredication::isLoopInvariantValue(const SCEV* S) {
527 // Handling expressions which produce invariant results, but *haven't* yet
528 // been removed from the loop serves two important purposes.
529 // 1) Most importantly, it resolves a pass ordering cycle which would
530 // otherwise require us to iterate licm, loop-predication, and either
531 // loop-unswitch or loop-peeling to make progress on examples with lots of
532 // predicable range checks in a row. (Since, in the general case, we can't
533 // hoist the length checks until the dominating checks have been discharged
534 // as we can't prove doing so is safe.)
535 // 2) As a nice side effect, this exposes the value of peeling or unswitching
536 // much more obviously in the IR. Otherwise, the cost modeling for other
537 // transforms would end up needing to duplicate all of this logic to model a
538 // check which becomes predictable based on a modeled peel or unswitch.
539 //
540 // The cost of doing so in the worst case is an extra fill from the stack in
541 // the loop to materialize the loop invariant test value instead of checking
542 // against the original IV which is presumably in a register inside the loop.
543 // Such cases are presumably rare, and hint at missing opportunities for
544 // other passes.
545
546 if (SE->isLoopInvariant(S, L))
547 // Note: This is the SCEV variant, so the original Value* may be within the
548 // loop even though SCEV has proven it is loop invariant.
549 return true;
550
551 // Handle a particularly important case which SCEV doesn't yet know about and
552 // which shows up in range checks on arrays with immutable lengths.
553 // TODO: This should be sunk inside SCEV.
554 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
555 if (const auto *LI = dyn_cast<LoadInst>(U->getValue()))
556 if (LI->isUnordered() && L->hasLoopInvariantOperands(LI))
557 if (AA->pointsToConstantMemory(LI->getOperand(0)) ||
558 LI->hasMetadata(LLVMContext::MD_invariant_load))
559 return true;
560 return false;
561 }
562
563 Optional<Value *> LoopPredication::widenICmpRangeCheckIncrementingLoop(
564 LoopICmp LatchCheck, LoopICmp RangeCheck,
565 SCEVExpander &Expander, Instruction *Guard) {
566 auto *Ty = RangeCheck.IV->getType();
567 // Generate the widened condition for the forward loop:
568 // guardStart u< guardLimit &&
569 // latchLimit <pred> guardLimit - 1 - guardStart + latchStart
570 // where <pred> depends on the latch condition predicate. See the file
571 // header comment for the reasoning.
573 const SCEV *GuardStart = RangeCheck.IV->getStart();
574 const SCEV *GuardLimit = RangeCheck.Limit;
575 const SCEV *LatchStart = LatchCheck.IV->getStart();
576 const SCEV *LatchLimit = LatchCheck.Limit;
577 // Subtlety: We need all the values to be *invariant* across all iterations,
578 // but we only need to check expansion safety for those which *aren't*
579 // already guaranteed to dominate the guard.
580 if (!isLoopInvariantValue(GuardStart) ||
581 !isLoopInvariantValue(GuardLimit) ||
582 !isLoopInvariantValue(LatchStart) ||
583 !isLoopInvariantValue(LatchLimit)) {
584 LLVM_DEBUG(dbgs() << "Can't expand limit check!\n");
585 return None;
586 }
587 if (!isSafeToExpandAt(LatchStart, Guard, *SE) ||
588 !isSafeToExpandAt(LatchLimit, Guard, *SE)) {
589 LLVM_DEBUG(dbgs() << "Can't expand limit check!\n");
590 return None;
591 }
592
593 // guardLimit - guardStart + latchStart - 1
594 const SCEV *RHS =
595 SE->getAddExpr(SE->getMinusSCEV(GuardLimit, GuardStart),
596 SE->getMinusSCEV(LatchStart, SE->getOne(Ty)));
597 auto LimitCheckPred =
598 ICmpInst::getFlippedStrictnessPredicate(LatchCheck.Pred);
599
600 LLVM_DEBUG(dbgs() << "LHS: " << *LatchLimit << "\n");
601 LLVM_DEBUG(dbgs() << "RHS: " << *RHS << "\n");
602 LLVM_DEBUG(dbgs() << "Pred: " << LimitCheckPred << "\n");
603
604 auto *LimitCheck =
605 expandCheck(Expander, Guard, LimitCheckPred, LatchLimit, RHS);
606 auto *FirstIterationCheck = expandCheck(Expander, Guard, RangeCheck.Pred,
607 GuardStart, GuardLimit);
608 IRBuilder<> Builder(findInsertPt(Guard, {FirstIterationCheck, LimitCheck}));
609 return Builder.CreateAnd(FirstIterationCheck, LimitCheck);
610 }
611
612 Optional<Value *> LoopPredication::widenICmpRangeCheckDecrementingLoop(
613 LoopICmp LatchCheck, LoopICmp RangeCheck,
614 SCEVExpander &Expander, Instruction *Guard) {
615 auto *Ty = RangeCheck.IV->getType();
616 const SCEV *GuardStart = RangeCheck.IV->getStart();
617 const SCEV *GuardLimit = RangeCheck.Limit;
618 const SCEV *LatchStart = LatchCheck.IV->getStart();
619 const SCEV *LatchLimit = LatchCheck.Limit;
620 // Subtlety: We need all the values to be *invariant* across all iterations,
621 // but we only need to check expansion safety for those which *aren't*
622 // already guaranteed to dominate the guard.
623 if (!isLoopInvariantValue(GuardStart) ||
624 !isLoopInvariantValue(GuardLimit) ||
625 !isLoopInvariantValue(LatchStart) ||
626 !isLoopInvariantValue(LatchLimit)) {
627 LLVM_DEBUG(dbgs() << "Can't expand limit check!\n");
628 return None;
629 }
630 if (!isSafeToExpandAt(LatchStart, Guard, *SE) ||
631 !isSafeToExpandAt(LatchLimit, Guard, *SE)) {
632 LLVM_DEBUG(dbgs() << "Can't expand limit check!\n");
633 return None;
634 }
635 // The decrement of the latch check IV should be the same as the
636 // rangeCheckIV.
637 auto *PostDecLatchCheckIV = LatchCheck.IV->getPostIncExpr(*SE);
638 if (RangeCheck.IV != PostDecLatchCheckIV) {
639 LLVM_DEBUG(dbgs() << "Not the same. PostDecLatchCheckIV: "
640 << *PostDecLatchCheckIV
641 << " and RangeCheckIV: " << *RangeCheck.IV << "\n");
642 return None;
643 }
644
645 // Generate the widened condition for CountDownLoop:
646 // guardStart u< guardLimit &&
647 // latchLimit <pred> 1.
648 // See the file header comment for the reasoning behind the checks.
649 auto LimitCheckPred =
650 ICmpInst::getFlippedStrictnessPredicate(LatchCheck.Pred);
651 auto *FirstIterationCheck = expandCheck(Expander, Guard,
652 ICmpInst::ICMP_ULT,
653 GuardStart, GuardLimit);
654 auto *LimitCheck = expandCheck(Expander, Guard, LimitCheckPred, LatchLimit,
655 SE->getOne(Ty));
656 IRBuilder<> Builder(findInsertPt(Guard, {FirstIterationCheck, LimitCheck}));
657 return Builder.CreateAnd(FirstIterationCheck, LimitCheck);
658 }
659
660 static void normalizePredicate(ScalarEvolution *SE, Loop *L,
661 LoopICmp& RC) {
662 // LFTR canonicalizes checks to the ICMP_NE/EQ form; normalize back to the
663 // ULT/UGE form for ease of handling by our caller.
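// For example, a latch condition {0,+,1} != %n, with the start value known to
// be u<= %n, is treated as {0,+,1} u< %n (and the == form as u>=).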
664 if (ICmpInst::isEquality(RC.Pred) &&
665 RC.IV->getStepRecurrence(*SE)->isOne() &&
666 SE->isKnownPredicate(ICmpInst::ICMP_ULE, RC.IV->getStart(), RC.Limit))
667 RC.Pred = RC.Pred == ICmpInst::ICMP_NE ?
668 ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
669 }
670
671
672 /// If ICI can be widened to a loop invariant condition, emit the loop
673 /// invariant condition in the loop preheader and return it; otherwise
674 /// return None.
675 Optional<Value *> LoopPredication::widenICmpRangeCheck(ICmpInst *ICI,
676 SCEVExpander &Expander,
677 Instruction *Guard) {
678 LLVM_DEBUG(dbgs() << "Analyzing ICmpInst condition:\n");
679 LLVM_DEBUG(ICI->dump());
680
681 // parseLoopLatchICmp guarantees that the latch condition is:
682 // ++i <pred> latchLimit, where <pred> is u<, u<=, s<, or s<=.
683 // We are looking for the range checks of the form:
684 // i u< guardLimit
685 auto RangeCheck = parseLoopICmp(ICI);
686 if (!RangeCheck) {
687 LLVM_DEBUG(dbgs() << "Failed to parse the loop latch condition!\n");
688 return None;
689 }
690 LLVM_DEBUG(dbgs() << "Guard check:\n");
691 LLVM_DEBUG(RangeCheck->dump());
692 if (RangeCheck->Pred != ICmpInst::ICMP_ULT) {
693 LLVM_DEBUG(dbgs() << "Unsupported range check predicate("
694 << RangeCheck->Pred << ")!\n");
695 return None;
696 }
697 auto *RangeCheckIV = RangeCheck->IV;
698 if (!RangeCheckIV->isAffine()) {
699 LLVM_DEBUG(dbgs() << "Range check IV is not affine!\n");
700 return None;
701 }
702 auto *Step = RangeCheckIV->getStepRecurrence(*SE);
703 // We cannot just compare with latch IV step because the latch and range IVs
704 // may have different types.
705 if (!isSupportedStep(Step)) {
706 LLVM_DEBUG(dbgs() << "Range check and latch have IVs different steps!\n");
707 return None;
708 }
709 auto *Ty = RangeCheckIV->getType();
710 auto CurrLatchCheckOpt = generateLoopLatchCheck(*DL, *SE, LatchCheck, Ty);
711 if (!CurrLatchCheckOpt) {
712 LLVM_DEBUG(dbgs() << "Failed to generate a loop latch check "
713 "corresponding to range type: "
714 << *Ty << "\n");
715 return None;
716 }
717
718 LoopICmp CurrLatchCheck = *CurrLatchCheckOpt;
719 // At this point, the range and latch steps should have the same type, but need
720 // not have the same value (we support both 1 and -1 steps).
721 assert(Step->getType() ==
722 CurrLatchCheck.IV->getStepRecurrence(*SE)->getType() &&
723 "Range and latch steps should be of same type!");
724 if (Step != CurrLatchCheck.IV->getStepRecurrence(*SE)) {
725 LLVM_DEBUG(dbgs() << "Range and latch have different step values!\n");
726 return None;
727 }
728
729 if (Step->isOne())
730 return widenICmpRangeCheckIncrementingLoop(CurrLatchCheck, *RangeCheck,
731 Expander, Guard);
732 else {
733 assert(Step->isAllOnesValue() && "Step should be -1!");
734 return widenICmpRangeCheckDecrementingLoop(CurrLatchCheck, *RangeCheck,
735 Expander, Guard);
736 }
737 }
738
739 unsigned LoopPredication::collectChecks(SmallVectorImpl<Value *> &Checks,
740 Value *Condition,
741 SCEVExpander &Expander,
742 Instruction *Guard) {
743 unsigned NumWidened = 0;
744 // The guard condition is expected to be of the form:
745 // cond1 && cond2 && cond3 ...
746 // Iterate over subconditions looking for icmp conditions which can be
747 // widened across loop iterations. While widening these conditions, remember
748 // the resulting list of subconditions in the Checks vector.
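// For example, given a condition (c1 && (c2 && widenable_condition())), the
// traversal visits c1, c2 and the widenable call: each icmp is widened when
// possible, unwidenable conditions are kept as-is, and the single widenable
// call is re-appended at the end.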
749 SmallVector<Value *, 4> Worklist(1, Condition);
750 SmallPtrSet<Value *, 4> Visited;
751 Value *WideableCond = nullptr;
752 do {
753 Value *Condition = Worklist.pop_back_val();
754 if (!Visited.insert(Condition).second)
755 continue;
756
757 Value *LHS, *RHS;
758 using namespace llvm::PatternMatch;
759 if (match(Condition, m_And(m_Value(LHS), m_Value(RHS)))) {
760 Worklist.push_back(LHS);
761 Worklist.push_back(RHS);
762 continue;
763 }
764
765 if (match(Condition,
766 m_Intrinsic<Intrinsic::experimental_widenable_condition>())) {
767 // Pick any, we don't care which
768 WideableCond = Condition;
769 continue;
770 }
771
772 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Condition)) {
773 if (auto NewRangeCheck = widenICmpRangeCheck(ICI, Expander,
774 Guard)) {
775 Checks.push_back(NewRangeCheck.getValue());
776 NumWidened++;
777 continue;
778 }
779 }
780
781 // Save the condition as is if we can't widen it
782 Checks.push_back(Condition);
783 } while (!Worklist.empty());
784 // At the moment, our matching logic for widenable conditions implicitly
785 // assumes we preserve the form: (br (and Cond, WC())). FIXME
786 // Note that if there were multiple calls to widenable condition in the
787 // traversal, we only need to keep one, and which one is arbitrary.
788 if (WideableCond)
789 Checks.push_back(WideableCond);
790 return NumWidened;
791 }
792
793 bool LoopPredication::widenGuardConditions(IntrinsicInst *Guard,
794 SCEVExpander &Expander) {
795 LLVM_DEBUG(dbgs() << "Processing guard:\n");
796 LLVM_DEBUG(Guard->dump());
797
798 TotalConsidered++;
799 SmallVector<Value *, 4> Checks;
800 unsigned NumWidened = collectChecks(Checks, Guard->getOperand(0), Expander,
801 Guard);
802 if (NumWidened == 0)
803 return false;
804
805 TotalWidened += NumWidened;
806
807 // Emit the new guard condition
808 IRBuilder<> Builder(findInsertPt(Guard, Checks));
809 Value *AllChecks = Builder.CreateAnd(Checks);
810 auto *OldCond = Guard->getOperand(0);
811 Guard->setOperand(0, AllChecks);
812 RecursivelyDeleteTriviallyDeadInstructions(OldCond);
813
814 LLVM_DEBUG(dbgs() << "Widened checks = " << NumWidened << "\n");
815 return true;
816 }
817
818 bool LoopPredication::widenWidenableBranchGuardConditions(
819 BranchInst *BI, SCEVExpander &Expander) {
820 assert(isGuardAsWidenableBranch(BI) && "Must be!");
821 LLVM_DEBUG(dbgs() << "Processing guard:\n");
822 LLVM_DEBUG(BI->dump());
823
824 TotalConsidered++;
825 SmallVector<Value *, 4> Checks;
826 unsigned NumWidened = collectChecks(Checks, BI->getCondition(),
827 Expander, BI);
828 if (NumWidened == 0)
829 return false;
830
831 TotalWidened += NumWidened;
832
833 // Emit the new guard condition
834 IRBuilder<> Builder(findInsertPt(BI, Checks));
835 Value *AllChecks = Builder.CreateAnd(Checks);
836 auto *OldCond = BI->getCondition();
837 BI->setCondition(AllChecks);
838 RecursivelyDeleteTriviallyDeadInstructions(OldCond);
839 assert(isGuardAsWidenableBranch(BI) &&
840 "Stopped being a guard after transform?");
841
842 LLVM_DEBUG(dbgs() << "Widened checks = " << NumWidened << "\n");
843 return true;
844 }
845
846 Optional<LoopICmp> LoopPredication::parseLoopLatchICmp() {
847 using namespace PatternMatch;
848
849 BasicBlock *LoopLatch = L->getLoopLatch();
850 if (!LoopLatch) {
851 LLVM_DEBUG(dbgs() << "The loop doesn't have a single latch!\n");
852 return None;
853 }
854
855 auto *BI = dyn_cast<BranchInst>(LoopLatch->getTerminator());
856 if (!BI || !BI->isConditional()) {
857 LLVM_DEBUG(dbgs() << "Failed to match the latch terminator!\n");
858 return None;
859 }
860 BasicBlock *TrueDest = BI->getSuccessor(0);
861 assert(
862 (TrueDest == L->getHeader() || BI->getSuccessor(1) == L->getHeader()) &&
863 "One of the latch's destinations must be the header");
864
865 auto *ICI = dyn_cast<ICmpInst>(BI->getCondition());
866 if (!ICI) {
867 LLVM_DEBUG(dbgs() << "Failed to match the latch condition!\n");
868 return None;
869 }
870 auto Result = parseLoopICmp(ICI);
871 if (!Result) {
872 LLVM_DEBUG(dbgs() << "Failed to parse the loop latch condition!\n");
873 return None;
874 }
875
876 if (TrueDest != L->getHeader())
877 Result->Pred = ICmpInst::getInversePredicate(Result->Pred);
878
879 // Check affine first, so if it's not we don't try to compute the step
880 // recurrence.
881 if (!Result->IV->isAffine()) {
882 LLVM_DEBUG(dbgs() << "The induction variable is not affine!\n");
883 return None;
884 }
885
886 auto *Step = Result->IV->getStepRecurrence(*SE);
887 if (!isSupportedStep(Step)) {
888 LLVM_DEBUG(dbgs() << "Unsupported loop stride(" << *Step << ")!\n");
889 return None;
890 }
891
892 auto IsUnsupportedPredicate = [](const SCEV *Step, ICmpInst::Predicate Pred) {
893 if (Step->isOne()) {
894 return Pred != ICmpInst::ICMP_ULT && Pred != ICmpInst::ICMP_SLT &&
895 Pred != ICmpInst::ICMP_ULE && Pred != ICmpInst::ICMP_SLE;
896 } else {
897 assert(Step->isAllOnesValue() && "Step should be -1!");
898 return Pred != ICmpInst::ICMP_UGT && Pred != ICmpInst::ICMP_SGT &&
899 Pred != ICmpInst::ICMP_UGE && Pred != ICmpInst::ICMP_SGE;
900 }
901 };
902
903 normalizePredicate(SE, L, *Result);
904 if (IsUnsupportedPredicate(Step, Result->Pred)) {
905 LLVM_DEBUG(dbgs() << "Unsupported loop latch predicate(" << Result->Pred
906 << ")!\n");
907 return None;
908 }
909
910 return Result;
911 }
912
913
914 bool LoopPredication::isLoopProfitableToPredicate() {
915 if (SkipProfitabilityChecks || !BPI)
916 return true;
917
918 SmallVector<std::pair<BasicBlock *, BasicBlock *>, 8> ExitEdges;
919 L->getExitEdges(ExitEdges);
920 // If there is only one exiting edge in the loop, it is always profitable to
921 // predicate the loop.
922 if (ExitEdges.size() == 1)
923 return true;
924
925 // Calculate the exiting probabilities of all exiting edges from the loop,
926 // starting with the LatchExitProbability.
927 // Heuristic for profitability: If any of the exiting blocks' probability of
928 // exiting the loop is larger than the probability of exiting through the
929 // latch block, it's not profitable to predicate the loop.
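// (Worked instance with the default scale of 2.0: if the latch exit edge has
// probability 3% and some other exit edge has probability 10%, then
// 10% > 2.0 * 3% and predication is deemed unprofitable.)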
930 auto *LatchBlock = L->getLoopLatch();
931 assert(LatchBlock && "Should have a single latch at this point!");
932 auto *LatchTerm = LatchBlock->getTerminator();
933 assert(LatchTerm->getNumSuccessors() == 2 &&
934 "expected to be an exiting block with 2 succs!");
935 unsigned LatchBrExitIdx =
936 LatchTerm->getSuccessor(0) == L->getHeader() ? 1 : 0;
937 BranchProbability LatchExitProbability =
938 BPI->getEdgeProbability(LatchBlock, LatchBrExitIdx);
939
940 // Protect against degenerate inputs provided by the user. Providing a value
941 // less than one can invert the definition of profitable loop predication.
942 float ScaleFactor = LatchExitProbabilityScale;
943 if (ScaleFactor < 1) {
944 LLVM_DEBUG(
945 dbgs()
946 << "Ignored user setting for loop-predication-latch-probability-scale: "
947 << LatchExitProbabilityScale << "\n");
948 LLVM_DEBUG(dbgs() << "The value is set to 1.0\n");
949 ScaleFactor = 1.0;
950 }
951 const auto LatchProbabilityThreshold =
952 LatchExitProbability * ScaleFactor;
953
954 for (const auto &ExitEdge : ExitEdges) {
955 BranchProbability ExitingBlockProbability =
956 BPI->getEdgeProbability(ExitEdge.first, ExitEdge.second);
957 // Some exiting edge has higher probability than the latch exiting edge.
958 // No longer profitable to predicate.
959 if (ExitingBlockProbability > LatchProbabilityThreshold)
960 return false;
961 }
962 // Using BPI, we have concluded that the most probable way to exit from the
963 // loop is through the latch (or there's no profile information and all
964 // exits are equally likely).
965 return true;
966 }
967
968 /// If we can (cheaply) find a widenable branch which controls entry into the
969 /// loop, return it.
970 static BranchInst *FindWidenableTerminatorAboveLoop(Loop *L, LoopInfo &LI) {
971 // Walk back through any unconditional executed blocks and see if we can find
972 // a widenable condition which seems to control execution of this loop. Note
973 // that we predict that maythrow calls are likely untaken and thus that it's
974 // profitable to widen a branch before a maythrow call with a condition
975 // afterwards even though that may cause the slow path to run in a case where
976 // it wouldn't have otherwise.
977 BasicBlock *BB = L->getLoopPreheader();
978 if (!BB)
979 return nullptr;
980 do {
981 if (BasicBlock *Pred = BB->getSinglePredecessor())
982 if (BB == Pred->getSingleSuccessor()) {
983 BB = Pred;
984 continue;
985 }
986 break;
987 } while (true);
988
989 if (BasicBlock *Pred = BB->getSinglePredecessor()) {
990 auto *Term = Pred->getTerminator();
991
992 Value *Cond, *WC;
993 BasicBlock *IfTrueBB, *IfFalseBB;
994 if (parseWidenableBranch(Term, Cond, WC, IfTrueBB, IfFalseBB) &&
995 IfTrueBB == BB)
996 return cast<BranchInst>(Term);
997 }
998 return nullptr;
999 }
1000
1001 /// Return the minimum of all analyzeable exit counts. This is an upper bound
1002 /// on the actual exit count. If there are not at least two analyzeable exits,
1003 /// returns SCEVCouldNotCompute.
1004 static const SCEV *getMinAnalyzeableBackedgeTakenCount(ScalarEvolution &SE,
1005 DominatorTree &DT,
1006 Loop *L) {
1007 SmallVector<BasicBlock *, 16> ExitingBlocks;
1008 L->getExitingBlocks(ExitingBlocks);
1009
1010 SmallVector<const SCEV *, 4> ExitCounts;
1011 for (BasicBlock *ExitingBB : ExitingBlocks) {
1012 const SCEV *ExitCount = SE.getExitCount(L, ExitingBB);
1013 if (isa<SCEVCouldNotCompute>(ExitCount))
1014 continue;
1015 assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
1016 "We should only have known counts for exiting blocks that "
1017 "dominate latch!");
1018 ExitCounts.push_back(ExitCount);
1019 }
1020 if (ExitCounts.size() < 2)
1021 return SE.getCouldNotCompute();
1022 return SE.getUMinFromMismatchedTypes(ExitCounts);
1023 }
1024
1025 /// This implements an analogous, but entirely distinct transform from the main
1026 /// loop predication transform. This one is phrased in terms of using a
1027 /// widenable branch *outside* the loop to allow us to simplify loop exits in
1028 /// the following loop. This is close in spirit to the IndVarSimplify transform
1029 /// of the same name, but is materially different in that widening sharply
1030 /// loosens the legality requirements.
1031 bool LoopPredication::predicateLoopExits(Loop *L, SCEVExpander &Rewriter) {
1032 // The transformation performed here aims to widen a widenable condition
1033 // above the loop such that all analyzeable exits leading to deopt are dead.
1034 // It assumes that the latch is the dominant exit for profitability and that
1035 // exits branching to deoptimizing blocks are rarely taken. It relies on the
1036 // semantics of widenable expressions for legality. (i.e. being able to fall
1037 // down the widenable path spuriously allows us to ignore exit order,
1038 // unanalyzeable exits, side effects, exceptional exits, and other challenges
1039 // which restrict the applicability of the non-WC based version of this
1040 // transform in IndVarSimplify.)
1041 //
1042 // NOTE ON POISON/UNDEF - We're hoisting an expression above guards which may
1043 // imply flags on the expression being hoisted and inserting new uses (flags
1044 // are only correct for current uses). The result is that we may be
1045 // inserting a branch on the value which can be either poison or undef. In
1046 // this case, the branch can legally go either way; we just need to avoid
1047 // introducing UB. This is achieved through the use of the freeze
1048 // instruction.
1049
1050 SmallVector<BasicBlock *, 16> ExitingBlocks;
1051 L->getExitingBlocks(ExitingBlocks);
1052
1053 if (ExitingBlocks.empty())
1054 return false; // Nothing to do.
1055
1056 auto *Latch = L->getLoopLatch();
1057 if (!Latch)
1058 return false;
1059
1060 auto *WidenableBR = FindWidenableTerminatorAboveLoop(L, *LI);
1061 if (!WidenableBR)
1062 return false;
1063
1064 const SCEV *LatchEC = SE->getExitCount(L, Latch);
1065 if (isa<SCEVCouldNotCompute>(LatchEC))
1066 return false; // profitability - want hot exit in analyzeable set
1067
1068 // At this point, we have found an analyzeable latch, and a widenable
1069 // condition above the loop. If we have a widenable exit within the loop
1070 // (for which we can't compute exit counts), drop the ability to further
1071 // widen so that we gain the ability to analyze its exit count and perform this
1072 // transform. TODO: It'd be nice to know for sure the exit became
1073 // analyzeable after dropping widenability.
1074 {
1075 bool Invalidate = false;
1076
1077 for (auto *ExitingBB : ExitingBlocks) {
1078 if (LI->getLoopFor(ExitingBB) != L)
1079 continue;
1080
1081 auto *BI = dyn_cast<BranchInst>(ExitingBB->getTerminator());
1082 if (!BI)
1083 continue;
1084
1085 Use *Cond, *WC;
1086 BasicBlock *IfTrueBB, *IfFalseBB;
1087 if (parseWidenableBranch(BI, Cond, WC, IfTrueBB, IfFalseBB) &&
1088 L->contains(IfTrueBB)) {
1089 WC->set(ConstantInt::getTrue(IfTrueBB->getContext()));
1090 Invalidate = true;
1091 }
1092 }
1093 if (Invalidate)
1094 SE->forgetLoop(L);
1095 }
1096
1097 // The use of umin(all analyzeable exits) instead of latch is subtle, but
1098 // important for profitability. We may have a loop which hasn't been fully
1099 // canonicalized just yet. If the exit we chose to widen is provably never
1100 // taken, we want the widened form to *also* be provably never taken. We
1101 // can't guarantee this as a current unanalyzeable exit may later become
1102 // analyzeable, but we can at least avoid the obvious cases.
1103 const SCEV *MinEC = getMinAnalyzeableBackedgeTakenCount(*SE, *DT, L);
1104 if (isa<SCEVCouldNotCompute>(MinEC) || MinEC->getType()->isPointerTy() ||
1105 !SE->isLoopInvariant(MinEC, L) ||
1106 !isSafeToExpandAt(MinEC, WidenableBR, *SE))
1107 return false;
1108
1109 // Subtlety: We need to avoid inserting additional uses of the WC. We know
1110 // that it can only have one transitive use at the moment, and thus moving
1111 // that use to just before the branch and inserting code before it and then
1112 // modifying the operand is legal.
1113 auto *IP = cast<Instruction>(WidenableBR->getCondition());
1114 IP->moveBefore(WidenableBR);
1115 Rewriter.setInsertPoint(IP);
1116 IRBuilder<> B(IP);
1117
1118 bool Changed = false;
1119 Value *MinECV = nullptr; // lazily generated if needed
1120 for (BasicBlock *ExitingBB : ExitingBlocks) {
1121 // If our exiting block exits multiple loops, we can only rewrite the
1122 // innermost one. Otherwise, we're changing how many times the innermost
1123 // loop runs before it exits.
1124 if (LI->getLoopFor(ExitingBB) != L)
1125 continue;
1126
1127 // Can't rewrite non-branch yet.
1128 auto *BI = dyn_cast<BranchInst>(ExitingBB->getTerminator());
1129 if (!BI)
1130 continue;
1131
1132 // If already constant, nothing to do.
1133 if (isa<Constant>(BI->getCondition()))
1134 continue;
1135
1136 const SCEV *ExitCount = SE->getExitCount(L, ExitingBB);
1137 if (isa<SCEVCouldNotCompute>(ExitCount) ||
1138 ExitCount->getType()->isPointerTy() ||
1139 !isSafeToExpandAt(ExitCount, WidenableBR, *SE))
1140 continue;
1141
1142 const bool ExitIfTrue = !L->contains(*succ_begin(ExitingBB));
1143 BasicBlock *ExitBB = BI->getSuccessor(ExitIfTrue ? 0 : 1);
1144 if (!ExitBB->getPostdominatingDeoptimizeCall())
1145 continue;
1146
1147 // Here we can be fairly sure that executing this exit will most likely
1148 // lead to executing llvm.experimental.deoptimize.
1149 // This is a profitability heuristic, not a legality constraint.
1150
1151 // If we found a widenable exit condition, do two things:
1152 // 1) fold the widened exit test into the widenable condition
1153 // 2) fold the branch to untaken - avoids infinite looping
1154
1155 Value *ECV = Rewriter.expandCodeFor(ExitCount);
1156 if (!MinECV)
1157 MinECV = Rewriter.expandCodeFor(MinEC);
1158 Value *RHS = MinECV;
1159 if (ECV->getType() != RHS->getType()) {
1160 Type *WiderTy = SE->getWiderType(ECV->getType(), RHS->getType());
1161 ECV = B.CreateZExt(ECV, WiderTy);
1162 RHS = B.CreateZExt(RHS, WiderTy);
1163 }
1164 assert(!Latch || DT->dominates(ExitingBB, Latch));
1165 Value *NewCond = B.CreateICmp(ICmpInst::ICMP_UGT, ECV, RHS);
1166 // Freeze poison or undef to an arbitrary bit pattern to ensure we can
1167 // branch without introducing UB. See NOTE ON POISON/UNDEF above for
1168 // context.
1169 NewCond = B.CreateFreeze(NewCond);
1170
1171 widenWidenableBranch(WidenableBR, NewCond);
1172
1173 Value *OldCond = BI->getCondition();
1174 BI->setCondition(ConstantInt::get(OldCond->getType(), !ExitIfTrue));
1175 Changed = true;
1176 }
1177
1178 if (Changed)
1179 // We just mutated a bunch of loop exits changing their exit counts
1180 // widely. We need to force recomputation of the exit counts given these
1181 // changes. Note that all of the inserted exits are never taken, and
1182 // should be removed next time the CFG is modified.
1183 SE->forgetLoop(L);
1184 return Changed;
1185 }
1186
1187 bool LoopPredication::runOnLoop(Loop *Loop) {
1188 L = Loop;
1189
1190 LLVM_DEBUG(dbgs() << "Analyzing ");
1191 LLVM_DEBUG(L->dump());
1192
1193 Module *M = L->getHeader()->getModule();
1194
1195 // There is nothing to do if the module doesn't use guards
1196 auto *GuardDecl =
1197 M->getFunction(Intrinsic::getName(Intrinsic::experimental_guard));
1198 bool HasIntrinsicGuards = GuardDecl && !GuardDecl->use_empty();
1199 auto *WCDecl = M->getFunction(
1200 Intrinsic::getName(Intrinsic::experimental_widenable_condition));
1201 bool HasWidenableConditions =
1202 PredicateWidenableBranchGuards && WCDecl && !WCDecl->use_empty();
1203 if (!HasIntrinsicGuards && !HasWidenableConditions)
1204 return false;
1205
1206 DL = &M->getDataLayout();
1207
1208 Preheader = L->getLoopPreheader();
1209 if (!Preheader)
1210 return false;
1211
1212 auto LatchCheckOpt = parseLoopLatchICmp();
1213 if (!LatchCheckOpt)
1214 return false;
1215 LatchCheck = *LatchCheckOpt;
1216
1217 LLVM_DEBUG(dbgs() << "Latch check:\n");
1218 LLVM_DEBUG(LatchCheck.dump());
1219
1220 if (!isLoopProfitableToPredicate()) {
1221 LLVM_DEBUG(dbgs() << "Loop not profitable to predicate!\n");
1222 return false;
1223 }
1224 // Collect all the guards into a vector and process later, so as not
1225 // to invalidate the instruction iterator.
1226 SmallVector<IntrinsicInst *, 4> Guards;
1227 SmallVector<BranchInst *, 4> GuardsAsWidenableBranches;
1228 for (const auto BB : L->blocks()) {
1229 for (auto &I : *BB)
1230 if (isGuard(&I))
1231 Guards.push_back(cast<IntrinsicInst>(&I));
1232 if (PredicateWidenableBranchGuards &&
1233 isGuardAsWidenableBranch(BB->getTerminator()))
1234 GuardsAsWidenableBranches.push_back(
1235 cast<BranchInst>(BB->getTerminator()));
1236 }
1237
1238 SCEVExpander Expander(*SE, *DL, "loop-predication");
1239 bool Changed = false;
1240 for (auto *Guard : Guards)
1241 Changed |= widenGuardConditions(Guard, Expander);
1242 for (auto *Guard : GuardsAsWidenableBranches)
1243 Changed |= widenWidenableBranchGuardConditions(Guard, Expander);
1244 Changed |= predicateLoopExits(L, Expander);
1245 return Changed;
1246 }
1247