//===-- SimplifyIndVar.cpp - Induction variable simplification ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements induction variable simplification. It does
// not define any actual pass or policy, but provides a single function to
// simplify a loop's induction variables based on ScalarEvolution.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/SimplifyIndVar.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"

using namespace llvm;

#define DEBUG_TYPE "indvars"

STATISTIC(NumElimIdentity, "Number of IV identities eliminated");
STATISTIC(NumElimOperand, "Number of IV operands folded into a use");
STATISTIC(NumFoldedUser, "Number of IV users folded into a constant");
STATISTIC(NumElimRem, "Number of IV remainder operations eliminated");
STATISTIC(
    NumSimplifiedSDiv,
    "Number of IV signed division operations converted to unsigned division");
STATISTIC(
    NumSimplifiedSRem,
    "Number of IV signed remainder operations converted to unsigned remainder");
STATISTIC(NumElimCmp, "Number of IV comparisons eliminated");

namespace {
/// This is a utility for simplifying induction variables
/// based on ScalarEvolution. It is the primary instrument of the
/// IndvarSimplify pass, but it may also be directly invoked to clean up after
/// other loop passes that preserve SCEV.
class SimplifyIndvar {
  Loop *L;
  LoopInfo *LI;
  ScalarEvolution *SE;
  DominatorTree *DT;
  const TargetTransformInfo *TTI;
  SCEVExpander &Rewriter;
  SmallVectorImpl<WeakTrackingVH> &DeadInsts;

  bool Changed = false;

public:
  SimplifyIndvar(Loop *Loop, ScalarEvolution *SE, DominatorTree *DT,
                 LoopInfo *LI, const TargetTransformInfo *TTI,
                 SCEVExpander &Rewriter,
                 SmallVectorImpl<WeakTrackingVH> &Dead)
      : L(Loop), LI(LI), SE(SE), DT(DT), TTI(TTI), Rewriter(Rewriter),
        DeadInsts(Dead) {
    assert(LI && "IV simplification requires LoopInfo");
  }

  bool hasChanged() const { return Changed; }

  /// Iteratively perform simplification on a worklist of users of the
  /// specified induction variable. This is the top-level driver that applies
  /// all simplifications to users of an IV.
  void simplifyUsers(PHINode *CurrIV, IVVisitor *V = nullptr);

  Value *foldIVUser(Instruction *UseInst, Instruction *IVOperand);

  bool eliminateIdentitySCEV(Instruction *UseInst, Instruction *IVOperand);
  bool replaceIVUserWithLoopInvariant(Instruction *UseInst);
  bool replaceFloatIVWithIntegerIV(Instruction *UseInst);

  bool eliminateOverflowIntrinsic(WithOverflowInst *WO);
  bool eliminateSaturatingIntrinsic(SaturatingInst *SI);
  bool eliminateTrunc(TruncInst *TI);
  bool eliminateIVUser(Instruction *UseInst, Instruction *IVOperand);
  bool makeIVComparisonInvariant(ICmpInst *ICmp, Instruction *IVOperand);
  void eliminateIVComparison(ICmpInst *ICmp, Instruction *IVOperand);
  void simplifyIVRemainder(BinaryOperator *Rem, Instruction *IVOperand,
                           bool IsSigned);
  void replaceRemWithNumerator(BinaryOperator *Rem);
  void replaceRemWithNumeratorOrZero(BinaryOperator *Rem);
  void replaceSRemWithURem(BinaryOperator *Rem);
  bool eliminateSDiv(BinaryOperator *SDiv);
  bool strengthenOverflowingOperation(BinaryOperator *OBO,
                                      Instruction *IVOperand);
  bool strengthenRightShift(BinaryOperator *BO, Instruction *IVOperand);
};
} // namespace

/// Find a point in code which dominates all given instructions. We can safely
/// assume that, whatever fact we can prove at the found point, this fact is
/// also true for each of the given instructions.
static Instruction *findCommonDominator(ArrayRef<Instruction *> Instructions,
                                        DominatorTree &DT) {
  Instruction *CommonDom = nullptr;
  for (auto *Insn : Instructions)
    CommonDom =
        CommonDom ? DT.findNearestCommonDominator(CommonDom, Insn) : Insn;
  assert(CommonDom && "Common dominator not found?");
  return CommonDom;
}

/// Fold an IV operand into its use. This removes increments of an
/// aligned IV when used by an instruction that ignores the low bits.
///
/// IVOperand is guaranteed SCEVable, but UseInst may not be.
///
/// Return the operand of IVOperand for this induction variable if IVOperand
/// can be folded (in case more folding opportunities have been exposed).
/// Otherwise return null.
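///
/// A sketch of the intended fold (illustrative, not taken from a test): with
/// an IV known to advance in multiples of 4, e.g. %iv = {0,+,4},
///   %add = add i32 %iv, 1
///   %shr = lshr i32 %add, 2
/// can be folded to
///   %shr = lshr i32 %iv, 2
/// because the +1 only affects bits the shift discards, so getSCEV(%shr) is
/// unchanged by the replacement.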
Value *SimplifyIndvar::foldIVUser(Instruction *UseInst, Instruction *IVOperand) {
  Value *IVSrc = nullptr;
  const unsigned OperIdx = 0;
  const SCEV *FoldedExpr = nullptr;
  bool MustDropExactFlag = false;
  switch (UseInst->getOpcode()) {
  default:
    return nullptr;
  case Instruction::UDiv:
  case Instruction::LShr:
    // We're only interested in the case where we know something about
    // the numerator and have a constant denominator.
    if (IVOperand != UseInst->getOperand(OperIdx) ||
        !isa<ConstantInt>(UseInst->getOperand(1)))
      return nullptr;

    // Attempt to fold a binary operator with constant operand.
    // e.g. ((I + 1) >> 2) => I >> 2
    if (!isa<BinaryOperator>(IVOperand)
        || !isa<ConstantInt>(IVOperand->getOperand(1)))
      return nullptr;

    IVSrc = IVOperand->getOperand(0);
    // IVSrc must be the (SCEVable) IV, since the other operand is const.
    assert(SE->isSCEVable(IVSrc->getType()) && "Expect SCEVable IV operand");

    ConstantInt *D = cast<ConstantInt>(UseInst->getOperand(1));
    if (UseInst->getOpcode() == Instruction::LShr) {
      // Get a constant for the divisor. See createSCEV.
      uint32_t BitWidth = cast<IntegerType>(UseInst->getType())->getBitWidth();
      if (D->getValue().uge(BitWidth))
        return nullptr;

      D = ConstantInt::get(UseInst->getContext(),
                           APInt::getOneBitSet(BitWidth, D->getZExtValue()));
    }
    const auto *LHS = SE->getSCEV(IVSrc);
    const auto *RHS = SE->getSCEV(D);
    FoldedExpr = SE->getUDivExpr(LHS, RHS);
    // We might have 'exact' flag set at this point which will no longer be
    // correct after we make the replacement.
    if (UseInst->isExact() && LHS != SE->getMulExpr(FoldedExpr, RHS))
      MustDropExactFlag = true;
  }
  // We have something that might fold its operand. Compare SCEVs.
  if (!SE->isSCEVable(UseInst->getType()))
    return nullptr;

  // Bypass the operand if SCEV can prove it has no effect.
  if (SE->getSCEV(UseInst) != FoldedExpr)
    return nullptr;

  LLVM_DEBUG(dbgs() << "INDVARS: Eliminated IV operand: " << *IVOperand
                    << " -> " << *UseInst << '\n');

  UseInst->setOperand(OperIdx, IVSrc);
  assert(SE->getSCEV(UseInst) == FoldedExpr && "bad SCEV with folded oper");

  if (MustDropExactFlag)
    UseInst->dropPoisonGeneratingFlags();

  ++NumElimOperand;
  Changed = true;
  if (IVOperand->use_empty())
    DeadInsts.emplace_back(IVOperand);
  return IVSrc;
}

bool SimplifyIndvar::makeIVComparisonInvariant(ICmpInst *ICmp,
                                               Instruction *IVOperand) {
  auto *Preheader = L->getLoopPreheader();
  if (!Preheader)
    return false;
  unsigned IVOperIdx = 0;
  ICmpInst::Predicate Pred = ICmp->getPredicate();
  if (IVOperand != ICmp->getOperand(0)) {
    // Swapped
    assert(IVOperand == ICmp->getOperand(1) && "Can't find IVOperand");
    IVOperIdx = 1;
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Get the SCEVs for the ICmp operands (in the specific context of the
  // current loop)
  const Loop *ICmpLoop = LI->getLoopFor(ICmp->getParent());
  const SCEV *S = SE->getSCEVAtScope(ICmp->getOperand(IVOperIdx), ICmpLoop);
  const SCEV *X = SE->getSCEVAtScope(ICmp->getOperand(1 - IVOperIdx), ICmpLoop);
  auto LIP = SE->getLoopInvariantPredicate(Pred, S, X, L, ICmp);
  if (!LIP)
    return false;
  ICmpInst::Predicate InvariantPredicate = LIP->Pred;
  const SCEV *InvariantLHS = LIP->LHS;
  const SCEV *InvariantRHS = LIP->RHS;

  // Do not generate something ridiculous.
  auto *PHTerm = Preheader->getTerminator();
  if (Rewriter.isHighCostExpansion({ InvariantLHS, InvariantRHS }, L,
                                   2 * SCEVCheapExpansionBudget, TTI, PHTerm))
    return false;
  auto *NewLHS =
      Rewriter.expandCodeFor(InvariantLHS, IVOperand->getType(), PHTerm);
  auto *NewRHS =
      Rewriter.expandCodeFor(InvariantRHS, IVOperand->getType(), PHTerm);
  LLVM_DEBUG(dbgs() << "INDVARS: Simplified comparison: " << *ICmp << '\n');
  ICmp->setPredicate(InvariantPredicate);
  ICmp->setOperand(0, NewLHS);
  ICmp->setOperand(1, NewRHS);
  return true;
}

/// SimplifyIVUsers helper for eliminating useless
/// comparisons against an induction variable.
void SimplifyIndvar::eliminateIVComparison(ICmpInst *ICmp,
                                           Instruction *IVOperand) {
  unsigned IVOperIdx = 0;
  ICmpInst::Predicate Pred = ICmp->getPredicate();
  ICmpInst::Predicate OriginalPred = Pred;
  if (IVOperand != ICmp->getOperand(0)) {
    // Swapped
    assert(IVOperand == ICmp->getOperand(1) && "Can't find IVOperand");
    IVOperIdx = 1;
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Get the SCEVs for the ICmp operands (in the specific context of the
  // current loop)
  const Loop *ICmpLoop = LI->getLoopFor(ICmp->getParent());
  const SCEV *S = SE->getSCEVAtScope(ICmp->getOperand(IVOperIdx), ICmpLoop);
  const SCEV *X = SE->getSCEVAtScope(ICmp->getOperand(1 - IVOperIdx), ICmpLoop);

  // If the condition is always true or always false in the given context,
  // replace it with a constant value.
  SmallVector<Instruction *, 4> Users;
  for (auto *U : ICmp->users())
    Users.push_back(cast<Instruction>(U));
  const Instruction *CtxI = findCommonDominator(Users, *DT);
  if (auto Ev = SE->evaluatePredicateAt(Pred, S, X, CtxI)) {
    SE->forgetValue(ICmp);
    ICmp->replaceAllUsesWith(ConstantInt::getBool(ICmp->getContext(), *Ev));
    DeadInsts.emplace_back(ICmp);
    LLVM_DEBUG(dbgs() << "INDVARS: Eliminated comparison: " << *ICmp << '\n');
  } else if (makeIVComparisonInvariant(ICmp, IVOperand)) {
    // fallthrough to end of function
  } else if (ICmpInst::isSigned(OriginalPred) &&
             SE->isKnownNonNegative(S) && SE->isKnownNonNegative(X)) {
    // If we were unable to make anything above, all we can do is canonicalize
    // the comparison hoping that it will open the doors for other
    // optimizations. If we find out that we compare two non-negative values,
    // we turn the instruction's predicate to its unsigned version. Note that
    // we cannot rely on Pred here unless we check if we have swapped it.
    assert(ICmp->getPredicate() == OriginalPred && "Predicate changed?");
    LLVM_DEBUG(dbgs() << "INDVARS: Turn to unsigned comparison: " << *ICmp
                      << '\n');
    ICmp->setPredicate(ICmpInst::getUnsignedPredicate(OriginalPred));
  } else
    return;

  ++NumElimCmp;
  Changed = true;
}

bool SimplifyIndvar::eliminateSDiv(BinaryOperator *SDiv) {
  // Get the SCEVs for the SDiv operands.
  auto *N = SE->getSCEV(SDiv->getOperand(0));
  auto *D = SE->getSCEV(SDiv->getOperand(1));

  // Simplify unnecessary loops away.
  const Loop *L = LI->getLoopFor(SDiv->getParent());
  N = SE->getSCEVAtScope(N, L);
  D = SE->getSCEVAtScope(D, L);

  // Replace sdiv by udiv if both of the operands are non-negative
  if (SE->isKnownNonNegative(N) && SE->isKnownNonNegative(D)) {
    auto *UDiv = BinaryOperator::Create(
        BinaryOperator::UDiv, SDiv->getOperand(0), SDiv->getOperand(1),
        SDiv->getName() + ".udiv", SDiv);
    UDiv->setIsExact(SDiv->isExact());
    SDiv->replaceAllUsesWith(UDiv);
    LLVM_DEBUG(dbgs() << "INDVARS: Simplified sdiv: " << *SDiv << '\n');
    ++NumSimplifiedSDiv;
    Changed = true;
    DeadInsts.push_back(SDiv);
    return true;
  }

  return false;
}

// i %s n -> i %u n if i >= 0 and n >= 0
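// For example (illustrative only):
//   %r = srem i32 %i, %n    ; SCEV proves %i >= 0 and %n >= 0
// becomes
//   %r.urem = urem i32 %i, %n
// which later passes reason about more easily than the signed form.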
void SimplifyIndvar::replaceSRemWithURem(BinaryOperator *Rem) {
  auto *N = Rem->getOperand(0), *D = Rem->getOperand(1);
  auto *URem = BinaryOperator::Create(BinaryOperator::URem, N, D,
                                      Rem->getName() + ".urem", Rem);
  Rem->replaceAllUsesWith(URem);
  LLVM_DEBUG(dbgs() << "INDVARS: Simplified srem: " << *Rem << '\n');
  ++NumSimplifiedSRem;
  Changed = true;
  DeadInsts.emplace_back(Rem);
}

// i % n --> i if i is in [0,n).
void SimplifyIndvar::replaceRemWithNumerator(BinaryOperator *Rem) {
  Rem->replaceAllUsesWith(Rem->getOperand(0));
  LLVM_DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
  ++NumElimRem;
  Changed = true;
  DeadInsts.emplace_back(Rem);
}

// (i+1) % n --> (i+1)==n?0:(i+1) if i is in [0,n).
void SimplifyIndvar::replaceRemWithNumeratorOrZero(BinaryOperator *Rem) {
  auto *T = Rem->getType();
  auto *N = Rem->getOperand(0), *D = Rem->getOperand(1);
  ICmpInst *ICmp = new ICmpInst(Rem, ICmpInst::ICMP_EQ, N, D);
  SelectInst *Sel =
      SelectInst::Create(ICmp, ConstantInt::get(T, 0), N, "iv.rem", Rem);
  Rem->replaceAllUsesWith(Sel);
  LLVM_DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
  ++NumElimRem;
  Changed = true;
  DeadInsts.emplace_back(Rem);
}

/// SimplifyIVUsers helper for eliminating useless remainder operations
/// operating on an induction variable or replacing srem by urem.
void SimplifyIndvar::simplifyIVRemainder(BinaryOperator *Rem,
                                         Instruction *IVOperand,
                                         bool IsSigned) {
  auto *NValue = Rem->getOperand(0);
  auto *DValue = Rem->getOperand(1);
  // We're only interested in the case where we know something about
  // the numerator, unless it is an srem, because we want to replace srem
  // by urem in general.
  bool UsedAsNumerator = IVOperand == NValue;
  if (!UsedAsNumerator && !IsSigned)
    return;

  const SCEV *N = SE->getSCEV(NValue);

  // Simplify unnecessary loops away.
  const Loop *ICmpLoop = LI->getLoopFor(Rem->getParent());
  N = SE->getSCEVAtScope(N, ICmpLoop);

  bool IsNumeratorNonNegative = !IsSigned || SE->isKnownNonNegative(N);

  // Do not proceed if the Numerator may be negative
  if (!IsNumeratorNonNegative)
    return;

  const SCEV *D = SE->getSCEV(DValue);
  D = SE->getSCEVAtScope(D, ICmpLoop);

  if (UsedAsNumerator) {
    auto LT = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
    if (SE->isKnownPredicate(LT, N, D)) {
      replaceRemWithNumerator(Rem);
      return;
    }

    auto *T = Rem->getType();
    const auto *NLessOne = SE->getMinusSCEV(N, SE->getOne(T));
    if (SE->isKnownPredicate(LT, NLessOne, D)) {
      replaceRemWithNumeratorOrZero(Rem);
      return;
    }
  }

  // Try to replace SRem with URem, if both N and D are known non-negative.
  // Since we have already checked N, we only need to check D now.
  if (!IsSigned || !SE->isKnownNonNegative(D))
    return;

  replaceSRemWithURem(Rem);
}

bool SimplifyIndvar::eliminateOverflowIntrinsic(WithOverflowInst *WO) {
  const SCEV *LHS = SE->getSCEV(WO->getLHS());
  const SCEV *RHS = SE->getSCEV(WO->getRHS());
  if (!SE->willNotOverflow(WO->getBinaryOp(), WO->isSigned(), LHS, RHS))
    return false;

  // Proved no overflow, nuke the overflow check and, if possible, the overflow
  // intrinsic as well.

  BinaryOperator *NewResult = BinaryOperator::Create(
      WO->getBinaryOp(), WO->getLHS(), WO->getRHS(), "", WO);

  if (WO->isSigned())
    NewResult->setHasNoSignedWrap(true);
  else
    NewResult->setHasNoUnsignedWrap(true);

  SmallVector<ExtractValueInst *, 4> ToDelete;

  for (auto *U : WO->users()) {
    if (auto *EVI = dyn_cast<ExtractValueInst>(U)) {
      if (EVI->getIndices()[0] == 1)
        EVI->replaceAllUsesWith(ConstantInt::getFalse(WO->getContext()));
      else {
        assert(EVI->getIndices()[0] == 0 && "Only two possibilities!");
        EVI->replaceAllUsesWith(NewResult);
      }
      ToDelete.push_back(EVI);
    }
  }

  for (auto *EVI : ToDelete)
    EVI->eraseFromParent();

  if (WO->use_empty())
    WO->eraseFromParent();

  Changed = true;
  return true;
}

bool SimplifyIndvar::eliminateSaturatingIntrinsic(SaturatingInst *SI) {
  const SCEV *LHS = SE->getSCEV(SI->getLHS());
  const SCEV *RHS = SE->getSCEV(SI->getRHS());
  if (!SE->willNotOverflow(SI->getBinaryOp(), SI->isSigned(), LHS, RHS))
    return false;

  BinaryOperator *BO = BinaryOperator::Create(
      SI->getBinaryOp(), SI->getLHS(), SI->getRHS(), SI->getName(), SI);
  if (SI->isSigned())
    BO->setHasNoSignedWrap();
  else
    BO->setHasNoUnsignedWrap();

  SI->replaceAllUsesWith(BO);
  DeadInsts.emplace_back(SI);
  Changed = true;
  return true;
}

bool SimplifyIndvar::eliminateTrunc(TruncInst *TI) {
  // It is always legal to replace
  //   icmp <pred> i32 trunc(iv), n
  // with
  //   icmp <pred> i64 sext(trunc(iv)), sext(n), if pred is signed predicate.
  // Or with
  //   icmp <pred> i64 zext(trunc(iv)), zext(n), if pred is unsigned predicate.
  // Or with either of these if pred is an equality predicate.
  //
  // If we can prove that iv == sext(trunc(iv)) or iv == zext(trunc(iv)) for
  // every comparison which uses trunc, it means that we can replace each of
  // them with comparison of iv against sext/zext(n). We no longer need trunc
  // after that.
  //
  // TODO: Should we do this if we can widen *some* comparisons, but not all
  // of them? Sometimes it is enough to enable other optimizations, but the
  // trunc instruction will stay in the loop.
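  //
  // A hand-written illustration (not from a test): given
  //   %t = trunc i64 %iv to i32
  //   %c = icmp slt i32 %t, %n        ; %n loop-invariant
  // if iv == sext(trunc(iv)) holds, the compare can instead be
  //   %n.sext = sext i32 %n to i64
  //   %c = icmp slt i64 %iv, %n.sext
  // after which the trunc is dead.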
  Value *IV = TI->getOperand(0);
  Type *IVTy = IV->getType();
  const SCEV *IVSCEV = SE->getSCEV(IV);
  const SCEV *TISCEV = SE->getSCEV(TI);

  // Check if iv == zext(trunc(iv)) and if iv == sext(trunc(iv)). If so, we can
  // get rid of trunc
  bool DoesSExtCollapse = false;
  bool DoesZExtCollapse = false;
  if (IVSCEV == SE->getSignExtendExpr(TISCEV, IVTy))
    DoesSExtCollapse = true;
  if (IVSCEV == SE->getZeroExtendExpr(TISCEV, IVTy))
    DoesZExtCollapse = true;
  // If neither sext nor zext collapses, it is not profitable to do any
  // transform. Bail.
  if (!DoesSExtCollapse && !DoesZExtCollapse)
    return false;

  // Collect users of the trunc that look like comparisons against invariants.
  // Bail if we find something different.
  SmallVector<ICmpInst *, 4> ICmpUsers;
  for (auto *U : TI->users()) {
    // We don't care about users in unreachable blocks.
    if (isa<Instruction>(U) &&
        !DT->isReachableFromEntry(cast<Instruction>(U)->getParent()))
      continue;
    ICmpInst *ICI = dyn_cast<ICmpInst>(U);
    if (!ICI) return false;
    assert(L->contains(ICI->getParent()) && "LCSSA form broken?");
    if (!(ICI->getOperand(0) == TI && L->isLoopInvariant(ICI->getOperand(1))) &&
        !(ICI->getOperand(1) == TI && L->isLoopInvariant(ICI->getOperand(0))))
      return false;
    // If we cannot get rid of trunc, bail.
    if (ICI->isSigned() && !DoesSExtCollapse)
      return false;
    if (ICI->isUnsigned() && !DoesZExtCollapse)
      return false;
    // For equality, either signed or unsigned works.
    ICmpUsers.push_back(ICI);
  }

  auto CanUseZExt = [&](ICmpInst *ICI) {
    // Unsigned comparison can be widened as unsigned.
    if (ICI->isUnsigned())
      return true;
    // Is it profitable to do zext?
    if (!DoesZExtCollapse)
      return false;
    // For equality, we can safely zext both parts.
    if (ICI->isEquality())
      return true;
    // Otherwise we can only use zext when comparing two non-negative or two
    // negative values. But in practice, we will never pass DoesZExtCollapse
    // check for a negative value, because zext(trunc(x)) is non-negative. So
    // it only makes sense to check for non-negativity here.
    const SCEV *SCEVOP1 = SE->getSCEV(ICI->getOperand(0));
    const SCEV *SCEVOP2 = SE->getSCEV(ICI->getOperand(1));
    return SE->isKnownNonNegative(SCEVOP1) && SE->isKnownNonNegative(SCEVOP2);
  };
  // Replace all comparisons against trunc with comparisons against IV.
  for (auto *ICI : ICmpUsers) {
    bool IsSwapped = L->isLoopInvariant(ICI->getOperand(0));
    auto *Op1 = IsSwapped ? ICI->getOperand(0) : ICI->getOperand(1);
    Instruction *Ext = nullptr;
    // For signed/unsigned predicate, replace the old comparison with comparison
    // of immediate IV against sext/zext of the invariant argument. If we can
    // use either sext or zext (i.e. we are dealing with equality predicate),
    // then prefer zext as a more canonical form.
    // TODO: If we see a signed comparison which can be turned into unsigned,
    // we can do it here for canonicalization purposes.
    ICmpInst::Predicate Pred = ICI->getPredicate();
    if (IsSwapped) Pred = ICmpInst::getSwappedPredicate(Pred);
    if (CanUseZExt(ICI)) {
      assert(DoesZExtCollapse && "Unprofitable zext?");
      Ext = new ZExtInst(Op1, IVTy, "zext", ICI);
      Pred = ICmpInst::getUnsignedPredicate(Pred);
    } else {
      assert(DoesSExtCollapse && "Unprofitable sext?");
      Ext = new SExtInst(Op1, IVTy, "sext", ICI);
      assert(Pred == ICmpInst::getSignedPredicate(Pred) && "Must be signed!");
    }
    bool Changed;
    L->makeLoopInvariant(Ext, Changed);
    (void)Changed;
    ICmpInst *NewICI = new ICmpInst(ICI, Pred, IV, Ext);
    ICI->replaceAllUsesWith(NewICI);
    DeadInsts.emplace_back(ICI);
  }

  // Trunc no longer needed.
  TI->replaceAllUsesWith(PoisonValue::get(TI->getType()));
  DeadInsts.emplace_back(TI);
  return true;
}

/// Eliminate an operation that consumes a simple IV and has no observable
/// side-effect given the range of IV values. IVOperand is guaranteed SCEVable,
/// but UseInst may not be.
bool SimplifyIndvar::eliminateIVUser(Instruction *UseInst,
                                     Instruction *IVOperand) {
  if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
    eliminateIVComparison(ICmp, IVOperand);
    return true;
  }
  if (BinaryOperator *Bin = dyn_cast<BinaryOperator>(UseInst)) {
    bool IsSRem = Bin->getOpcode() == Instruction::SRem;
    if (IsSRem || Bin->getOpcode() == Instruction::URem) {
      simplifyIVRemainder(Bin, IVOperand, IsSRem);
      return true;
    }

    if (Bin->getOpcode() == Instruction::SDiv)
      return eliminateSDiv(Bin);
  }

  if (auto *WO = dyn_cast<WithOverflowInst>(UseInst))
    if (eliminateOverflowIntrinsic(WO))
      return true;

  if (auto *SI = dyn_cast<SaturatingInst>(UseInst))
    if (eliminateSaturatingIntrinsic(SI))
      return true;

  if (auto *TI = dyn_cast<TruncInst>(UseInst))
    if (eliminateTrunc(TI))
      return true;

  if (eliminateIdentitySCEV(UseInst, IVOperand))
    return true;

  return false;
}

static Instruction *GetLoopInvariantInsertPosition(Loop *L, Instruction *Hint) {
  if (auto *BB = L->getLoopPreheader())
    return BB->getTerminator();

  return Hint;
}

/// Replace the UseInst with a loop invariant expression if it is safe.
bool SimplifyIndvar::replaceIVUserWithLoopInvariant(Instruction *I) {
  if (!SE->isSCEVable(I->getType()))
    return false;

  // Get the symbolic expression for this instruction.
  const SCEV *S = SE->getSCEV(I);

  if (!SE->isLoopInvariant(S, L))
    return false;

  // Do not generate something ridiculous even if S is loop invariant.
  if (Rewriter.isHighCostExpansion(S, L, SCEVCheapExpansionBudget, TTI, I))
    return false;

  auto *IP = GetLoopInvariantInsertPosition(L, I);

  if (!Rewriter.isSafeToExpandAt(S, IP)) {
    LLVM_DEBUG(dbgs() << "INDVARS: Can not replace IV user: " << *I
                      << " with non-speculable loop invariant: " << *S << '\n');
    return false;
  }

  auto *Invariant = Rewriter.expandCodeFor(S, I->getType(), IP);

  I->replaceAllUsesWith(Invariant);
  LLVM_DEBUG(dbgs() << "INDVARS: Replace IV user: " << *I
                    << " with loop invariant: " << *S << '\n');
  ++NumFoldedUser;
  Changed = true;
  DeadInsts.emplace_back(I);
  return true;
}

/// Eliminate redundant type cast between integer and float.
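/// For example (a sketch of the pattern, not from a test):
///   %f = sitofp i32 %iv to double
///   %i = fptosi double %f to i32
/// can use %iv directly when the IV's range fits in the FP mantissa, since
/// the round trip through double is then lossless.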
bool SimplifyIndvar::replaceFloatIVWithIntegerIV(Instruction *UseInst) {
  if (UseInst->getOpcode() != CastInst::SIToFP &&
      UseInst->getOpcode() != CastInst::UIToFP)
    return false;

  Instruction *IVOperand = cast<Instruction>(UseInst->getOperand(0));
  // Get the symbolic expression for this instruction.
  const SCEV *IV = SE->getSCEV(IVOperand);
  unsigned MaskBits;
  if (UseInst->getOpcode() == CastInst::SIToFP)
    MaskBits = SE->getSignedRange(IV).getMinSignedBits();
  else
    MaskBits = SE->getUnsignedRange(IV).getActiveBits();
  unsigned DestNumSigBits = UseInst->getType()->getFPMantissaWidth();
  if (MaskBits <= DestNumSigBits) {
    for (User *U : UseInst->users()) {
      // Match for fptosi/fptoui of sitofp and with same type.
      auto *CI = dyn_cast<CastInst>(U);
      if (!CI)
        continue;

      CastInst::CastOps Opcode = CI->getOpcode();
      if (Opcode != CastInst::FPToSI && Opcode != CastInst::FPToUI)
        continue;

      Value *Conv = nullptr;
      if (IVOperand->getType() != CI->getType()) {
        IRBuilder<> Builder(CI);
        StringRef Name = IVOperand->getName();
        // To match InstCombine logic, we only need sext if both fptosi and
        // sitofp are used. If one of them is unsigned, then we can use zext.
        if (SE->getTypeSizeInBits(IVOperand->getType()) >
            SE->getTypeSizeInBits(CI->getType())) {
          Conv = Builder.CreateTrunc(IVOperand, CI->getType(), Name + ".trunc");
        } else if (Opcode == CastInst::FPToUI ||
                   UseInst->getOpcode() == CastInst::UIToFP) {
          Conv = Builder.CreateZExt(IVOperand, CI->getType(), Name + ".zext");
        } else {
          Conv = Builder.CreateSExt(IVOperand, CI->getType(), Name + ".sext");
        }
      } else
        Conv = IVOperand;

      CI->replaceAllUsesWith(Conv);
      DeadInsts.push_back(CI);
      LLVM_DEBUG(dbgs() << "INDVARS: Replace IV user: " << *CI
                        << " with: " << *Conv << '\n');

      ++NumFoldedUser;
      Changed = true;
    }
  }

  return Changed;
}

/// Eliminate any operation that SCEV can prove is an identity function.
bool SimplifyIndvar::eliminateIdentitySCEV(Instruction *UseInst,
                                           Instruction *IVOperand) {
  if (!SE->isSCEVable(UseInst->getType()) ||
      (UseInst->getType() != IVOperand->getType()) ||
      (SE->getSCEV(UseInst) != SE->getSCEV(IVOperand)))
    return false;

  // getSCEV(X) == getSCEV(Y) does not guarantee that X and Y are related in the
  // dominator tree, even if X is an operand to Y. For instance, in
  //
  //     %iv = phi i32 {0,+,1}
  //     br %cond, label %left, label %merge
  //
  //   left:
  //     %X = add i32 %iv, 0
  //     br label %merge
  //
  //   merge:
  //     %M = phi (%X, %iv)
  //
  // getSCEV(%M) == getSCEV(%X) == {0,+,1}, but %X does not dominate %M, and
  // %M.replaceAllUsesWith(%X) would be incorrect.

  if (isa<PHINode>(UseInst))
    // If UseInst is not a PHI node then we know that IVOperand dominates
    // UseInst directly from the legality of SSA.
    if (!DT || !DT->dominates(IVOperand, UseInst))
      return false;

  if (!LI->replacementPreservesLCSSAForm(UseInst, IVOperand))
    return false;

  LLVM_DEBUG(dbgs() << "INDVARS: Eliminated identity: " << *UseInst << '\n');

  SE->forgetValue(UseInst);
  UseInst->replaceAllUsesWith(IVOperand);
  ++NumElimIdentity;
  Changed = true;
  DeadInsts.emplace_back(UseInst);
  return true;
}

/// Annotate BO with nsw / nuw if it provably does not signed-overflow /
/// unsigned-overflow. Returns true if anything changed, false otherwise.
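/// For instance (illustrative): with %iv = {0,+,1} and a trip count small
/// enough that %iv + 1 can overflow in neither sense,
///   %inc = add i32 %iv, 1
/// may be annotated as
///   %inc = add nuw nsw i32 %iv, 1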
bool SimplifyIndvar::strengthenOverflowingOperation(BinaryOperator *BO,
                                                    Instruction *IVOperand) {
  auto Flags = SE->getStrengthenedNoWrapFlagsFromBinOp(
      cast<OverflowingBinaryOperator>(BO));

  if (!Flags)
    return false;

  BO->setHasNoUnsignedWrap(ScalarEvolution::maskFlags(*Flags, SCEV::FlagNUW) ==
                           SCEV::FlagNUW);
  BO->setHasNoSignedWrap(ScalarEvolution::maskFlags(*Flags, SCEV::FlagNSW) ==
                         SCEV::FlagNSW);

  // The getStrengthenedNoWrapFlagsFromBinOp() check inferred additional nowrap
  // flags on addrecs while performing zero/sign extensions. We could call
  // forgetValue() here to make sure those flags also propagate to any other
  // SCEV expressions based on the addrec. However, this can have pathological
  // compile-time impact, see https://bugs.llvm.org/show_bug.cgi?id=50384.
  return true;
}

/// Annotate the Shr in (X << IVOperand) >> C as exact using the
/// information from the IV's range. Returns true if anything changed, false
/// otherwise.
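/// For example (illustrative): if SCEV proves %iv >= 3, then in
///   %shl = shl i32 %x, %iv
///   %shr = lshr i32 %shl, 3
/// the right shift only drops bits known to be zero, so it can be marked
/// exact.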
bool SimplifyIndvar::strengthenRightShift(BinaryOperator *BO,
                                          Instruction *IVOperand) {
  using namespace llvm::PatternMatch;

  if (BO->getOpcode() == Instruction::Shl) {
    bool Changed = false;
    ConstantRange IVRange = SE->getUnsignedRange(SE->getSCEV(IVOperand));
    for (auto *U : BO->users()) {
      const APInt *C;
      if (match(U,
                m_AShr(m_Shl(m_Value(), m_Specific(IVOperand)), m_APInt(C))) ||
          match(U,
                m_LShr(m_Shl(m_Value(), m_Specific(IVOperand)), m_APInt(C)))) {
        BinaryOperator *Shr = cast<BinaryOperator>(U);
        if (!Shr->isExact() && IVRange.getUnsignedMin().uge(*C)) {
          Shr->setIsExact(true);
          Changed = true;
        }
      }
    }
    return Changed;
  }

  return false;
}

/// Add all uses of Def to the current IV's worklist.
static void pushIVUsers(
    Instruction *Def, Loop *L,
    SmallPtrSet<Instruction *, 16> &Simplified,
    SmallVectorImpl<std::pair<Instruction *, Instruction *>> &SimpleIVUsers) {

  for (User *U : Def->users()) {
    Instruction *UI = cast<Instruction>(U);

    // Avoid infinite or exponential worklist processing.
    // Also ensure unique worklist users.
    // If Def is a LoopPhi, it may not be in the Simplified set, so check for
    // self edges first.
    if (UI == Def)
      continue;

    // Only change the current Loop, do not change the other parts (e.g. other
    // Loops).
    if (!L->contains(UI))
      continue;

    // Do not push the same instruction more than once.
    if (!Simplified.insert(UI).second)
      continue;

    SimpleIVUsers.push_back(std::make_pair(UI, Def));
  }
}

/// Return true if this instruction generates a simple SCEV
/// expression in terms of that IV.
///
/// This is similar to IVUsers' isInteresting() but processes each instruction
/// non-recursively when the operand is already known to be a simpleIVUser.
///
static bool isSimpleIVUser(Instruction *I, const Loop *L, ScalarEvolution *SE) {
  if (!SE->isSCEVable(I->getType()))
    return false;

  // Get the symbolic expression for this instruction.
  const SCEV *S = SE->getSCEV(I);

  // Only consider affine recurrences.
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S);
  if (AR && AR->getLoop() == L)
    return true;

  return false;
}

/// Iteratively perform simplification on a worklist of users
/// of the specified induction variable. Each successive simplification may push
/// more users which may themselves be candidates for simplification.
///
/// This algorithm does not require IVUsers analysis. Instead, it simplifies
/// instructions in-place during analysis. Rather than rewriting induction
/// variables bottom-up from their users, it transforms a chain of IVUsers
/// top-down, updating the IR only when it encounters a clear optimization
/// opportunity.
///
/// Once DisableIVRewrite is default, LSR will be the only client of IVUsers.
///
void SimplifyIndvar::simplifyUsers(PHINode *CurrIV, IVVisitor *V) {
  if (!SE->isSCEVable(CurrIV->getType()))
    return;

  // Instructions processed by SimplifyIndvar for CurrIV.
  SmallPtrSet<Instruction *, 16> Simplified;

  // Use-def pairs of IV users waiting to be processed for CurrIV.
  SmallVector<std::pair<Instruction *, Instruction *>, 8> SimpleIVUsers;

  // Push users of the current LoopPhi. In rare cases, pushIVUsers may be
  // called multiple times for the same LoopPhi. This is the proper thing to
  // do for loop header phis that use each other.
  pushIVUsers(CurrIV, L, Simplified, SimpleIVUsers);

  while (!SimpleIVUsers.empty()) {
    std::pair<Instruction *, Instruction *> UseOper =
        SimpleIVUsers.pop_back_val();
    Instruction *UseInst = UseOper.first;

    // If a user of the IndVar is trivially dead, we prefer just to mark it
    // dead rather than try to do some complex analysis or transformation
    // (such as widening) based on it.
    // TODO: Propagate TLI and pass it here to handle more cases.
    if (isInstructionTriviallyDead(UseInst, /* TLI */ nullptr)) {
      DeadInsts.emplace_back(UseInst);
      continue;
    }

    // Bypass back edges to avoid extra work.
    if (UseInst == CurrIV) continue;

    // Try to replace UseInst with a loop invariant before any other
    // simplifications.
    if (replaceIVUserWithLoopInvariant(UseInst))
      continue;

    Instruction *IVOperand = UseOper.second;
    for (unsigned N = 0; IVOperand; ++N) {
      assert(N <= Simplified.size() && "runaway iteration");
      (void) N;

      Value *NewOper = foldIVUser(UseInst, IVOperand);
      if (!NewOper)
        break; // done folding
      IVOperand = dyn_cast<Instruction>(NewOper);
    }
    if (!IVOperand)
      continue;

    if (eliminateIVUser(UseInst, IVOperand)) {
      pushIVUsers(IVOperand, L, Simplified, SimpleIVUsers);
      continue;
    }

    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(UseInst)) {
      if ((isa<OverflowingBinaryOperator>(BO) &&
           strengthenOverflowingOperation(BO, IVOperand)) ||
          (isa<ShlOperator>(BO) && strengthenRightShift(BO, IVOperand))) {
        // Re-queue uses of the now modified binary operator and fall
        // through to the checks that remain.
        pushIVUsers(IVOperand, L, Simplified, SimpleIVUsers);
      }
    }

    // Try to use integer induction for FPToSI of float induction directly.
    if (replaceFloatIVWithIntegerIV(UseInst)) {
      // Re-queue the potentially new direct uses of IVOperand.
      pushIVUsers(IVOperand, L, Simplified, SimpleIVUsers);
      continue;
    }

    CastInst *Cast = dyn_cast<CastInst>(UseInst);
    if (V && Cast) {
      V->visitCast(Cast);
      continue;
    }
    if (isSimpleIVUser(UseInst, L, SE)) {
      pushIVUsers(UseInst, L, Simplified, SimpleIVUsers);
    }
  }
}

namespace llvm {

void IVVisitor::anchor() { }

/// Simplify instructions that use this induction variable
/// by using ScalarEvolution to analyze the IV's recurrence.
bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT,
                       LoopInfo *LI, const TargetTransformInfo *TTI,
                       SmallVectorImpl<WeakTrackingVH> &Dead,
                       SCEVExpander &Rewriter, IVVisitor *V) {
  SimplifyIndvar SIV(LI->getLoopFor(CurrIV->getParent()), SE, DT, LI, TTI,
                     Rewriter, Dead);
  SIV.simplifyUsers(CurrIV, V);
  return SIV.hasChanged();
}

/// Simplify users of induction variables within this
/// loop. This does not actually change or add IVs.
bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
                     LoopInfo *LI, const TargetTransformInfo *TTI,
                     SmallVectorImpl<WeakTrackingVH> &Dead) {
  SCEVExpander Rewriter(*SE, SE->getDataLayout(), "indvars");
#ifndef NDEBUG
  Rewriter.setDebugType(DEBUG_TYPE);
#endif
  bool Changed = false;
  for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
    Changed |=
        simplifyUsersOfIV(cast<PHINode>(I), SE, DT, LI, TTI, Dead, Rewriter);
  }
  return Changed;
}

} // namespace llvm

namespace {
//===----------------------------------------------------------------------===//
// Widen Induction Variables - Extend the width of an IV to cover its
// widest uses.
//===----------------------------------------------------------------------===//

class WidenIV {
  // Parameters
  PHINode *OrigPhi;
  Type *WideType;

  // Context
  LoopInfo *LI;
  Loop *L;
  ScalarEvolution *SE;
  DominatorTree *DT;

  // Does the module have any calls to the llvm.experimental.guard intrinsic
  // at all? If not we can avoid scanning instructions looking for guards.
  bool HasGuards;

  bool UsePostIncrementRanges;

  // Statistics
  unsigned NumElimExt = 0;
  unsigned NumWidened = 0;

  // Result
  PHINode *WidePhi = nullptr;
  Instruction *WideInc = nullptr;
  const SCEV *WideIncExpr = nullptr;
  SmallVectorImpl<WeakTrackingVH> &DeadInsts;

  SmallPtrSet<Instruction *, 16> Widened;

  enum class ExtendKind { Zero, Sign, Unknown };

  // A map tracking the kind of extension used to widen each narrow IV
  // and narrow IV user.
  // Key: pointer to a narrow IV or IV user.
  // Value: the kind of extension used to widen this Instruction.
  DenseMap<AssertingVH<Instruction>, ExtendKind> ExtendKindMap;

  using DefUserPair = std::pair<AssertingVH<Value>, AssertingVH<Instruction>>;

  // A map with control-dependent ranges for post increment IV uses. The key is
  // a pair of IV def and a use of this def denoting the context. The value is
  // a ConstantRange representing possible values of the def at the given
  // context.
  DenseMap<DefUserPair, ConstantRange> PostIncRangeInfos;

  std::optional<ConstantRange> getPostIncRangeInfo(Value *Def,
                                                   Instruction *UseI) {
    DefUserPair Key(Def, UseI);
    auto It = PostIncRangeInfos.find(Key);
    return It == PostIncRangeInfos.end()
               ? std::optional<ConstantRange>(std::nullopt)
               : std::optional<ConstantRange>(It->second);
  }

  void calculatePostIncRanges(PHINode *OrigPhi);
  void calculatePostIncRange(Instruction *NarrowDef, Instruction *NarrowUser);

  void updatePostIncRangeInfo(Value *Def, Instruction *UseI, ConstantRange R) {
    DefUserPair Key(Def, UseI);
    auto It = PostIncRangeInfos.find(Key);
    if (It == PostIncRangeInfos.end())
      PostIncRangeInfos.insert({Key, R});
    else
      It->second = R.intersectWith(It->second);
  }

public:
  /// Record a link in the Narrow IV def-use chain along with the WideIV that
  /// computes the same value as the Narrow IV def. This avoids caching Use*
  /// pointers.
  struct NarrowIVDefUse {
    Instruction *NarrowDef = nullptr;
    Instruction *NarrowUse = nullptr;
    Instruction *WideDef = nullptr;

    // True if the narrow def is never negative. Tracking this information lets
    // us use a sign extension instead of a zero extension or vice versa, when
    // profitable and legal.
    bool NeverNegative = false;

    NarrowIVDefUse(Instruction *ND, Instruction *NU, Instruction *WD,
                   bool NeverNegative)
        : NarrowDef(ND), NarrowUse(NU), WideDef(WD),
          NeverNegative(NeverNegative) {}
  };

  WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv,
          DominatorTree *DTree, SmallVectorImpl<WeakTrackingVH> &DI,
          bool HasGuards, bool UsePostIncrementRanges = true);

  PHINode *createWideIV(SCEVExpander &Rewriter);

  unsigned getNumElimExt() { return NumElimExt; }
  unsigned getNumWidened() { return NumWidened; }

protected:
  Value *createExtendInst(Value *NarrowOper, Type *WideType, bool IsSigned,
                          Instruction *Use);

  Instruction *cloneIVUser(NarrowIVDefUse DU, const SCEVAddRecExpr *WideAR);
  Instruction *cloneArithmeticIVUser(NarrowIVDefUse DU,
                                     const SCEVAddRecExpr *WideAR);
  Instruction *cloneBitwiseIVUser(NarrowIVDefUse DU);

  ExtendKind getExtendKind(Instruction *I);

  using WidenedRecTy = std::pair<const SCEVAddRecExpr *, ExtendKind>;

  WidenedRecTy getWideRecurrence(NarrowIVDefUse DU);

  WidenedRecTy getExtendedOperandRecurrence(NarrowIVDefUse DU);

  const SCEV *getSCEVByOpCode(const SCEV *LHS, const SCEV *RHS,
                              unsigned OpCode) const;

  Instruction *widenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter);

  bool widenLoopCompare(NarrowIVDefUse DU);
  bool widenWithVariantUse(NarrowIVDefUse DU);

  void pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef);

private:
  SmallVector<NarrowIVDefUse, 8> NarrowIVUsers;
};
} // namespace

/// Determine the insertion point for this user. By default, insert immediately
/// before the user. SCEVExpander or LICM will hoist loop invariants out of the
/// loop. For PHI nodes, there may be multiple uses, so compute the nearest
/// common dominator for the incoming blocks. A nullptr can be returned if no
/// viable location is found: it may happen if User is a PHI and Def only comes
/// to this PHI from unreachable blocks.
static Instruction *getInsertPointForUses(Instruction *User, Value *Def,
                                          DominatorTree *DT, LoopInfo *LI) {
  PHINode *PHI = dyn_cast<PHINode>(User);
  if (!PHI)
    return User;

  Instruction *InsertPt = nullptr;
  for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i) {
    if (PHI->getIncomingValue(i) != Def)
      continue;

    BasicBlock *InsertBB = PHI->getIncomingBlock(i);

    if (!DT->isReachableFromEntry(InsertBB))
      continue;

    if (!InsertPt) {
      InsertPt = InsertBB->getTerminator();
      continue;
    }
    InsertBB = DT->findNearestCommonDominator(InsertPt->getParent(), InsertBB);
    InsertPt = InsertBB->getTerminator();
  }

  // If we have skipped all inputs, it means that Def only comes to Phi from
  // unreachable blocks.
  if (!InsertPt)
    return nullptr;

  auto *DefI = dyn_cast<Instruction>(Def);
  if (!DefI)
    return InsertPt;

  assert(DT->dominates(DefI, InsertPt) && "def does not dominate all uses");

  auto *L = LI->getLoopFor(DefI->getParent());
  assert(!L || L->contains(LI->getLoopFor(InsertPt->getParent())));

  for (auto *DTN = (*DT)[InsertPt->getParent()]; DTN; DTN = DTN->getIDom())
    if (LI->getLoopFor(DTN->getBlock()) == L)
      return DTN->getBlock()->getTerminator();

  llvm_unreachable("DefI dominates InsertPt!");
}

WidenIV::WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv,
                 DominatorTree *DTree, SmallVectorImpl<WeakTrackingVH> &DI,
                 bool HasGuards, bool UsePostIncrementRanges)
    : OrigPhi(WI.NarrowIV), WideType(WI.WidestNativeType), LI(LInfo),
      L(LI->getLoopFor(OrigPhi->getParent())), SE(SEv), DT(DTree),
      HasGuards(HasGuards), UsePostIncrementRanges(UsePostIncrementRanges),
      DeadInsts(DI) {
  assert(L->getHeader() == OrigPhi->getParent() && "Phi must be an IV");
  ExtendKindMap[OrigPhi] = WI.IsSigned ? ExtendKind::Sign : ExtendKind::Zero;
}

Value *WidenIV::createExtendInst(Value *NarrowOper, Type *WideType,
                                 bool IsSigned, Instruction *Use) {
  // Set the debug location and conservative insertion point.
  IRBuilder<> Builder(Use);
  // Hoist the insertion point into loop preheaders as far as possible.
  for (const Loop *L = LI->getLoopFor(Use->getParent());
       L && L->getLoopPreheader() && L->isLoopInvariant(NarrowOper);
       L = L->getParentLoop())
    Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator());

  return IsSigned ? Builder.CreateSExt(NarrowOper, WideType) :
                    Builder.CreateZExt(NarrowOper, WideType);
}

/// Instantiate a wide operation to replace a narrow operation. This only needs
/// to handle operations that can evaluate to SCEVAddRec. It can safely return
/// 0 for any operation we decide not to clone.
Instruction *WidenIV::cloneIVUser(WidenIV::NarrowIVDefUse DU,
                                  const SCEVAddRecExpr *WideAR) {
  unsigned Opcode = DU.NarrowUse->getOpcode();
  switch (Opcode) {
  default:
    return nullptr;
  case Instruction::Add:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::Sub:
    return cloneArithmeticIVUser(DU, WideAR);

  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return cloneBitwiseIVUser(DU);
  }
}

Instruction *WidenIV::cloneBitwiseIVUser(WidenIV::NarrowIVDefUse DU) {
  Instruction *NarrowUse = DU.NarrowUse;
  Instruction *NarrowDef = DU.NarrowDef;
  Instruction *WideDef = DU.WideDef;

  LLVM_DEBUG(dbgs() << "Cloning bitwise IVUser: " << *NarrowUse << "\n");

  // Replace NarrowDef operands with WideDef. Otherwise, we don't know anything
  // about the narrow operand yet so must insert a [sz]ext. It is probably loop
  // invariant and will be folded or hoisted. If it actually comes from a
  // widened IV, it should be removed during a future call to widenIVUse.
  bool IsSigned = getExtendKind(NarrowDef) == ExtendKind::Sign;
  Value *LHS = (NarrowUse->getOperand(0) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(0), WideType,
                                      IsSigned, NarrowUse);
  Value *RHS = (NarrowUse->getOperand(1) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(1), WideType,
                                      IsSigned, NarrowUse);

  auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
                                        NarrowBO->getName());
  IRBuilder<> Builder(NarrowUse);
  Builder.Insert(WideBO);
  WideBO->copyIRFlags(NarrowBO);
  return WideBO;
}

Instruction *WidenIV::cloneArithmeticIVUser(WidenIV::NarrowIVDefUse DU,
                                            const SCEVAddRecExpr *WideAR) {
  Instruction *NarrowUse = DU.NarrowUse;
  Instruction *NarrowDef = DU.NarrowDef;
  Instruction *WideDef = DU.WideDef;

  LLVM_DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n");

  unsigned IVOpIdx = (NarrowUse->getOperand(0) == NarrowDef) ? 0 : 1;

  // We're trying to find X such that
  //
  //  Widen(NarrowDef `op` NonIVNarrowDef) == WideAR == WideDef `op.wide` X
  //
  // We guess two solutions to X, sext(NonIVNarrowDef) and zext(NonIVNarrowDef),
  // and check using SCEV if any of them are correct.

  // Returns true if extending NonIVNarrowDef according to `SignExt` is a
  // correct solution to X.
  auto GuessNonIVOperand = [&](bool SignExt) {
    const SCEV *WideLHS;
    const SCEV *WideRHS;

    auto GetExtend = [this, SignExt](const SCEV *S, Type *Ty) {
      if (SignExt)
        return SE->getSignExtendExpr(S, Ty);
      return SE->getZeroExtendExpr(S, Ty);
    };

    if (IVOpIdx == 0) {
      WideLHS = SE->getSCEV(WideDef);
      const SCEV *NarrowRHS = SE->getSCEV(NarrowUse->getOperand(1));
      WideRHS = GetExtend(NarrowRHS, WideType);
    } else {
      const SCEV *NarrowLHS = SE->getSCEV(NarrowUse->getOperand(0));
      WideLHS = GetExtend(NarrowLHS, WideType);
      WideRHS = SE->getSCEV(WideDef);
    }

    // WideUse is "WideDef `op.wide` X" as described in the comment.
    const SCEV *WideUse =
        getSCEVByOpCode(WideLHS, WideRHS, NarrowUse->getOpcode());

    return WideUse == WideAR;
  };

  bool SignExtend = getExtendKind(NarrowDef) == ExtendKind::Sign;
  if (!GuessNonIVOperand(SignExtend)) {
    SignExtend = !SignExtend;
    if (!GuessNonIVOperand(SignExtend))
      return nullptr;
  }

  Value *LHS = (NarrowUse->getOperand(0) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(0), WideType,
                                      SignExtend, NarrowUse);
  Value *RHS = (NarrowUse->getOperand(1) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(1), WideType,
                                      SignExtend, NarrowUse);

  auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
                                        NarrowBO->getName());

  IRBuilder<> Builder(NarrowUse);
  Builder.Insert(WideBO);
  WideBO->copyIRFlags(NarrowBO);
  return WideBO;
}

WidenIV::ExtendKind WidenIV::getExtendKind(Instruction *I) {
  auto It = ExtendKindMap.find(I);
  assert(It != ExtendKindMap.end() && "Instruction not yet extended!");
  return It->second;
}

const SCEV *WidenIV::getSCEVByOpCode(const SCEV *LHS, const SCEV *RHS,
                                     unsigned OpCode) const {
  switch (OpCode) {
  case Instruction::Add:
    return SE->getAddExpr(LHS, RHS);
  case Instruction::Sub:
    return SE->getMinusSCEV(LHS, RHS);
  case Instruction::Mul:
    return SE->getMulExpr(LHS, RHS);
  case Instruction::UDiv:
    return SE->getUDivExpr(LHS, RHS);
  default:
    llvm_unreachable("Unsupported opcode.");
  }
}

/// No-wrap operations can transfer sign extension of their result to their
/// operands. Generate the SCEV value for the widened operation without
/// actually modifying the IR yet. If the expression after extending the
/// operands is an AddRec for this loop, return the AddRec and the kind of
/// extension used.
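/// For example (a sketch): if %narrow.iv is widened by sign extension and
///   %use = add nsw i32 %narrow.iv, %n
/// then nsw gives sext(%use) == sext(%narrow.iv) + sext(%n), which SCEV can
/// often fold to an AddRec of the wide type, so %use widens without a trunc.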
WidenIV::WidenedRecTy
WidenIV::getExtendedOperandRecurrence(WidenIV::NarrowIVDefUse DU) {
  // Handle the common case of add<nsw/nuw>
  const unsigned OpCode = DU.NarrowUse->getOpcode();
  // Only Add/Sub/Mul instructions supported yet.
  if (OpCode != Instruction::Add && OpCode != Instruction::Sub &&
      OpCode != Instruction::Mul)
    return {nullptr, ExtendKind::Unknown};

  // One operand (NarrowDef) has already been extended to WideDef. Now determine
  // if extending the other will lead to a recurrence.
  const unsigned ExtendOperIdx =
      DU.NarrowUse->getOperand(0) == DU.NarrowDef ? 1 : 0;
  assert(DU.NarrowUse->getOperand(1-ExtendOperIdx) == DU.NarrowDef && "bad DU");

  const SCEV *ExtendOperExpr = nullptr;
  const OverflowingBinaryOperator *OBO =
      cast<OverflowingBinaryOperator>(DU.NarrowUse);
  ExtendKind ExtKind = getExtendKind(DU.NarrowDef);
  if (ExtKind == ExtendKind::Sign && OBO->hasNoSignedWrap())
    ExtendOperExpr = SE->getSignExtendExpr(
        SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
  else if (ExtKind == ExtendKind::Zero && OBO->hasNoUnsignedWrap())
    ExtendOperExpr = SE->getZeroExtendExpr(
        SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
  else
    return {nullptr, ExtendKind::Unknown};

  // When creating this SCEV expr, don't apply the current operation's NSW or
  // NUW flags. This instruction may be guarded by control flow that the
  // no-wrap behavior depends on. Non-control-equivalent instructions can be
  // mapped to the same SCEV expression, and it would be incorrect to transfer
  // NSW/NUW semantics to those operations.
  const SCEV *lhs = SE->getSCEV(DU.WideDef);
  const SCEV *rhs = ExtendOperExpr;

  // Let's swap operands to the initial order for the case of non-commutative
  // operations, like SUB. See PR21014.
  if (ExtendOperIdx == 0)
    std::swap(lhs, rhs);
  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(getSCEVByOpCode(lhs, rhs, OpCode));

  if (!AddRec || AddRec->getLoop() != L)
    return {nullptr, ExtendKind::Unknown};

  return {AddRec, ExtKind};
}

/// Is this instruction potentially interesting for further simplification after
/// widening its type? In other words, can the extend be safely hoisted out of
/// the loop, with SCEV reducing the value to a recurrence on the same loop? If
/// so, return the extended recurrence and the kind of extension used. Otherwise
/// return {nullptr, ExtendKind::Unknown}.
WidenIV::WidenedRecTy WidenIV::getWideRecurrence(WidenIV::NarrowIVDefUse DU) {
  if (!DU.NarrowUse->getType()->isIntegerTy())
    return {nullptr, ExtendKind::Unknown};

  const SCEV *NarrowExpr = SE->getSCEV(DU.NarrowUse);
  if (SE->getTypeSizeInBits(NarrowExpr->getType()) >=
      SE->getTypeSizeInBits(WideType)) {
    // NarrowUse implicitly widens its operand. e.g. a gep with a narrow
    // index. So don't follow this use.
    return {nullptr, ExtendKind::Unknown};
  }

  const SCEV *WideExpr;
  ExtendKind ExtKind;
  if (DU.NeverNegative) {
    WideExpr = SE->getSignExtendExpr(NarrowExpr, WideType);
    if (isa<SCEVAddRecExpr>(WideExpr))
      ExtKind = ExtendKind::Sign;
    else {
      WideExpr = SE->getZeroExtendExpr(NarrowExpr, WideType);
      ExtKind = ExtendKind::Zero;
    }
  } else if (getExtendKind(DU.NarrowDef) == ExtendKind::Sign) {
    WideExpr = SE->getSignExtendExpr(NarrowExpr, WideType);
    ExtKind = ExtendKind::Sign;
  } else {
    WideExpr = SE->getZeroExtendExpr(NarrowExpr, WideType);
    ExtKind = ExtendKind::Zero;
  }
  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(WideExpr);
  if (!AddRec || AddRec->getLoop() != L)
    return {nullptr, ExtendKind::Unknown};
  return {AddRec, ExtKind};
}

/// This IV user cannot be widened. Replace this use of the original narrow IV
/// with a truncation of the new wide IV to isolate and eliminate the narrow IV.
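/// E.g. (illustrative) a use we cannot widen, such as
///   %r = ashr i32 %narrow.iv, 2
/// is rewritten as
///   %t = trunc i64 %wide.iv to i32
///   %r = ashr i32 %t, 2
/// leaving the original narrow IV with no remaining uses.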
static void truncateIVUse(WidenIV::NarrowIVDefUse DU, DominatorTree *DT,
                          LoopInfo *LI) {
  auto *InsertPt = getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI);
  if (!InsertPt)
    return;
  LLVM_DEBUG(dbgs() << "INDVARS: Truncate IV " << *DU.WideDef << " for user "
                    << *DU.NarrowUse << "\n");
  IRBuilder<> Builder(InsertPt);
  Value *Trunc = Builder.CreateTrunc(DU.WideDef, DU.NarrowDef->getType());
  DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, Trunc);
}

/// If the narrow use is a compare instruction, then widen the compare
/// (and possibly the other operand). The extend operation is hoisted into the
/// loop preheader as far as possible.
bool WidenIV::widenLoopCompare(WidenIV::NarrowIVDefUse DU) {
  ICmpInst *Cmp = dyn_cast<ICmpInst>(DU.NarrowUse);
  if (!Cmp)
    return false;

  // We can legally widen the comparison in the following two cases:
  //
  // - The signedness of the IV extension and comparison match
  //
  // - The narrow IV is always positive (and thus its sign extension is equal
  //   to its zero extension). For instance, let's say we're zero extending
  //   %narrow for the following use
  //
  //     icmp slt i32 %narrow, %val   ... (A)
  //
  //   and %narrow is always positive. Then
  //
  //     (A) == icmp slt i32 sext(%narrow), sext(%val)
  //         == icmp slt i32 zext(%narrow), sext(%val)
  bool IsSigned = getExtendKind(DU.NarrowDef) == ExtendKind::Sign;
  if (!(DU.NeverNegative || IsSigned == Cmp->isSigned()))
    return false;

  Value *Op = Cmp->getOperand(Cmp->getOperand(0) == DU.NarrowDef ? 1 : 0);
  unsigned CastWidth = SE->getTypeSizeInBits(Op->getType());
  unsigned IVWidth = SE->getTypeSizeInBits(WideType);
  assert(CastWidth <= IVWidth && "Unexpected width while widening compare.");

  // Widen the compare instruction.
  auto *InsertPt = getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI);
  if (!InsertPt)
    return false;
  IRBuilder<> Builder(InsertPt);
  DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);

  // Widen the other operand of the compare, if necessary.
  if (CastWidth < IVWidth) {
    Value *ExtOp = createExtendInst(Op, WideType, Cmp->isSigned(), Cmp);
    DU.NarrowUse->replaceUsesOfWith(Op, ExtOp);
  }
  return true;
}

// widenIVUse avoids generating a trunc by evaluating the use as an AddRec.
// This will not work when:
//  1) SCEV traces back to an instruction inside the loop that SCEV cannot
//     expand, e.g. add %indvar, (load %addr)
//  2) SCEV finds a loop variant, e.g. add %indvar, %loopvariant
// When SCEV fails to avoid the trunc, we can still try an instruction
// combining approach to prove that the trunc is not required. This can be
// further extended with other instruction combining checks, but for now we
// handle the following case (sub can be "add" and "mul"; "nsw + sext" can be
// "nuw + zext"):
//
// Src:
//   %c = sub nsw %b, %indvar
//   %d = sext %c to i64
// Dst:
//   %indvar.ext1 = sext %indvar to i64
//   %m = sext %b to i64
//   %d = sub nsw i64 %m, %indvar.ext1
// Therefore, as long as the result of add/sub/mul is extended to the wide
// type, no trunc is required regardless of how %b is generated. This pattern
// is common when calculating addresses on 64-bit architectures.
bool WidenIV::widenWithVariantUse(WidenIV::NarrowIVDefUse DU) {
  Instruction *NarrowUse = DU.NarrowUse;
  Instruction *NarrowDef = DU.NarrowDef;
  Instruction *WideDef = DU.WideDef;

  // Handle the common case of add<nsw/nuw>.
  const unsigned OpCode = NarrowUse->getOpcode();
  // Only Add/Sub/Mul instructions are supported.
  if (OpCode != Instruction::Add && OpCode != Instruction::Sub &&
      OpCode != Instruction::Mul)
    return false;

  // The operand that is not defined by NarrowDef of DU. Let's call it the
  // other operand.
  assert((NarrowUse->getOperand(0) == NarrowDef ||
          NarrowUse->getOperand(1) == NarrowDef) &&
         "bad DU");

  const OverflowingBinaryOperator *OBO =
      cast<OverflowingBinaryOperator>(NarrowUse);
  ExtendKind ExtKind = getExtendKind(NarrowDef);
  bool CanSignExtend = ExtKind == ExtendKind::Sign && OBO->hasNoSignedWrap();
  bool CanZeroExtend = ExtKind == ExtendKind::Zero && OBO->hasNoUnsignedWrap();
  auto AnotherOpExtKind = ExtKind;

  // Check that all uses are either:
  // - narrow def (in case we are widening the IV increment);
  // - single-input LCSSA Phis;
  // - comparisons of the chosen type;
  // - extends of the chosen type (raison d'etre).
  SmallVector<Instruction *, 4> ExtUsers;
  SmallVector<PHINode *, 4> LCSSAPhiUsers;
  SmallVector<ICmpInst *, 4> ICmpUsers;
  for (Use &U : NarrowUse->uses()) {
    Instruction *User = cast<Instruction>(U.getUser());
    if (User == NarrowDef)
      continue;
    if (!L->contains(User)) {
      auto *LCSSAPhi = cast<PHINode>(User);
      // Make sure there is only 1 input, so that we don't have to split
      // critical edges.
      if (LCSSAPhi->getNumOperands() != 1)
        return false;
      LCSSAPhiUsers.push_back(LCSSAPhi);
      continue;
    }
    if (auto *ICmp = dyn_cast<ICmpInst>(User)) {
      auto Pred = ICmp->getPredicate();
      // We have 3 types of predicates: signed, unsigned and equality
      // predicates. For equality, it's legal to widen icmp for either sign or
      // zero extend. For sign extend, we can also do so for signed predicates;
      // likewise, for zero extend we can widen icmp for unsigned predicates.
      if (ExtKind == ExtendKind::Zero && ICmpInst::isSigned(Pred))
        return false;
      if (ExtKind == ExtendKind::Sign && ICmpInst::isUnsigned(Pred))
        return false;
      ICmpUsers.push_back(ICmp);
      continue;
    }
    if (ExtKind == ExtendKind::Sign)
      User = dyn_cast<SExtInst>(User);
    else
      User = dyn_cast<ZExtInst>(User);
    if (!User || User->getType() != WideType)
      return false;
    ExtUsers.push_back(User);
  }
  if (ExtUsers.empty()) {
    DeadInsts.emplace_back(NarrowUse);
    return true;
  }

  // We'll prove some facts that should be true in the context of ext users.
  // If there are no users, we are done now. If there are some, pick their
  // common dominator as context.
  const Instruction *CtxI = findCommonDominator(ExtUsers, *DT);

  if (!CanSignExtend && !CanZeroExtend) {
    // Because InstCombine turns 'sub nuw' to 'add' losing the no-wrap flag,
    // we will most likely not see it. Let's try to prove it.
    if (OpCode != Instruction::Add)
      return false;
    if (ExtKind != ExtendKind::Zero)
      return false;
    const SCEV *LHS = SE->getSCEV(OBO->getOperand(0));
    const SCEV *RHS = SE->getSCEV(OBO->getOperand(1));
    // TODO: Support case for NarrowDef = NarrowUse->getOperand(1).
    if (NarrowUse->getOperand(0) != NarrowDef)
      return false;
    if (!SE->isKnownNegative(RHS))
      return false;
    bool ProvedSubNUW = SE->isKnownPredicateAt(ICmpInst::ICMP_UGE, LHS,
                                               SE->getNegativeSCEV(RHS), CtxI);
    if (!ProvedSubNUW)
      return false;
    // In fact, our 'add' is 'sub nuw'. We will need to widen the 2nd operand
    // as neg(zext(neg(op))), which is basically sext(op).
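    // E.g. (illustrative) for %nu = add i8 %iv, -1 under zext-widening, once
    // we prove %iv uge 1 the add is really 'sub nuw %iv, 1', so
    //   zext(%iv + -1) == zext(%iv) - zext(1) == zext(%iv) + sext(-1),
    // i.e. the negative operand must be sign-extended, not zero-extended.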
    AnotherOpExtKind = ExtendKind::Sign;
  }

  // Verify that the defining operand is an AddRec.
  const SCEV *Op1 = SE->getSCEV(WideDef);
  const SCEVAddRecExpr *AddRecOp1 = dyn_cast<SCEVAddRecExpr>(Op1);
  if (!AddRecOp1 || AddRecOp1->getLoop() != L)
    return false;

  LLVM_DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n");

  // Generate the widened use instruction.
  Value *LHS =
      (NarrowUse->getOperand(0) == NarrowDef)
          ? WideDef
          : createExtendInst(NarrowUse->getOperand(0), WideType,
                             AnotherOpExtKind == ExtendKind::Sign, NarrowUse);
  Value *RHS =
      (NarrowUse->getOperand(1) == NarrowDef)
          ? WideDef
          : createExtendInst(NarrowUse->getOperand(1), WideType,
                             AnotherOpExtKind == ExtendKind::Sign, NarrowUse);

  auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
                                        NarrowBO->getName());
  IRBuilder<> Builder(NarrowUse);
  Builder.Insert(WideBO);
  WideBO->copyIRFlags(NarrowBO);
  ExtendKindMap[NarrowUse] = ExtKind;

  for (Instruction *User : ExtUsers) {
    assert(User->getType() == WideType && "Checked before!");
    LLVM_DEBUG(dbgs() << "INDVARS: eliminating " << *User << " replaced by "
                      << *WideBO << "\n");
    ++NumElimExt;
    User->replaceAllUsesWith(WideBO);
    DeadInsts.emplace_back(User);
  }

  for (PHINode *User : LCSSAPhiUsers) {
    assert(User->getNumOperands() == 1 && "Checked before!");
    Builder.SetInsertPoint(User);
    auto *WidePN =
        Builder.CreatePHI(WideBO->getType(), 1, User->getName() + ".wide");
    BasicBlock *LoopExitingBlock = User->getParent()->getSinglePredecessor();
    assert(LoopExitingBlock && L->contains(LoopExitingBlock) &&
           "Not a LCSSA Phi?");
    WidePN->addIncoming(WideBO, LoopExitingBlock);
    Builder.SetInsertPoint(&*User->getParent()->getFirstInsertionPt());
    auto *TruncPN = Builder.CreateTrunc(WidePN, User->getType());
    User->replaceAllUsesWith(TruncPN);
    DeadInsts.emplace_back(User);
  }

  for (ICmpInst *User : ICmpUsers) {
    Builder.SetInsertPoint(User);
    auto ExtendedOp = [&](Value *V) -> Value * {
      if (V == NarrowUse)
        return WideBO;
      if (ExtKind == ExtendKind::Zero)
        return Builder.CreateZExt(V, WideBO->getType());
      else
        return Builder.CreateSExt(V, WideBO->getType());
    };
    auto Pred = User->getPredicate();
    auto *LHS = ExtendedOp(User->getOperand(0));
    auto *RHS = ExtendedOp(User->getOperand(1));
    auto *WideCmp =
        Builder.CreateICmp(Pred, LHS, RHS, User->getName() + ".wide");
    User->replaceAllUsesWith(WideCmp);
    DeadInsts.emplace_back(User);
  }

  return true;
}

/// Determine whether an individual user of the narrow IV can be widened. If
/// so, return the wide clone of the user.
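/// E.g. (illustrative) widening %inc = add nsw i32 %narrow.iv, 1 against a
/// sign-extended wide IV produces %inc.wide = add nsw i64 %wide.iv, 1, which
/// is returned so its own users are pushed onto the worklist in turn.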
Instruction *WidenIV::widenIVUse(WidenIV::NarrowIVDefUse DU,
                                 SCEVExpander &Rewriter) {
  assert(ExtendKindMap.count(DU.NarrowDef) &&
         "Should already know the kind of extension used to widen NarrowDef");

  // Stop traversing the def-use chain at inner-loop phis or post-loop phis.
  if (PHINode *UsePhi = dyn_cast<PHINode>(DU.NarrowUse)) {
    if (LI->getLoopFor(UsePhi->getParent()) != L) {
      // For LCSSA phis, sink the truncate outside the loop.
      // After SimplifyCFG most loop exit targets have a single predecessor.
      // Otherwise fall back to a truncate within the loop.
      if (UsePhi->getNumOperands() != 1)
        truncateIVUse(DU, DT, LI);
      else {
        // Widening the PHI requires us to insert a trunc. The logical place
        // for this trunc is in the same BB as the PHI. This is not possible
        // if the BB is terminated by a catchswitch.
        if (isa<CatchSwitchInst>(UsePhi->getParent()->getTerminator()))
          return nullptr;

        PHINode *WidePhi =
            PHINode::Create(DU.WideDef->getType(), 1,
                            UsePhi->getName() + ".wide", UsePhi);
        WidePhi->addIncoming(DU.WideDef, UsePhi->getIncomingBlock(0));
        IRBuilder<> Builder(&*WidePhi->getParent()->getFirstInsertionPt());
        Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType());
        UsePhi->replaceAllUsesWith(Trunc);
        DeadInsts.emplace_back(UsePhi);
        LLVM_DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi << " to "
                          << *WidePhi << "\n");
      }
      return nullptr;
    }
  }

  // This narrow use can be widened by a sext if it's non-negative or its
  // narrow def was widened by a sext. Same for zext.
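  // E.g. (illustrative) if %narrow.def is known never to be negative, then
  //   sext i32 %narrow.def to i64 == zext i32 %narrow.def to i64,
  // so a user of either extension kind can be absorbed by the wide IV.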
  auto canWidenBySExt = [&]() {
    return DU.NeverNegative || getExtendKind(DU.NarrowDef) == ExtendKind::Sign;
  };
  auto canWidenByZExt = [&]() {
    return DU.NeverNegative || getExtendKind(DU.NarrowDef) == ExtendKind::Zero;
  };

  // Our raison d'etre! Eliminate sign and zero extension.
  if ((isa<SExtInst>(DU.NarrowUse) && canWidenBySExt()) ||
      (isa<ZExtInst>(DU.NarrowUse) && canWidenByZExt())) {
    Value *NewDef = DU.WideDef;
    if (DU.NarrowUse->getType() != WideType) {
      unsigned CastWidth = SE->getTypeSizeInBits(DU.NarrowUse->getType());
      unsigned IVWidth = SE->getTypeSizeInBits(WideType);
      if (CastWidth < IVWidth) {
        // The cast isn't as wide as the IV, so insert a Trunc.
        IRBuilder<> Builder(DU.NarrowUse);
        NewDef = Builder.CreateTrunc(DU.WideDef, DU.NarrowUse->getType());
      } else {
        // A wider extend was hidden behind a narrower one. This may induce
        // another round of IV widening in which the intermediate IV becomes
        // dead. It should be very rare.
        LLVM_DEBUG(dbgs() << "INDVARS: New IV " << *WidePhi
                          << " not wide enough to subsume " << *DU.NarrowUse
                          << "\n");
        DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);
        NewDef = DU.NarrowUse;
      }
    }
    if (NewDef != DU.NarrowUse) {
      LLVM_DEBUG(dbgs() << "INDVARS: eliminating " << *DU.NarrowUse
                        << " replaced by " << *DU.WideDef << "\n");
      ++NumElimExt;
      DU.NarrowUse->replaceAllUsesWith(NewDef);
      DeadInsts.emplace_back(DU.NarrowUse);
    }
    // Now that the extend is gone, we want to expose its uses for potential
    // further simplification. We don't need to directly inform SimplifyIVUsers
    // of the new users, because their parent IV will be processed later as a
    // new loop phi. If we preserved IVUsers analysis, we would also want to
    // push the uses of WideDef here.

    // No further widening is needed. The deceased [sz]ext had done it for us.
    return nullptr;
  }

  // Does this user itself evaluate to a recurrence after widening?
  WidenedRecTy WideAddRec = getExtendedOperandRecurrence(DU);
  if (!WideAddRec.first)
    WideAddRec = getWideRecurrence(DU);

  assert((WideAddRec.first == nullptr) ==
         (WideAddRec.second == ExtendKind::Unknown));
  if (!WideAddRec.first) {
    // If the use is a loop condition, try to promote the condition instead of
    // truncating the IV first.
    if (widenLoopCompare(DU))
      return nullptr;

    // We are about to generate a truncate instruction that may hurt
    // performance because the scalar evolution expression computed earlier
    // in WideAddRec.first does not indicate a polynomial induction expression.
    // In that case, look at the operands of the use instruction to determine
    // if we can still widen the use instead of truncating its operand.
    if (widenWithVariantUse(DU))
      return nullptr;

    // This user does not evaluate to a recurrence after widening, so don't
    // follow it. Instead insert a Trunc to kill off the original use,
    // eventually isolating the original narrow IV so it can be removed.
    truncateIVUse(DU, DT, LI);
    return nullptr;
  }

  // Reuse the IV increment that SCEVExpander created as long as it dominates
  // NarrowUse.
  Instruction *WideUse = nullptr;
  if (WideAddRec.first == WideIncExpr &&
      Rewriter.hoistIVInc(WideInc, DU.NarrowUse))
    WideUse = WideInc;
  else {
    WideUse = cloneIVUser(DU, WideAddRec.first);
    if (!WideUse)
      return nullptr;
  }
  // Evaluation of WideAddRec ensured that the narrow expression could be
  // extended outside the loop without overflow. This suggests that the wide
  // use evaluates to the same expression as the extended narrow use, but
  // doesn't absolutely guarantee it. Hence the following failsafe check. In
  // rare cases where it fails, we simply throw away the newly created wide
  // use.
  if (WideAddRec.first != SE->getSCEV(WideUse)) {
    LLVM_DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse << ": "
                      << *SE->getSCEV(WideUse) << " != " << *WideAddRec.first
                      << "\n");
    DeadInsts.emplace_back(WideUse);
    return nullptr;
  }

  // If we've reached this point, we are going to replace DU.NarrowUse with
  // WideUse. Reattach any debug values to WideUse.
  replaceAllDbgUsesWith(*DU.NarrowUse, *WideUse, *WideUse, *DT);

  ExtendKindMap[DU.NarrowUse] = WideAddRec.second;
  // Returning WideUse pushes it on the worklist.
  return WideUse;
}

/// Add eligible users of NarrowDef to NarrowIVUsers.
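/// E.g. when SCEV proves NarrowDef s>= 0, every queued use is marked
/// NeverNegative, so sext and zext users can later be treated alike.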
void WidenIV::pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef) {
  const SCEV *NarrowSCEV = SE->getSCEV(NarrowDef);
  bool NonNegativeDef =
      SE->isKnownPredicate(ICmpInst::ICMP_SGE, NarrowSCEV,
                           SE->getZero(NarrowSCEV->getType()));
  for (User *U : NarrowDef->users()) {
    Instruction *NarrowUser = cast<Instruction>(U);

    // Handle data flow merges and bizarre phi cycles.
    if (!Widened.insert(NarrowUser).second)
      continue;

    bool NonNegativeUse = false;
    if (!NonNegativeDef) {
      // We might have control-dependent range information for this context.
      if (auto RangeInfo = getPostIncRangeInfo(NarrowDef, NarrowUser))
        NonNegativeUse = RangeInfo->getSignedMin().isNonNegative();
    }

    NarrowIVUsers.emplace_back(NarrowDef, NarrowUser, WideDef,
                               NonNegativeDef || NonNegativeUse);
  }
}

/// Process a single induction variable. First use the SCEVExpander to create a
/// wide induction variable that evaluates to the same recurrence as the
/// original narrow IV. Then use a worklist to forward traverse the narrow IV's
/// def-use chain. After widenIVUse has processed all interesting IV users, the
/// narrow IV will be isolated for removal by DeleteDeadPHIs.
///
/// It would be simpler to delete uses as they are processed, but we must avoid
/// invalidating SCEV expressions.
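/// E.g. (illustrative) for the narrow IV
///   %iv = phi i32 [ 0, %preheader ], [ %iv.next, %latch ]
/// widening to i64 materializes
///   %iv.wide = phi i64 [ 0, %preheader ], [ %iv.wide.next, %latch ]
/// and then rewrites the narrow IV's sext/zext users in terms of %iv.wide.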
PHINode *WidenIV::createWideIV(SCEVExpander &Rewriter) {
  // Is this phi an induction variable?
  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(OrigPhi));
  if (!AddRec)
    return nullptr;

  // Widen the induction variable expression.
  const SCEV *WideIVExpr = getExtendKind(OrigPhi) == ExtendKind::Sign
                               ? SE->getSignExtendExpr(AddRec, WideType)
                               : SE->getZeroExtendExpr(AddRec, WideType);

  assert(SE->getEffectiveSCEVType(WideIVExpr->getType()) == WideType &&
         "Expect the new IV expression to preserve its type");

  // Can the IV be extended outside the loop without overflow?
  AddRec = dyn_cast<SCEVAddRecExpr>(WideIVExpr);
  if (!AddRec || AddRec->getLoop() != L)
    return nullptr;

  // An AddRec must have loop-invariant operands. Since this AddRec is
  // materialized by a loop header phi, the expression cannot have any post-loop
  // operands, so they must dominate the loop header.
  assert(
      SE->properlyDominates(AddRec->getStart(), L->getHeader()) &&
      SE->properlyDominates(AddRec->getStepRecurrence(*SE), L->getHeader()) &&
      "Loop header phi recurrence inputs do not dominate the loop");

  // Iterate over IV uses (including transitive ones) looking for IV increments
  // of the form 'add nsw %iv, <const>'. For each increment and each use of the
  // increment, calculate control-dependent range information based on
  // dominating conditions inside of the loop (e.g. a range check inside of the
  // loop). Calculated ranges are stored in the PostIncRangeInfos map.
  //
  // Control-dependent range information is later used to prove that a narrow
  // definition is not negative (see pushNarrowIVUsers). It's difficult to do
  // this on demand because when pushNarrowIVUsers needs this information some
  // of the dominating conditions might be already widened.
  if (UsePostIncrementRanges)
    calculatePostIncRanges(OrigPhi);

  // The rewriter provides a value for the desired IV expression. This may
  // either find an existing phi or materialize a new one. Either way, we
  // expect a well-formed cyclic phi-with-increments. i.e. any operand not part
  // of the phi-SCC dominates the loop entry.
  Instruction *InsertPt = &*L->getHeader()->getFirstInsertionPt();
  Value *ExpandInst = Rewriter.expandCodeFor(AddRec, WideType, InsertPt);
  // If the wide phi is not a phi node, for example a cast node like bitcast,
  // inttoptr, or ptrtoint, just skip for now.
  if (!(WidePhi = dyn_cast<PHINode>(ExpandInst))) {
    // If the cast node is an inserted instruction without any users, remove it
    // so the pass doesn't touch the function, since we cannot widen the phi.
    if (ExpandInst->hasNUses(0) &&
        Rewriter.isInsertedInstruction(cast<Instruction>(ExpandInst)))
      DeadInsts.emplace_back(ExpandInst);
    return nullptr;
  }

  // Remembering the WideIV increment generated by SCEVExpander allows
  // widenIVUse to reuse it when widening the narrow IV's increment. We don't
  // employ a general reuse mechanism because the call above is the only call
  // to SCEVExpander. Henceforth, we produce 1-to-1 narrow to wide uses.
  if (BasicBlock *LatchBlock = L->getLoopLatch()) {
    WideInc =
        cast<Instruction>(WidePhi->getIncomingValueForBlock(LatchBlock));
    WideIncExpr = SE->getSCEV(WideInc);
    // Propagate the debug location associated with the original loop
    // increment to the new (widened) increment.
    auto *OrigInc =
        cast<Instruction>(OrigPhi->getIncomingValueForBlock(LatchBlock));
    WideInc->setDebugLoc(OrigInc->getDebugLoc());
  }

  LLVM_DEBUG(dbgs() << "Wide IV: " << *WidePhi << "\n");
  ++NumWidened;

  // Traverse the def-use chain using a worklist starting at the original IV.
  assert(Widened.empty() && NarrowIVUsers.empty() && "expect initial state");

  Widened.insert(OrigPhi);
  pushNarrowIVUsers(OrigPhi, WidePhi);

  while (!NarrowIVUsers.empty()) {
    WidenIV::NarrowIVDefUse DU = NarrowIVUsers.pop_back_val();

    // Process a def-use edge. This may replace the use, so don't hold a
    // use_iterator across it.
    Instruction *WideUse = widenIVUse(DU, Rewriter);

    // Follow all def-use edges from the previous narrow use.
    if (WideUse)
      pushNarrowIVUsers(DU.NarrowUse, WideUse);

    // widenIVUse may have removed the def-use edge.
    if (DU.NarrowDef->use_empty())
      DeadInsts.emplace_back(DU.NarrowDef);
  }

  // Attach any debug information to the new PHI.
  replaceAllDbgUsesWith(*OrigPhi, *WidePhi, *WidePhi, *DT);

  return WidePhi;
}

/// Calculate a control-dependent range for the given def at the given context
/// by looking at dominating conditions inside of the loop.
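/// E.g. (illustrative) if the loop contains
///   %c = icmp slt i32 %x, 100
///   br i1 %c, label %guarded, label %exit
/// and NarrowDef is %d = add nsw i32 %x, 1 with NarrowUser in %guarded, we
/// can record the post-increment range [INT_MIN+1, 101) for %d at that use,
/// i.e. %d s<= 100.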
void WidenIV::calculatePostIncRange(Instruction *NarrowDef,
                                    Instruction *NarrowUser) {
  using namespace llvm::PatternMatch;

  Value *NarrowDefLHS;
  const APInt *NarrowDefRHS;
  if (!match(NarrowDef, m_NSWAdd(m_Value(NarrowDefLHS),
                                 m_APInt(NarrowDefRHS))) ||
      !NarrowDefRHS->isNonNegative())
    return;

  auto UpdateRangeFromCondition = [&](Value *Condition, bool TrueDest) {
    CmpInst::Predicate Pred;
    Value *CmpRHS;
    if (!match(Condition, m_ICmp(Pred, m_Specific(NarrowDefLHS),
                                 m_Value(CmpRHS))))
      return;

    CmpInst::Predicate P =
        TrueDest ? Pred : CmpInst::getInversePredicate(Pred);

    auto CmpRHSRange = SE->getSignedRange(SE->getSCEV(CmpRHS));
    auto CmpConstrainedLHSRange =
        ConstantRange::makeAllowedICmpRegion(P, CmpRHSRange);
    auto NarrowDefRange = CmpConstrainedLHSRange.addWithNoWrap(
        *NarrowDefRHS, OverflowingBinaryOperator::NoSignedWrap);

    updatePostIncRangeInfo(NarrowDef, NarrowUser, NarrowDefRange);
  };

  auto UpdateRangeFromGuards = [&](Instruction *Ctx) {
    if (!HasGuards)
      return;

    for (Instruction &I : make_range(Ctx->getIterator().getReverse(),
                                     Ctx->getParent()->rend())) {
      Value *C = nullptr;
      if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(C))))
        UpdateRangeFromCondition(C, /*TrueDest=*/true);
    }
  };

  UpdateRangeFromGuards(NarrowUser);

  BasicBlock *NarrowUserBB = NarrowUser->getParent();
  // If NarrowUserBB is statically unreachable, asking dominator queries may
  // yield surprising results. (e.g. the block may not have a dom tree node)
  if (!DT->isReachableFromEntry(NarrowUserBB))
    return;

  for (auto *DTB = (*DT)[NarrowUserBB]->getIDom();
       L->contains(DTB->getBlock());
       DTB = DTB->getIDom()) {
    auto *BB = DTB->getBlock();
    auto *TI = BB->getTerminator();
    UpdateRangeFromGuards(TI);

    auto *BI = dyn_cast<BranchInst>(TI);
    if (!BI || !BI->isConditional())
      continue;

    auto *TrueSuccessor = BI->getSuccessor(0);
    auto *FalseSuccessor = BI->getSuccessor(1);

    auto DominatesNarrowUser = [this, NarrowUser](BasicBlockEdge BBE) {
      return BBE.isSingleEdge() &&
             DT->dominates(BBE, NarrowUser->getParent());
    };

    if (DominatesNarrowUser(BasicBlockEdge(BB, TrueSuccessor)))
      UpdateRangeFromCondition(BI->getCondition(), /*TrueDest=*/true);

    if (DominatesNarrowUser(BasicBlockEdge(BB, FalseSuccessor)))
      UpdateRangeFromCondition(BI->getCondition(), /*TrueDest=*/false);
  }
}

/// Calculate the PostIncRangeInfos map for the given IV.
void WidenIV::calculatePostIncRanges(PHINode *OrigPhi) {
  SmallPtrSet<Instruction *, 16> Visited;
  SmallVector<Instruction *, 6> Worklist;
  Worklist.push_back(OrigPhi);
  Visited.insert(OrigPhi);

  while (!Worklist.empty()) {
    Instruction *NarrowDef = Worklist.pop_back_val();

    for (Use &U : NarrowDef->uses()) {
      auto *NarrowUser = cast<Instruction>(U.getUser());

      // Don't go looking outside the current loop.
      auto *NarrowUserLoop = (*LI)[NarrowUser->getParent()];
      if (!NarrowUserLoop || !L->contains(NarrowUserLoop))
        continue;

      if (!Visited.insert(NarrowUser).second)
        continue;

      Worklist.push_back(NarrowUser);

      calculatePostIncRange(NarrowDef, NarrowUser);
    }
  }
}

PHINode *llvm::createWideIV(const WideIVInfo &WI, LoopInfo *LI,
                            ScalarEvolution *SE, SCEVExpander &Rewriter,
                            DominatorTree *DT,
                            SmallVectorImpl<WeakTrackingVH> &DeadInsts,
                            unsigned &NumElimExt, unsigned &NumWidened,
                            bool HasGuards, bool UsePostIncrementRanges) {
  WidenIV Widener(WI, LI, SE, DT, DeadInsts, HasGuards, UsePostIncrementRanges);
  PHINode *WidePHI = Widener.createWideIV(Rewriter);
  NumElimExt = Widener.getNumElimExt();
  NumWidened = Widener.getNumWidened();
  return WidePHI;
}