//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops
// that have the loop induction variable as one or more of their components;
// it rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
// Terminology note: this code has a lot of handling for "post-increment" or
// "post-inc" users. This is not talking about post-increment addressing modes;
// it is instead talking about code like this:
//
//   %i = phi [ 0, %entry ], [ %i.next, %latch ]
//   ...
//   %i.next = add %i, 1
//   %c = icmp eq %i.next, %n
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however
// it's useful to think about these as the same register, with some uses using
// the value of the register before the add and some using it after. In this
// example, the icmp is a post-increment user, since it uses %i.next, which is
// the value of the induction variable after the increment. The other common
// case of post-increment users is users outside the loop.
//
// TODO: More sophistication in the way Formulae are generated and filtered.
//
// TODO: Handle multiple loops at a time.
//
// TODO: Should the addressing mode BaseGV be changed to a ConstantExpr instead
//       of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
//       smaller encoding (on x86 at least).
//
// TODO: When a negated register is used by an add (such as in a list of
//       multiple base registers, or as the increment expression in an addrec),
//       we may not actually need both reg and (-1 * reg) in registers; the
//       negation can be implemented by using a sub instead of an add. The
//       lack of support for taking this into consideration when making
//       register pressure decisions is partly worked around by the "Special"
//       use kind.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "loop-reduce"

/// MaxIVUsers is an arbitrary threshold that provides an early opportunity to
/// bail out. This threshold is far beyond the number of users that LSR can
/// conceivably solve, so it should not affect generated code, but catches the
/// worst cases before LSR burns too much compile time and stack space.
static const unsigned MaxIVUsers = 200;

// Temporary flag to clean up congruent phis after LSR phi expansion.
// It's currently disabled until we can determine whether it's truly useful or
// not. The flag should be removed after the v3.0 release.
// This is now needed for ivchains.
static cl::opt<bool> EnablePhiElim(
  "enable-lsr-phielim", cl::Hidden, cl::init(true),
  cl::desc("Enable LSR phi elimination"));

#ifndef NDEBUG
// Stress test IV chain generation.
static cl::opt<bool> StressIVChain(
  "stress-ivchain", cl::Hidden, cl::init(false),
  cl::desc("Stress test LSR IV chains"));
#else
static bool StressIVChain = false;
#endif

namespace {

/// RegSortData - This class holds data which is used to order reuse candidates.
class RegSortData {
public:
  /// UsedByIndices - This represents the set of LSRUse indices which reference
  /// a particular register.
  SmallBitVector UsedByIndices;

  RegSortData() {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

void RegSortData::print(raw_ostream &OS) const {
  OS << "[NumUses=" << UsedByIndices.count() << ']';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void RegSortData::dump() const {
  print(errs()); errs() << '\n';
}
#endif

namespace {

/// RegUseTracker - Map register candidates to information about how they are
/// used.
class RegUseTracker {
  typedef DenseMap<const SCEV *, RegSortData> RegUsesTy;

  RegUsesTy RegUsesMap;
  SmallVector<const SCEV *, 16> RegSequence;

public:
  void CountRegister(const SCEV *Reg, size_t LUIdx);
  void DropRegister(const SCEV *Reg, size_t LUIdx);
  void SwapAndDropUse(size_t LUIdx, size_t LastLUIdx);

  bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;

  const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;

  void clear();

  typedef SmallVectorImpl<const SCEV *>::iterator iterator;
  typedef SmallVectorImpl<const SCEV *>::const_iterator const_iterator;
  iterator begin() { return RegSequence.begin(); }
  iterator end()   { return RegSequence.end(); }
  const_iterator begin() const { return RegSequence.begin(); }
  const_iterator end() const   { return RegSequence.end(); }
};

}

void
RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
  std::pair<RegUsesTy::iterator, bool> Pair =
    RegUsesMap.insert(std::make_pair(Reg, RegSortData()));
  RegSortData &RSD = Pair.first->second;
  if (Pair.second)
    RegSequence.push_back(Reg);
  RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
  RSD.UsedByIndices.set(LUIdx);
}

void
RegUseTracker::DropRegister(const SCEV *Reg, size_t LUIdx) {
  RegUsesTy::iterator It = RegUsesMap.find(Reg);
  assert(It != RegUsesMap.end());
  RegSortData &RSD = It->second;
  assert(RSD.UsedByIndices.size() > LUIdx);
  RSD.UsedByIndices.reset(LUIdx);
}

void
RegUseTracker::SwapAndDropUse(size_t LUIdx, size_t LastLUIdx) {
  assert(LUIdx <= LastLUIdx);

  // Update RegUses. The data structure is not optimized for this purpose;
  // we must iterate through it and update each of the bit vectors.
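  // Conceptually this mirrors a swap-and-pop on the LSRUse list: the use at
  // LastLUIdx has been swapped into slot LUIdx, so each register's bit for
  // LastLUIdx is moved down to LUIdx before the bit vectors shrink.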
  for (RegUsesTy::iterator I = RegUsesMap.begin(), E = RegUsesMap.end();
       I != E; ++I) {
    SmallBitVector &UsedByIndices = I->second.UsedByIndices;
    if (LUIdx < UsedByIndices.size())
      UsedByIndices[LUIdx] =
        LastLUIdx < UsedByIndices.size() ? UsedByIndices[LastLUIdx] : 0;
    UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx));
  }
}

bool
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  if (I == RegUsesMap.end())
    return false;
  const SmallBitVector &UsedByIndices = I->second.UsedByIndices;
  int i = UsedByIndices.find_first();
  if (i == -1) return false;
  if ((size_t)i != LUIdx) return true;
  return UsedByIndices.find_next(i) != -1;
}

const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  assert(I != RegUsesMap.end() && "Unknown register!");
  return I->second.UsedByIndices;
}

void RegUseTracker::clear() {
  RegUsesMap.clear();
  RegSequence.clear();
}

namespace {

/// Formula - This class holds information that describes a formula for
/// computing a value that satisfies a use. It may include broken-out
/// immediates and scaled registers.
struct Formula {
  /// Global base address used for complex addressing.
  GlobalValue *BaseGV;

  /// Base offset for complex addressing.
  int64_t BaseOffset;

  /// Whether any complex addressing has a base register.
  bool HasBaseReg;

  /// The scale of any complex addressing.
  int64_t Scale;

  /// BaseRegs - The list of "base" registers for this use. The canonical
  /// representation of a formula is
  /// 1. BaseRegs.size > 1 implies ScaledReg != NULL and
  /// 2. ScaledReg != NULL implies Scale != 1 || !BaseRegs.empty().
  /// #1 enforces that the scaled register is always used when at least two
  /// registers are needed by the formula: e.g., reg1 + reg2 is reg1 + 1 * reg2.
  /// #2 enforces that 1 * reg is reg.
  /// This invariant can be temporarily broken while building a formula.
  /// However, every formula inserted into the LSRInstance must be in canonical
  /// form.
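  /// For illustration, a canonical formula for reg1 + reg2 + reg3 would be
  /// BaseRegs = {reg1, reg2} with ScaledReg = reg3 and Scale = 1, while a
  /// lone register stays in BaseRegs with no ScaledReg.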
  SmallVector<const SCEV *, 4> BaseRegs;

  /// ScaledReg - The 'scaled' register for this use. This should be non-null
  /// when Scale is not zero.
  const SCEV *ScaledReg;

  /// UnfoldedOffset - An additional constant offset which is added near the
  /// use. This requires a temporary register, but the offset itself can
  /// live in an add immediate field rather than a register.
  int64_t UnfoldedOffset;

  Formula()
      : BaseGV(nullptr), BaseOffset(0), HasBaseReg(false), Scale(0),
        ScaledReg(nullptr), UnfoldedOffset(0) {}

  void InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);

  bool isCanonical() const;

  void Canonicalize();

  bool Unscale();

  size_t getNumRegs() const;
  Type *getType() const;

  void DeleteBaseReg(const SCEV *&S);

  bool referencesReg(const SCEV *S) const;
  bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                  const RegUseTracker &RegUses) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// DoInitialMatch - Recursion helper for InitialMatch.
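/// "Good" collects the subexpressions that properly dominate the loop header
/// and so can be hoisted into the preheader; "Bad" collects everything else.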
static void DoInitialMatch(const SCEV *S, Loop *L,
                           SmallVectorImpl<const SCEV *> &Good,
                           SmallVectorImpl<const SCEV *> &Bad,
                           ScalarEvolution &SE) {
  // Collect expressions which properly dominate the loop header.
  if (SE.properlyDominates(S, L->getHeader())) {
    Good.push_back(S);
    return;
  }

  // Look at add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      DoInitialMatch(*I, L, Good, Bad, SE);
    return;
  }

  // Look at addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
    if (!AR->getStart()->isZero()) {
      DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
      DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                      AR->getStepRecurrence(SE),
                                      // FIXME: AR->getNoWrapFlags()
                                      AR->getLoop(), SCEV::FlagAnyWrap),
                     L, Good, Bad, SE);
      return;
    }

  // Handle a multiplication by -1 (negation) if it didn't fold.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
    if (Mul->getOperand(0)->isAllOnesValue()) {
      SmallVector<const SCEV *, 4> Ops(Mul->op_begin()+1, Mul->op_end());
      const SCEV *NewMul = SE.getMulExpr(Ops);

      SmallVector<const SCEV *, 4> MyGood;
      SmallVector<const SCEV *, 4> MyBad;
      DoInitialMatch(NewMul, L, MyGood, MyBad, SE);
      const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
        SE.getEffectiveSCEVType(NewMul->getType())));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyGood.begin(),
           E = MyGood.end(); I != E; ++I)
        Good.push_back(SE.getMulExpr(NegOne, *I));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyBad.begin(),
           E = MyBad.end(); I != E; ++I)
        Bad.push_back(SE.getMulExpr(NegOne, *I));
      return;
    }

  // Ok, we can't do anything interesting. Just stuff the whole thing into a
  // register and hope for the best.
  Bad.push_back(S);
}

/// InitialMatch - Incorporate loop-variant parts of S into this Formula,
/// attempting to keep all loop-invariant and loop-computable values in a
/// single base register.
void Formula::InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
  SmallVector<const SCEV *, 4> Good;
  SmallVector<const SCEV *, 4> Bad;
  DoInitialMatch(S, L, Good, Bad, SE);
  if (!Good.empty()) {
    const SCEV *Sum = SE.getAddExpr(Good);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    HasBaseReg = true;
  }
  if (!Bad.empty()) {
    const SCEV *Sum = SE.getAddExpr(Bad);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    HasBaseReg = true;
  }
  Canonicalize();
}

/// \brief Check whether or not this formula satisfies the canonical
/// representation.
/// \see Formula::BaseRegs.
bool Formula::isCanonical() const {
  if (ScaledReg)
    return Scale != 1 || !BaseRegs.empty();
  return BaseRegs.size() <= 1;
}

/// \brief Helper method to morph a formula into its canonical representation.
/// \see Formula::BaseRegs.
/// Every formula having more than one base register must use the ScaledReg
/// field. Otherwise, we would have to do special cases everywhere in LSR
/// to treat reg1 + reg2 + ... the same way as reg1 + 1*reg2 + ...
/// On the other hand, 1*reg should be canonicalized into reg.
void Formula::Canonicalize() {
  if (isCanonical())
    return;
  // So far we did not need this case. This is easy to implement but it is
  // useless to maintain dead code. Besides, it could hurt compile time.
  assert(!BaseRegs.empty() && "1*reg => reg, should not be needed.");
  // Keep the invariant sum in BaseRegs and one of the variant sums in
  // ScaledReg.
  ScaledReg = BaseRegs.back();
  BaseRegs.pop_back();
  Scale = 1;
  size_t BaseRegsSize = BaseRegs.size();
  size_t Try = 0;
  // If ScaledReg is an invariant, try to find a variant expression.
  while (Try < BaseRegsSize && !isa<SCEVAddRecExpr>(ScaledReg))
    std::swap(ScaledReg, BaseRegs[Try++]);
}

/// \brief Get rid of the scale in the formula.
/// In other words, this method morphs reg1 + 1*reg2 into reg1 + reg2.
/// \return true if it was possible to get rid of the scale, false otherwise.
/// \note After this operation the formula may not be in the canonical form.
bool Formula::Unscale() {
  if (Scale != 1)
    return false;
  Scale = 0;
  BaseRegs.push_back(ScaledReg);
  ScaledReg = nullptr;
  return true;
}

/// getNumRegs - Return the total number of register operands used by this
/// formula. This does not include register uses implied by non-constant
/// addrec strides.
size_t Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();
}

/// getType - Return the type of this formula, if it has one, or null
/// otherwise. This type is meaningless except for the bit size.
Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :
         BaseGV ? BaseGV->getType() :
         nullptr;
}

/// DeleteBaseReg - Delete the given base reg from the BaseRegs list.
void Formula::DeleteBaseReg(const SCEV *&S) {
  if (&S != &BaseRegs.back())
    std::swap(S, BaseRegs.back());
  BaseRegs.pop_back();
}

/// referencesReg - Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
  return S == ScaledReg ||
         std::find(BaseRegs.begin(), BaseRegs.end(), S) != BaseRegs.end();
}

/// hasRegsUsedByUsesOtherThan - Test whether this formula uses registers
/// which are used by uses other than the use with the given index.
bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                         const RegUseTracker &RegUses) const {
  if (ScaledReg)
    if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
      return true;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I)
    if (RegUses.isRegUsedByUsesOtherThan(*I, LUIdx))
      return true;
  return false;
}

void Formula::print(raw_ostream &OS) const {
  bool First = true;
  if (BaseGV) {
    if (!First) OS << " + "; else First = false;
    BaseGV->printAsOperand(OS, /*PrintType=*/false);
  }
  if (BaseOffset != 0) {
    if (!First) OS << " + "; else First = false;
    OS << BaseOffset;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I) {
    if (!First) OS << " + "; else First = false;
    OS << "reg(" << **I << ')';
  }
  if (HasBaseReg && BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: HasBaseReg**";
  } else if (!HasBaseReg && !BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: !HasBaseReg**";
  }
  if (Scale != 0) {
    if (!First) OS << " + "; else First = false;
    OS << Scale << "*reg(";
    if (ScaledReg)
      OS << *ScaledReg;
    else
      OS << "<unknown>";
    OS << ')';
  }
  if (UnfoldedOffset != 0) {
    if (!First) OS << " + ";
    OS << "imm(" << UnfoldedOffset << ')';
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Formula::dump() const {
  print(errs()); errs() << '\n';
}
#endif

/// isAddRecSExtable - Return true if the given addrec can be sign-extended
/// without changing its value.
static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1);
  return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
}

/// isAddSExtable - Return true if the given add can be sign-extended
/// without changing its value.
static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1);
  return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
}

/// isMulSExtable - Return true if the given mul can be sign-extended
/// without changing its value.
static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(),
                     SE.getTypeSizeInBits(M->getType()) * M->getNumOperands());
  return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
}

/// getExactSDiv - Return an expression for LHS /s RHS, if it can be determined
/// and if the remainder is known to be zero, or null otherwise. If
/// IgnoreSignificantBits is true, expressions like (X * Y) /s Y are simplified
/// to X, ignoring that the multiplication may overflow, which is useful when
/// the result will be used in a context where the most significant bits are
/// ignored.
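/// For example, 12 /s 4 yields 3 while 6 /s 4 yields null (nonzero
/// remainder), and with IgnoreSignificantBits, (4*X) /s 4 yields X.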
static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
                                ScalarEvolution &SE,
                                bool IgnoreSignificantBits = false) {
  // Handle the trivial case, which works for any SCEV type.
  if (LHS == RHS)
    return SE.getConstant(LHS->getType(), 1);

  // Handle a few RHS special cases.
  const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
  if (RC) {
    const APInt &RA = RC->getValue()->getValue();
    // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do
    // some folding.
    if (RA.isAllOnesValue())
      return SE.getMulExpr(LHS, RC);
    // Handle x /s 1 as x.
    if (RA == 1)
      return LHS;
  }

  // Check for a division of a constant by a constant.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
    if (!RC)
      return nullptr;
    const APInt &LA = C->getValue()->getValue();
    const APInt &RA = RC->getValue()->getValue();
    if (LA.srem(RA) != 0)
      return nullptr;
    return SE.getConstant(LA.sdiv(RA));
  }

  // Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) {
      const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
                                      IgnoreSignificantBits);
      if (!Step) return nullptr;
      const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
                                       IgnoreSignificantBits);
      if (!Start) return nullptr;
      // FlagNW is independent of the start value, step direction, and is
      // preserved with smaller magnitude steps.
      // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
      return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap);
    }
    return nullptr;
  }

  // Distribute the sdiv over add operands, if the add doesn't overflow.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
      SmallVector<const SCEV *, 8> Ops;
      for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
           I != E; ++I) {
        const SCEV *Op = getExactSDiv(*I, RHS, SE,
                                      IgnoreSignificantBits);
        if (!Op) return nullptr;
        Ops.push_back(Op);
      }
      return SE.getAddExpr(Ops);
    }
    return nullptr;
  }

  // Check for a multiply operand that we can pull RHS out of.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) {
    if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
      SmallVector<const SCEV *, 4> Ops;
      bool Found = false;
      for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end();
           I != E; ++I) {
        const SCEV *S = *I;
        if (!Found)
          if (const SCEV *Q = getExactSDiv(S, RHS, SE,
                                           IgnoreSignificantBits)) {
            S = Q;
            Found = true;
          }
        Ops.push_back(S);
      }
      return Found ? SE.getMulExpr(Ops) : nullptr;
    }
    return nullptr;
  }

  // Otherwise we don't know.
  return nullptr;
}

/// ExtractImmediate - If S involves the addition of a constant integer value,
/// return that integer value, and mutate S to point to a new SCEV with that
/// value excluded.
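/// For example, given S = (4 + %x), this returns 4 and leaves S pointing to
/// a SCEV for just (%x); if S has no constant addend, it returns 0 and leaves
/// S unchanged.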
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    if (C->getValue()->getValue().getMinSignedBits() <= 64) {
      S = SE.getConstant(C->getType(), 0);
      return C->getValue()->getSExtValue();
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return 0;
}

/// ExtractSymbol - If S involves the addition of a GlobalValue address,
/// return that symbol, and mutate S to point to a new SCEV with that
/// value excluded.
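/// For example, given S = (@g + %x), this returns @g and mutates S to point
/// to a SCEV for just (%x).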
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
      S = SE.getConstant(GV->getType(), 0);
      return GV;
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
    if (Result)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
    if (Result)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return nullptr;
}

/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::prefetch:
      case Intrinsic::x86_sse_storeu_ps:
      case Intrinsic::x86_sse2_storeu_pd:
      case Intrinsic::x86_sse2_storeu_dq:
      case Intrinsic::x86_sse2_storel_dq:
        if (II->getArgOperand(0) == OperandVal)
          isAddress = true;
        break;
    }
  }
  return isAddress;
}

/// getAccessType - Return the type of the memory being accessed.
static Type *getAccessType(const Instruction *Inst) {
  Type *AccessTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    AccessTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      AccessTy = II->getArgOperand(0)->getType();
      break;
    }
  }

  // All pointers have the same requirements, so canonicalize them to an
  // arbitrary pointer type to minimize variation.
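  // (For example, i32* and i8* in address space 0 would both be canonicalized
  // to i1* in address space 0.)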
  if (PointerType *PTy = dyn_cast<PointerType>(AccessTy))
    AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
                                PTy->getAddressSpace());

  return AccessTy;
}

/// isExistingPhi - Return true if this AddRec is already a phi in its loop.
static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin();
       PHINode *PN = dyn_cast<PHINode>(I); ++I) {
    if (SE.isSCEVable(PN->getType()) &&
        (SE.getEffectiveSCEVType(PN->getType()) ==
         SE.getEffectiveSCEVType(AR->getType())) &&
        SE.getSCEV(PN) == AR)
      return true;
  }
  return false;
}

/// Check if expanding this expression is likely to incur significant cost. This
/// is tricky because SCEV doesn't track which expressions are actually computed
/// by the current IR.
///
/// We currently allow expansion of IV increments that involve adds,
/// multiplication by constants, and AddRecs from existing phis.
///
/// TODO: Allow UDivExpr if we can find an existing IV increment that is an
/// obvious multiple of the UDivExpr.
static bool isHighCostExpansion(const SCEV *S,
                                SmallPtrSetImpl<const SCEV*> &Processed,
                                ScalarEvolution &SE) {
  // Zero/One operand expressions
  switch (S->getSCEVType()) {
  case scUnknown:
  case scConstant:
    return false;
  case scTruncate:
    return isHighCostExpansion(cast<SCEVTruncateExpr>(S)->getOperand(),
                               Processed, SE);
  case scZeroExtend:
    return isHighCostExpansion(cast<SCEVZeroExtendExpr>(S)->getOperand(),
                               Processed, SE);
  case scSignExtend:
    return isHighCostExpansion(cast<SCEVSignExtendExpr>(S)->getOperand(),
                               Processed, SE);
  }

  if (!Processed.insert(S).second)
    return false;

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I) {
      if (isHighCostExpansion(*I, Processed, SE))
        return true;
    }
    return false;
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    if (Mul->getNumOperands() == 2) {
      // Multiplication by a constant is ok
      if (isa<SCEVConstant>(Mul->getOperand(0)))
        return isHighCostExpansion(Mul->getOperand(1), Processed, SE);

      // If we have the value of one operand, check if an existing
      // multiplication already generates this expression.
      if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Mul->getOperand(1))) {
        Value *UVal = U->getValue();
        for (User *UR : UVal->users()) {
          // If U is a constant, it may be used by a ConstantExpr.
          Instruction *UI = dyn_cast<Instruction>(UR);
          if (UI && UI->getOpcode() == Instruction::Mul &&
              SE.isSCEVable(UI->getType())) {
            return SE.getSCEV(UI) == Mul;
          }
        }
      }
    }
  }

  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    if (isExistingPhi(AR, SE))
      return false;
  }

  // For now, consider any other type of expression (div/mul/min/max) high cost.
  return true;
}

/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
static bool
DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
  bool Changed = false;

  while (!DeadInsts.empty()) {
    Value *V = DeadInsts.pop_back_val();
    Instruction *I = dyn_cast_or_null<Instruction>(V);

    if (!I || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = nullptr;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
    Changed = true;
  }

  return Changed;
}

namespace {
class LSRUse;
}

/// \brief Check if the addressing mode defined by \p F is completely
/// folded in \p LU at isel time.
/// This includes address-mode folding and special icmp tricks.
/// This function returns true if \p LU can accommodate what \p F
/// defines and up to 1 base + 1 scaled + offset.
/// In other words, if \p F has several base registers, this function may
/// still return true. Therefore, users still need to account for
/// additional base registers and/or unfolded offsets to derive an
/// accurate cost model.
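/// For instance, on a target like x86, an address such as base + 4*index + 12
/// may fold into a single memory operand, so a formula with one base register,
/// Scale = 4, and BaseOffset = 12 could be completely folded.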
static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 const LSRUse &LU, const Formula &F);
// Get the cost of the scaling factor used in F for LU.
static unsigned getScalingFactorCost(const TargetTransformInfo &TTI,
                                     const LSRUse &LU, const Formula &F);

namespace {

/// Cost - This class is used to measure and compare candidate formulae.
class Cost {
  /// TODO: Some of these could be merged. Also, a lexical ordering
  /// isn't always optimal.
  unsigned NumRegs;
  unsigned AddRecCost;
  unsigned NumIVMuls;
  unsigned NumBaseAdds;
  unsigned ImmCost;
  unsigned SetupCost;
  unsigned ScaleCost;

public:
  Cost()
    : NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0),
      SetupCost(0), ScaleCost(0) {}

  bool operator<(const Cost &Other) const;

  void Lose();

#ifndef NDEBUG
  // Once any of the metrics loses, they must all remain losers.
  bool isValid() {
    return ((NumRegs | AddRecCost | NumIVMuls | NumBaseAdds
             | ImmCost | SetupCost | ScaleCost) != ~0u)
      || ((NumRegs & AddRecCost & NumIVMuls & NumBaseAdds
           & ImmCost & SetupCost & ScaleCost) == ~0u);
  }
#endif

  bool isLoser() {
    assert(isValid() && "invalid cost");
    return NumRegs == ~0u;
  }

  void RateFormula(const TargetTransformInfo &TTI,
                   const Formula &F,
                   SmallPtrSetImpl<const SCEV *> &Regs,
                   const DenseSet<const SCEV *> &VisitedRegs,
                   const Loop *L,
                   const SmallVectorImpl<int64_t> &Offsets,
                   ScalarEvolution &SE, DominatorTree &DT,
                   const LSRUse &LU,
                   SmallPtrSetImpl<const SCEV *> *LoserRegs = nullptr);

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  void RateRegister(const SCEV *Reg,
                    SmallPtrSetImpl<const SCEV *> &Regs,
                    const Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);
  void RatePrimaryRegister(const SCEV *Reg,
                           SmallPtrSetImpl<const SCEV *> &Regs,
                           const Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT,
                           SmallPtrSetImpl<const SCEV *> *LoserRegs);
};

}

/// RateRegister - Tally up interesting quantities from the given register.
void Cost::RateRegister(const SCEV *Reg,
                        SmallPtrSetImpl<const SCEV *> &Regs,
                        const Loop *L,
                        ScalarEvolution &SE, DominatorTree &DT) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
    // If this is an addrec for another loop, don't second-guess its addrec phi
    // nodes. LSR isn't currently smart enough to reason about more than one
    // loop at a time. LSR has already run on inner loops, will not run on outer
    // loops, and cannot be expected to change sibling loops.
    if (AR->getLoop() != L) {
      // If the AddRec exists, consider its register free and leave it alone.
      if (isExistingPhi(AR, SE))
        return;

      // Otherwise, do not consider this formula at all.
      Lose();
      return;
    }
    AddRecCost += 1; /// TODO: This should be a function of the stride.

    // Add the step value register, if it needs one.
    // TODO: The non-affine case isn't precisely modeled here.
    if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1))) {
      if (!Regs.count(AR->getOperand(1))) {
        RateRegister(AR->getOperand(1), Regs, L, SE, DT);
        if (isLoser())
          return;
      }
    }
  }
  ++NumRegs;

  // Rough heuristic; favor registers which don't require extra setup
  // instructions in the preheader.
  if (!isa<SCEVUnknown>(Reg) &&
      !isa<SCEVConstant>(Reg) &&
      !(isa<SCEVAddRecExpr>(Reg) &&
        (isa<SCEVUnknown>(cast<SCEVAddRecExpr>(Reg)->getStart()) ||
         isa<SCEVConstant>(cast<SCEVAddRecExpr>(Reg)->getStart()))))
    ++SetupCost;

  NumIVMuls += isa<SCEVMulExpr>(Reg) &&
               SE.hasComputableLoopEvolution(Reg, L);
}

/// RatePrimaryRegister - Record this register in the set. If we haven't seen it
/// before, rate it. Optional LoserRegs provides a way to declare any formula
/// that refers to one of those regs an instant loser.
void Cost::RatePrimaryRegister(const SCEV *Reg,
                               SmallPtrSetImpl<const SCEV *> &Regs,
                               const Loop *L,
                               ScalarEvolution &SE, DominatorTree &DT,
                               SmallPtrSetImpl<const SCEV *> *LoserRegs) {
  if (LoserRegs && LoserRegs->count(Reg)) {
    Lose();
    return;
  }
  if (Regs.insert(Reg).second) {
    RateRegister(Reg, Regs, L, SE, DT);
    if (LoserRegs && isLoser())
      LoserRegs->insert(Reg);
  }
}

void Cost::RateFormula(const TargetTransformInfo &TTI,
                       const Formula &F,
                       SmallPtrSetImpl<const SCEV *> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const Loop *L,
                       const SmallVectorImpl<int64_t> &Offsets,
                       ScalarEvolution &SE, DominatorTree &DT,
                       const LSRUse &LU,
                       SmallPtrSetImpl<const SCEV *> *LoserRegs) {
  assert(F.isCanonical() && "Cost is accurate only for canonical formula");
  // Tally up the registers.
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
      Lose();
      return;
    }
    RatePrimaryRegister(ScaledReg, Regs, L, SE, DT, LoserRegs);
    if (isLoser())
      return;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (VisitedRegs.count(BaseReg)) {
      Lose();
      return;
    }
    RatePrimaryRegister(BaseReg, Regs, L, SE, DT, LoserRegs);
    if (isLoser())
      return;
  }

  // Determine how many (unfolded) adds we'll need inside the loop.
  size_t NumBaseParts = F.getNumRegs();
  if (NumBaseParts > 1)
    // Do not count the base and a possible second register if the target
    // allows folding 2 registers.
    NumBaseAdds +=
        NumBaseParts - (1 + (F.Scale && isAMCompletelyFolded(TTI, LU, F)));
  NumBaseAdds += (F.UnfoldedOffset != 0);

  // Accumulate non-free scaling amounts.
  ScaleCost += getScalingFactorCost(TTI, LU, F);

  // Tally up the non-zero immediates.
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    int64_t Offset = (uint64_t)*I + F.BaseOffset;
    if (F.BaseGV)
      ImmCost += 64; // Handle symbolic values conservatively.
                     // TODO: This should probably be the pointer size.
    else if (Offset != 0)
      ImmCost += APInt(64, Offset, true).getMinSignedBits();
  }
  assert(isValid() && "invalid cost");
}

/// Lose - Set this cost to a losing value.
void Cost::Lose() {
  NumRegs = ~0u;
  AddRecCost = ~0u;
  NumIVMuls = ~0u;
  NumBaseAdds = ~0u;
  ImmCost = ~0u;
  SetupCost = ~0u;
  ScaleCost = ~0u;
}

/// operator< - Choose the lower cost.
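/// The comparison is lexicographic in the order the members appear below, so,
/// e.g., a cost using fewer registers always wins regardless of the other
/// metrics.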
bool Cost::operator<(const Cost &Other) const {
  return std::tie(NumRegs, AddRecCost, NumIVMuls, NumBaseAdds, ScaleCost,
                  ImmCost, SetupCost) <
         std::tie(Other.NumRegs, Other.AddRecCost, Other.NumIVMuls,
                  Other.NumBaseAdds, Other.ScaleCost, Other.ImmCost,
                  Other.SetupCost);
}

void Cost::print(raw_ostream &OS) const {
  OS << NumRegs << " reg" << (NumRegs == 1 ? "" : "s");
  if (AddRecCost != 0)
    OS << ", with addrec cost " << AddRecCost;
  if (NumIVMuls != 0)
    OS << ", plus " << NumIVMuls << " IV mul" << (NumIVMuls == 1 ? "" : "s");
  if (NumBaseAdds != 0)
    OS << ", plus " << NumBaseAdds << " base add"
       << (NumBaseAdds == 1 ? "" : "s");
  if (ScaleCost != 0)
    OS << ", plus " << ScaleCost << " scale cost";
  if (ImmCost != 0)
    OS << ", plus " << ImmCost << " imm cost";
  if (SetupCost != 0)
    OS << ", plus " << SetupCost << " setup cost";
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Cost::dump() const {
  print(errs()); errs() << '\n';
}
#endif

namespace {

/// LSRFixup - An operand value in an instruction which is to be replaced
/// with some equivalent, possibly strength-reduced, replacement.
struct LSRFixup {
  /// UserInst - The instruction which will be updated.
  Instruction *UserInst;

  /// OperandValToReplace - The operand of the instruction which will
  /// be replaced. The operand may be used more than once; every instance
  /// will be replaced.
  Value *OperandValToReplace;

  /// PostIncLoops - If this user is to use the post-incremented value of an
  /// induction variable, this set is non-empty and holds the loops
  /// associated with the induction variable.
  PostIncLoopSet PostIncLoops;

  /// LUIdx - The index of the LSRUse describing the expression which
  /// this fixup needs, minus an offset (below).
  size_t LUIdx;

  /// Offset - A constant offset to be added to the LSRUse expression.
  /// This allows multiple fixups to share the same LSRUse with different
  /// offsets, for example in an unrolled loop.
  int64_t Offset;

  bool isUseFullyOutsideLoop(const Loop *L) const;

  LSRFixup();

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

LSRFixup::LSRFixup()
  : UserInst(nullptr), OperandValToReplace(nullptr), LUIdx(~size_t(0)),
    Offset(0) {}

/// isUseFullyOutsideLoop - Test whether this fixup always uses its
/// value outside of the given loop.
bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const {
  // PHI nodes use their value in their incoming blocks.
  if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (PN->getIncomingValue(i) == OperandValToReplace &&
          L->contains(PN->getIncomingBlock(i)))
        return false;
    return true;
  }

  return !L->contains(UserInst);
}

void LSRFixup::print(raw_ostream &OS) const {
  OS << "UserInst=";
  // Store is common and interesting enough to be worth special-casing.
  if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) {
    OS << "store ";
    Store->getOperand(0)->printAsOperand(OS, /*PrintType=*/false);
  } else if (UserInst->getType()->isVoidTy())
    OS << UserInst->getOpcodeName();
  else
    UserInst->printAsOperand(OS, /*PrintType=*/false);

  OS << ", OperandValToReplace=";
  OperandValToReplace->printAsOperand(OS, /*PrintType=*/false);

  for (PostIncLoopSet::const_iterator I = PostIncLoops.begin(),
       E = PostIncLoops.end(); I != E; ++I) {
    OS << ", PostIncLoop=";
    (*I)->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  }

  if (LUIdx != ~size_t(0))
    OS << ", LUIdx=" << LUIdx;

  if (Offset != 0)
    OS << ", Offset=" << Offset;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRFixup::dump() const {
  print(errs()); errs() << '\n';
}
#endif

namespace {

/// UniquifierDenseMapInfo - A DenseMapInfo implementation for holding
/// DenseMaps and DenseSets of sorted SmallVectors of const SCEV*.
struct UniquifierDenseMapInfo {
  static SmallVector<const SCEV *, 4> getEmptyKey() {
    SmallVector<const SCEV *, 4> V;
    V.push_back(reinterpret_cast<const SCEV *>(-1));
    return V;
  }

  static SmallVector<const SCEV *, 4> getTombstoneKey() {
    SmallVector<const SCEV *, 4> V;
    V.push_back(reinterpret_cast<const SCEV *>(-2));
    return V;
  }

  static unsigned getHashValue(const SmallVector<const SCEV *, 4> &V) {
    return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
  }

  static bool isEqual(const SmallVector<const SCEV *, 4> &LHS,
                      const SmallVector<const SCEV *, 4> &RHS) {
    return LHS == RHS;
  }
};

/// LSRUse - This class holds the state that LSR keeps for each use in
/// IVUsers, as well as uses invented by LSR itself. It includes information
/// about what kinds of things can be folded into the user, information about
/// the user itself, and information about how the use may be satisfied.
/// TODO: Represent multiple users of the same expression in common?
class LSRUse {
  DenseSet<SmallVector<const SCEV *, 4>, UniquifierDenseMapInfo> Uniquifier;

public:
  /// KindType - An enum for a kind of use, indicating what types of
  /// scaled and immediate operands it might support.
  enum KindType {
    Basic,   ///< A normal use, with no folding.
    Special, ///< A special case of basic, allowing -1 scales.
    Address, ///< An address use; folding according to TargetLowering
    ICmpZero ///< An equality icmp with both operands folded into one.
    // TODO: Add a generic icmp too?
  };

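  /// A (SCEV *, KindType) pair packed into a single pointer-sized value; the
  /// kind tag is stored in the low bits of the SCEV pointer.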
  typedef PointerIntPair<const SCEV *, 2, KindType> SCEVUseKindPair;

  KindType Kind;
  Type *AccessTy;

  SmallVector<int64_t, 8> Offsets;
  int64_t MinOffset;
  int64_t MaxOffset;

  /// AllFixupsOutsideLoop - This records whether all of the fixups using this
  /// LSRUse are outside of the loop, in which case some special-case heuristics
  /// may be used.
  bool AllFixupsOutsideLoop;

  /// RigidFormula is set to true to guarantee that this use will be associated
  /// with a single formula--the one that initially matched. Some SCEV
  /// expressions cannot be expanded. This allows LSR to consider the registers
  /// used by those expressions without the need to expand them later after
  /// changing the formula.
  bool RigidFormula;

  /// WidestFixupType - This records the widest use type for any fixup using
  /// this LSRUse. FindUseWithSimilarFormula can't consider uses with different
  /// max fixup widths to be equivalent, because the narrower one may be relying
  /// on the implicit truncation to truncate away bogus bits.
  Type *WidestFixupType;

  /// Formulae - A list of ways to build a value that can satisfy this user.
  /// After the list is populated, one of these is selected heuristically and
  /// used to formulate a replacement for OperandValToReplace in UserInst.
  SmallVector<Formula, 12> Formulae;

  /// Regs - The set of register candidates used by all formulae in this LSRUse.
  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, Type *T) : Kind(K), AccessTy(T),
                                MinOffset(INT64_MAX),
                                MaxOffset(INT64_MIN),
                                AllFixupsOutsideLoop(true),
                                RigidFormula(false),
                                WidestFixupType(nullptr) {}

  bool HasFormulaWithSameRegs(const Formula &F) const;
  bool InsertFormula(const Formula &F);
  void DeleteFormula(Formula &F);
  void RecomputeRegs(size_t LUIdx, RegUseTracker &Reguses);

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// HasFormulaWithSameRegs - Test whether this use has a formula which has
/// the same registers as the given formula.
bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
1276   SmallVector<const SCEV *, 4> Key = F.BaseRegs;
1277   if (F.ScaledReg) Key.push_back(F.ScaledReg);
1278   // Unstable sort by host order ok, because this is only used for uniquifying.
1279   std::sort(Key.begin(), Key.end());
1280   return Uniquifier.count(Key);
1281 }
1282 
1283 /// InsertFormula - If the given formula has not yet been inserted, add it to
1284 /// the list, and return true. Return false otherwise.
1285 /// The formula must be in canonical form.
InsertFormula(const Formula & F)1286 bool LSRUse::InsertFormula(const Formula &F) {
1287   assert(F.isCanonical() && "Invalid canonical representation");
1288 
1289   if (!Formulae.empty() && RigidFormula)
1290     return false;
1291 
1292   SmallVector<const SCEV *, 4> Key = F.BaseRegs;
1293   if (F.ScaledReg) Key.push_back(F.ScaledReg);
1294   // Unstable sort by host order ok, because this is only used for uniquifying.
1295   std::sort(Key.begin(), Key.end());
1296 
1297   if (!Uniquifier.insert(Key).second)
1298     return false;
1299 
1300   // Using a register to hold the value of 0 is not profitable.
1301   assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
1302          "Zero allocated in a scaled register!");
1303 #ifndef NDEBUG
1304   for (SmallVectorImpl<const SCEV *>::const_iterator I =
1305        F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I)
1306     assert(!(*I)->isZero() && "Zero allocated in a base register!");
1307 #endif
1308 
1309   // Add the formula to the list.
1310   Formulae.push_back(F);
1311 
1312   // Record registers now being used by this use.
1313   Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
1314   if (F.ScaledReg)
1315     Regs.insert(F.ScaledReg);
1316 
1317   return true;
1318 }

/// DeleteFormula - Remove the given formula from this use's list.
void LSRUse::DeleteFormula(Formula &F) {
  if (&F != &Formulae.back())
    std::swap(F, Formulae.back());
  Formulae.pop_back();
}

/// RecomputeRegs - Recompute the Regs field, and update RegUses.
void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) {
  // Now that we've filtered out some formulae, recompute the Regs set.
  SmallPtrSet<const SCEV *, 4> OldRegs = Regs;
  Regs.clear();
  for (SmallVectorImpl<Formula>::const_iterator I = Formulae.begin(),
       E = Formulae.end(); I != E; ++I) {
    const Formula &F = *I;
    if (F.ScaledReg) Regs.insert(F.ScaledReg);
    Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
  }

  // Update the RegTracker.
  for (const SCEV *S : OldRegs)
    if (!Regs.count(S))
      RegUses.DropRegister(S, LUIdx);
}

void LSRUse::print(raw_ostream &OS) const {
  OS << "LSR Use: Kind=";
  switch (Kind) {
  case Basic:    OS << "Basic"; break;
  case Special:  OS << "Special"; break;
  case ICmpZero: OS << "ICmpZero"; break;
  case Address:
    OS << "Address of ";
    if (AccessTy->isPointerTy())
      OS << "pointer"; // the full pointer type could be really verbose
    else
      OS << *AccessTy;
  }

  OS << ", Offsets={";
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    OS << *I;
    if (std::next(I) != E)
      OS << ',';
  }
  OS << '}';

  if (AllFixupsOutsideLoop)
    OS << ", all-fixups-outside-loop";

  if (WidestFixupType)
    OS << ", widest fixup type: " << *WidestFixupType;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRUse::dump() const {
  print(errs()); errs() << '\n';
}
#endif

static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 LSRUse::KindType Kind, Type *AccessTy,
                                 GlobalValue *BaseGV, int64_t BaseOffset,
                                 bool HasBaseReg, int64_t Scale) {
  switch (Kind) {
  case LSRUse::Address:
    return TTI.isLegalAddressingMode(AccessTy, BaseGV, BaseOffset, HasBaseReg,
                                     Scale);

  case LSRUse::ICmpZero:
    // There's not even a target hook for querying whether it would be legal to
    // fold a GV into an ICmp.
    if (BaseGV)
      return false;

    // ICmp only has two operands; don't allow more than two non-trivial parts.
    if (Scale != 0 && HasBaseReg && BaseOffset != 0)
      return false;

    // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale by
    // putting the scaled register in the other operand of the icmp.
    if (Scale != 0 && Scale != -1)
      return false;

    // If we have low-level target information, ask the target if it can fold an
    // integer immediate on an icmp.
    if (BaseOffset != 0) {
      // We have one of:
      // ICmpZero     BaseReg + BaseOffset => ICmp BaseReg, -BaseOffset
      // ICmpZero -1*ScaleReg + BaseOffset => ICmp ScaleReg, BaseOffset
      // Offs is the ICmp immediate.
      if (Scale == 0)
        // The cast does the right thing with INT64_MIN.
        BaseOffset = -(uint64_t)BaseOffset;
      return TTI.isLegalICmpImmediate(BaseOffset);
    }

    // ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg
    return true;

  case LSRUse::Basic:
    // Only handle single-register values.
    return !BaseGV && Scale == 0 && BaseOffset == 0;

  case LSRUse::Special:
    // Special case Basic to handle -1 scales.
    return !BaseGV && (Scale == 0 || Scale == -1) && BaseOffset == 0;
  }

  llvm_unreachable("Invalid LSRUse Kind!");
}

static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 int64_t MinOffset, int64_t MaxOffset,
                                 LSRUse::KindType Kind, Type *AccessTy,
                                 GlobalValue *BaseGV, int64_t BaseOffset,
                                 bool HasBaseReg, int64_t Scale) {
  // Check for overflow.
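  // For example, if BaseOffset == INT64_MAX and MinOffset == 1, the sum wraps
  // to a negative value; the sign of the result then disagrees with the sign
  // of MinOffset, and the combination is rejected.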
  if (((int64_t)((uint64_t)BaseOffset + MinOffset) > BaseOffset) !=
      (MinOffset > 0))
    return false;
  MinOffset = (uint64_t)BaseOffset + MinOffset;
  if (((int64_t)((uint64_t)BaseOffset + MaxOffset) > BaseOffset) !=
      (MaxOffset > 0))
    return false;
  MaxOffset = (uint64_t)BaseOffset + MaxOffset;

  return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MinOffset,
                              HasBaseReg, Scale) &&
         isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MaxOffset,
                              HasBaseReg, Scale);
}

static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 int64_t MinOffset, int64_t MaxOffset,
                                 LSRUse::KindType Kind, Type *AccessTy,
                                 const Formula &F) {
  // For the purpose of isAMCompletelyFolded either having a canonical formula
  // or a scale not equal to zero is correct.
  // Problems may arise from non-canonical formulae having a scale == 0.
  // Strictly speaking it would be best to just rely on canonical formulae.
  // However, when we generate the scaled formulae, we first check that the
  // scaling factor is profitable before computing the actual ScaledReg for
  // compile time's sake.
  assert((F.isCanonical() || F.Scale != 0));
  return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy,
                              F.BaseGV, F.BaseOffset, F.HasBaseReg, F.Scale);
}

/// isLegalUse - Test whether we know how to expand the current formula.
static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
                       int64_t MaxOffset, LSRUse::KindType Kind, Type *AccessTy,
                       GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg,
                       int64_t Scale) {
  // We know how to expand completely foldable formulae.
  return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV,
                              BaseOffset, HasBaseReg, Scale) ||
         // Or formulae that use a base register produced by a sum of base
         // registers.
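         // That is, reg1 + 1*reg2 can be expanded as reg1 + reg2, so a scale
         // of 1 is acceptable whenever an extra base register would be.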
         (Scale == 1 &&
          isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy,
                               BaseGV, BaseOffset, true, 0));
}

static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
                       int64_t MaxOffset, LSRUse::KindType Kind, Type *AccessTy,
                       const Formula &F) {
  return isLegalUse(TTI, MinOffset, MaxOffset, Kind, AccessTy, F.BaseGV,
                    F.BaseOffset, F.HasBaseReg, F.Scale);
}

static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
                                 const LSRUse &LU, const Formula &F) {
  return isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind,
                              LU.AccessTy, F.BaseGV, F.BaseOffset, F.HasBaseReg,
                              F.Scale);
}

static unsigned getScalingFactorCost(const TargetTransformInfo &TTI,
                                     const LSRUse &LU, const Formula &F) {
  if (!F.Scale)
    return 0;

  // If the use is not completely folded in that instruction, we will have to
  // pay an extra cost only for scale != 1.
  if (!isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind,
                            LU.AccessTy, F))
    return F.Scale != 1;

  switch (LU.Kind) {
  case LSRUse::Address: {
    // Check the scaling factor cost with both the min and max offsets.
    int ScaleCostMinOffset =
      TTI.getScalingFactorCost(LU.AccessTy, F.BaseGV,
                               F.BaseOffset + LU.MinOffset,
                               F.HasBaseReg, F.Scale);
    int ScaleCostMaxOffset =
      TTI.getScalingFactorCost(LU.AccessTy, F.BaseGV,
                               F.BaseOffset + LU.MaxOffset,
                               F.HasBaseReg, F.Scale);

    assert(ScaleCostMinOffset >= 0 && ScaleCostMaxOffset >= 0 &&
           "Legal addressing mode has an illegal cost!");
    return std::max(ScaleCostMinOffset, ScaleCostMaxOffset);
  }
  case LSRUse::ICmpZero:
  case LSRUse::Basic:
  case LSRUse::Special:
    // The use is completely folded, i.e., everything is folded into the
    // instruction.
    return 0;
  }

  llvm_unreachable("Invalid LSRUse Kind!");
}

static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
                             LSRUse::KindType Kind, Type *AccessTy,
                             GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg) {
  // Fast-path: zero is always foldable.
  if (BaseOffset == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  // Canonicalize a scale of 1 to a base register if the formula doesn't
  // already have a base register.
  if (!HasBaseReg && Scale == 1) {
    Scale = 0;
    HasBaseReg = true;
  }

  return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, BaseOffset,
                              HasBaseReg, Scale);
}

static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
                             ScalarEvolution &SE, int64_t MinOffset,
                             int64_t MaxOffset, LSRUse::KindType Kind,
                             Type *AccessTy, const SCEV *S, bool HasBaseReg) {
  // Fast-path: zero is always foldable.
  if (S->isZero()) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t BaseOffset = ExtractImmediate(S, SE);
  GlobalValue *BaseGV = ExtractSymbol(S, SE);
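  // For example, (@gv + 16) decomposes into BaseGV == @gv and
  // BaseOffset == 16, leaving a zero remainder behind in S.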

  // If there's anything else involved, it's not foldable.
  if (!S->isZero()) return false;

  // Fast-path: zero is always foldable.
  if (BaseOffset == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV,
                              BaseOffset, HasBaseReg, Scale);
}

namespace {

/// IVInc - An individual increment in a Chain of IV increments.
/// Relate an IV user to an expression that computes the IV it uses from the IV
/// used by the previous link in the Chain.
///
/// For the head of a chain, IncExpr holds the absolute SCEV expression for the
/// original IVOperand. The head of the chain's IVOperand is only valid during
/// chain collection, before LSR replaces IV users. During chain generation,
/// IncExpr can be used to find the new IVOperand that computes the same
/// expression.
struct IVInc {
  Instruction *UserInst;
  Value* IVOperand;
  const SCEV *IncExpr;

  IVInc(Instruction *U, Value *O, const SCEV *E):
    UserInst(U), IVOperand(O), IncExpr(E) {}
};

// IVChain - The list of IV increments in program order.
// We typically add the head of a chain without finding subsequent links.
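//
// For example, given IR along these lines:
//   %a1 = getelementptr i8* %base, i64 %i
//   %i.4 = add i64 %i, 4
//   %a2 = getelementptr i8* %base, i64 %i.4
// the two address users can form a chain whose second link has a constant
// IncExpr of 4, letting %a2 be computed from %a1 instead of from %i.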
struct IVChain {
  SmallVector<IVInc,1> Incs;
  const SCEV *ExprBase;

  IVChain() : ExprBase(nullptr) {}

  IVChain(const IVInc &Head, const SCEV *Base)
    : Incs(1, Head), ExprBase(Base) {}

  typedef SmallVectorImpl<IVInc>::const_iterator const_iterator;

  // begin - return the first increment in the chain.
  const_iterator begin() const {
    assert(!Incs.empty());
    return std::next(Incs.begin());
  }
  const_iterator end() const {
    return Incs.end();
  }

  // hasIncs - Returns true if this chain contains any increments.
  bool hasIncs() const { return Incs.size() >= 2; }

  // add - Add an IVInc to the end of this chain.
  void add(const IVInc &X) { Incs.push_back(X); }

  // tailUserInst - Returns the last UserInst in the chain.
  Instruction *tailUserInst() const { return Incs.back().UserInst; }

  // isProfitableIncrement - Returns true if IncExpr can be profitably added to
  // this chain.
  bool isProfitableIncrement(const SCEV *OperExpr,
                             const SCEV *IncExpr,
                             ScalarEvolution&);
};

/// ChainUsers - Helper for CollectChains to track multiple IV increment uses.
/// Distinguish between FarUsers that definitely cross IV increments and
/// NearUsers that may be used between IV increments.
struct ChainUsers {
  SmallPtrSet<Instruction*, 4> FarUsers;
  SmallPtrSet<Instruction*, 4> NearUsers;
};

/// LSRInstance - This class holds state for the main loop strength reduction
/// logic.
class LSRInstance {
  IVUsers &IU;
  ScalarEvolution &SE;
  DominatorTree &DT;
  LoopInfo &LI;
  const TargetTransformInfo &TTI;
  Loop *const L;
  bool Changed;

  /// IVIncInsertPos - This is the insert position at which the current loop's
  /// induction variable increment should be placed. In simple loops, this is
  /// the latch block's terminator. But in more complicated cases, this is a
  /// position which will dominate all the in-loop post-increment users.
  Instruction *IVIncInsertPos;

  /// Factors - Interesting factors between use strides.
  SmallSetVector<int64_t, 8> Factors;

  /// Types - Interesting use types, to facilitate truncation reuse.
  SmallSetVector<Type *, 4> Types;

  /// Fixups - The list of operands which are to be replaced.
  SmallVector<LSRFixup, 16> Fixups;

  /// Uses - The list of interesting uses.
  SmallVector<LSRUse, 16> Uses;

  /// RegUses - Track which uses use which register candidates.
  RegUseTracker RegUses;

  // Limit the number of chains to avoid quadratic behavior. We don't expect to
  // have more than a few IV increment chains in a loop. Missing a Chain falls
  // back to normal LSR behavior for those uses.
  static const unsigned MaxChains = 8;

  /// IVChainVec - IV users can form a chain of IV increments.
  SmallVector<IVChain, MaxChains> IVChainVec;

  /// IVIncSet - IV users that belong to profitable IVChains.
  SmallPtrSet<Use*, MaxChains> IVIncSet;

  void OptimizeShadowIV();
  bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
  ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
  void OptimizeLoopTermCond();

  void ChainInstruction(Instruction *UserInst, Instruction *IVOper,
                        SmallVectorImpl<ChainUsers> &ChainUsersVec);
  void FinalizeChain(IVChain &Chain);
  void CollectChains();
  void GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
                       SmallVectorImpl<WeakVH> &DeadInsts);

  void CollectInterestingTypesAndFactors();
  void CollectFixupsAndInitialFormulae();

  LSRFixup &getNewFixup() {
    Fixups.push_back(LSRFixup());
    return Fixups.back();
  }

  // Support for sharing of LSRUses between LSRFixups.
  typedef DenseMap<LSRUse::SCEVUseKindPair, size_t> UseMapTy;
  UseMapTy UseMap;

  bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
                          LSRUse::KindType Kind, Type *AccessTy);

  std::pair<size_t, int64_t> getUse(const SCEV *&Expr,
                                    LSRUse::KindType Kind,
                                    Type *AccessTy);

  void DeleteUse(LSRUse &LU, size_t LUIdx);

  LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU);

  void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void CountRegisters(const Formula &F, size_t LUIdx);
  bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);

  void CollectLoopInvariantFixupsAndFormulae();

  void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
                              unsigned Depth = 0);

  void GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
                                  const Formula &Base, unsigned Depth,
                                  size_t Idx, bool IsScaledReg = false);
  void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
                                   const Formula &Base, size_t Idx,
                                   bool IsScaledReg = false);
  void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateConstantOffsetsImpl(LSRUse &LU, unsigned LUIdx,
                                   const Formula &Base,
                                   const SmallVectorImpl<int64_t> &Worklist,
                                   size_t Idx, bool IsScaledReg = false);
  void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateCrossUseConstantOffsets();
  void GenerateAllReuseFormulae();

  void FilterOutUndesirableDedicatedRegisters();

  size_t EstimateSearchSpaceComplexity() const;
  void NarrowSearchSpaceByDetectingSupersets();
  void NarrowSearchSpaceByCollapsingUnrolledCode();
  void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  void NarrowSearchSpaceByPickingWinnerRegs();
  void NarrowSearchSpaceUsingHeuristics();

  void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                    Cost &SolutionCost,
                    SmallVectorImpl<const Formula *> &Workspace,
                    const Cost &CurCost,
                    const SmallPtrSet<const SCEV *, 16> &CurRegs,
                    DenseSet<const SCEV *> &VisitedRegs) const;
  void Solve(SmallVectorImpl<const Formula *> &Solution) const;

  BasicBlock::iterator
    HoistInsertPosition(BasicBlock::iterator IP,
                        const SmallVectorImpl<Instruction *> &Inputs) const;
  BasicBlock::iterator
    AdjustInsertPositionForExpand(BasicBlock::iterator IP,
                                  const LSRFixup &LF,
                                  const LSRUse &LU,
                                  SCEVExpander &Rewriter) const;

  Value *Expand(const LSRFixup &LF,
                const Formula &F,
                BasicBlock::iterator IP,
                SCEVExpander &Rewriter,
                SmallVectorImpl<WeakVH> &DeadInsts) const;
  void RewriteForPHI(PHINode *PN, const LSRFixup &LF,
                     const Formula &F,
                     SCEVExpander &Rewriter,
                     SmallVectorImpl<WeakVH> &DeadInsts,
                     Pass *P) const;
  void Rewrite(const LSRFixup &LF,
               const Formula &F,
               SCEVExpander &Rewriter,
               SmallVectorImpl<WeakVH> &DeadInsts,
               Pass *P) const;
  void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                         Pass *P);

public:
  LSRInstance(Loop *L, Pass *P);

  bool getChanged() const { return Changed; }

  void print_factors_and_types(raw_ostream &OS) const;
  void print_fixups(raw_ostream &OS) const;
  void print_uses(raw_ostream &OS) const;
  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// OptimizeShadowIV - If IV is used in an int-to-float cast
/// inside the loop then try to eliminate the cast operation.
void LSRInstance::OptimizeShadowIV() {
  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return;

  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
       UI != E; /* empty */) {
    IVUsers::const_iterator CandidateUI = UI;
    ++UI;
    Instruction *ShadowUse = CandidateUI->getUser();
    Type *DestTy = nullptr;
    bool IsSigned = false;

    /* If shadow use is an int->float cast then insert a second IV
       to eliminate this cast.

         for (unsigned i = 0; i < n; ++i)
           foo((double)i);

       is transformed into

         double d = 0.0;
         for (unsigned i = 0; i < n; ++i, ++d)
           foo(d);
    */
    if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser())) {
      IsSigned = false;
      DestTy = UCast->getDestTy();
    }
    else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser())) {
      IsSigned = true;
      DestTy = SCast->getDestTy();
    }
    if (!DestTy) continue;

    // If target does not support DestTy natively then do not apply
    // this transformation.
    if (!TTI.isTypeLegal(DestTy)) continue;

    PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
    if (!PH) continue;
    if (PH->getNumIncomingValues() != 2) continue;

    Type *SrcTy = PH->getType();
    int Mantissa = DestTy->getFPMantissaWidth();
    if (Mantissa == -1) continue;
    if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
      continue;
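    // For example, an i64 IV cannot be represented exactly in a double, whose
    // mantissa holds 53 bits, so it is skipped, while an i32 IV fits.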

    unsigned Entry, Latch;
    if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
      Entry = 0;
      Latch = 1;
    } else {
      Entry = 1;
      Latch = 0;
    }

    ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
    if (!Init) continue;
    Constant *NewInit = ConstantFP::get(DestTy, IsSigned ?
                                        (double)Init->getSExtValue() :
                                        (double)Init->getZExtValue());

    BinaryOperator *Incr =
      dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
    if (!Incr) continue;
    if (Incr->getOpcode() != Instruction::Add
        && Incr->getOpcode() != Instruction::Sub)
      continue;

    /* Initialize new IV, double d = 0.0 in above example. */
    ConstantInt *C = nullptr;
    if (Incr->getOperand(0) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(1));
    else if (Incr->getOperand(1) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(0));
    else
      continue;

    if (!C) continue;

    // Ignore negative constants, as the code below doesn't handle them
    // correctly. TODO: Remove this restriction.
    if (!C->getValue().isStrictlyPositive()) continue;

    /* Add new PHINode. */
    PHINode *NewPH = PHINode::Create(DestTy, 2, "IV.S.", PH);

    /* create new increment. '++d' in above example. */
    Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
    BinaryOperator *NewIncr =
      BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
                               Instruction::FAdd : Instruction::FSub,
                             NewPH, CFP, "IV.S.next.", Incr);

    NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
    NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));

    /* Remove cast operation */
    ShadowUse->replaceAllUsesWith(NewPH);
    ShadowUse->eraseFromParent();
    Changed = true;
    break;
  }
}

/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
/// set the IV user and stride information and return true, otherwise return
/// false.
bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) {
  for (IVUsers::iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
    if (UI->getUser() == Cond) {
      // NOTE: we could handle setcc instructions with multiple uses here, but
      // InstCombine does it as well for simple uses, it's not clear that it
      // occurs enough in real life to handle.
      CondUse = UI;
      return true;
    }
  return false;
}

/// OptimizeMax - Rewrite the loop's terminating condition if it uses
/// a max computation.
///
/// This is a narrow solution to a specific, but acute, problem. For loops
/// like this:
///
///   i = 0;
///   do {
///     p[i] = 0.0;
///   } while (++i < n);
///
/// the trip count isn't just 'n', because 'n' might not be positive. And
/// unfortunately this can come up even for loops where the user didn't use
/// a C do-while loop. For example, seemingly well-behaved top-test loops
/// will commonly be lowered like this:
///
///   if (n > 0) {
///     i = 0;
///     do {
///       p[i] = 0.0;
///     } while (++i < n);
///   }
///
/// and then it's possible for subsequent optimization to obscure the if
/// test in such a way that indvars can't find it.
///
/// When indvars can't find the if test in loops like this, it creates a
/// max expression, which allows it to give the loop a canonical
/// induction variable:
///
///   i = 0;
///   max = n < 1 ? 1 : n;
///   do {
///     p[i] = 0.0;
///   } while (++i != max);
///
/// Canonical induction variables are necessary because the loop passes
/// are designed around them. The most obvious example of this is the
/// LoopInfo analysis, which doesn't remember trip count values. It
/// expects to be able to rediscover the trip count each time it is
/// needed, and it does this using a simple analysis that only succeeds if
/// the loop has a canonical induction variable.
///
/// However, when it comes time to generate code, the maximum operation
/// can be quite costly, especially if it's inside of an outer loop.
///
/// This function solves this problem by detecting this type of loop and
/// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
/// the instructions for the maximum computation.
///
ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
  // Check that the loop matches the pattern we're looking for.
  if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
      Cond->getPredicate() != CmpInst::ICMP_NE)
    return Cond;

  SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
  if (!Sel || !Sel->hasOneUse()) return Cond;

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return Cond;
  const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1);

  // Add one to the backedge-taken count to get the trip count.
  const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount);
  if (IterationCount != SE.getSCEV(Sel)) return Cond;

  // Check for a max calculation that matches the pattern. There's no check
  // for ICMP_ULE here because the comparison would be with zero, which
  // isn't interesting.
  CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
  const SCEVNAryExpr *Max = nullptr;
  if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) {
    Pred = ICmpInst::ICMP_SLE;
    Max = S;
  } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) {
    Pred = ICmpInst::ICMP_SLT;
    Max = S;
  } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) {
    Pred = ICmpInst::ICMP_ULT;
    Max = U;
  } else {
    // No match; bail.
    return Cond;
  }

  // To handle a max with more than two operands, this optimization would
  // require additional checking and setup.
  if (Max->getNumOperands() != 2)
    return Cond;

  const SCEV *MaxLHS = Max->getOperand(0);
  const SCEV *MaxRHS = Max->getOperand(1);

  // ScalarEvolution canonicalizes constants to the left. For < and >, look
  // for a comparison with 1. For <= and >=, a comparison with zero.
  if (!MaxLHS ||
      (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One)))
    return Cond;

  // Check the relevant induction variable for conformance to
  // the pattern.
  const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
  if (!AR || !AR->isAffine() ||
      AR->getStart() != One ||
      AR->getStepRecurrence(SE) != One)
    return Cond;

  assert(AR->getLoop() == L &&
         "Loop condition operand is an addrec in a different loop!");

  // Check the right operand of the select, and remember it, as it will
  // be used in the new comparison instruction.
  Value *NewRHS = nullptr;
  if (ICmpInst::isTrueWhenEqual(Pred)) {
    // Look for n+1, and grab n.
    if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1)))
      if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1)))
         if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS)
           NewRHS = BO->getOperand(0);
    if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2)))
      if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1)))
        if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS)
          NewRHS = BO->getOperand(0);
    if (!NewRHS)
      return Cond;
  } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS)
    NewRHS = Sel->getOperand(1);
  else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS)
    NewRHS = Sel->getOperand(2);
  else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS))
    NewRHS = SU->getValue();
  else
    // Max doesn't match expected pattern.
    return Cond;

  // Determine the new comparison opcode. It may be signed or unsigned,
  // and the original comparison may be either equality or inequality.
  if (Cond->getPredicate() == CmpInst::ICMP_EQ)
    Pred = CmpInst::getInversePredicate(Pred);

  // Ok, everything looks ok to change the condition into an SLT or SGE and
  // delete the max calculation.
  ICmpInst *NewCond =
    new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");

  // Delete the max calculation instructions.
  Cond->replaceAllUsesWith(NewCond);
  CondUse->setUser(NewCond);
  Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
  Cond->eraseFromParent();
  Sel->eraseFromParent();
  if (Cmp->use_empty())
    Cmp->eraseFromParent();
  return NewCond;
}

/// OptimizeLoopTermCond - Change loop terminating condition to use the
/// postinc iv when possible.
void
LSRInstance::OptimizeLoopTermCond() {
  SmallPtrSet<Instruction *, 4> PostIncs;

  BasicBlock *LatchBlock = L->getLoopLatch();
  SmallVector<BasicBlock*, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitingBlock = ExitingBlocks[i];

    // Get the terminating condition for the loop if possible.  If we
    // can, we want to change it to use a post-incremented version of its
    // induction variable, to allow coalescing the live ranges for the IV into
    // one register value.

    BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
    if (!TermBr)
      continue;
    // FIXME: Overly conservative, termination condition could be an 'or' etc.
    if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
      continue;

    // Search IVUsesByStride to find Cond's IVUse if there is one.
    IVStrideUse *CondUse = nullptr;
    ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
    if (!FindIVUserForCond(Cond, CondUse))
      continue;

    // If the trip count is computed in terms of a max (due to ScalarEvolution
    // being unable to find a sufficient guard, for example), change the loop
    // comparison to use SLT or ULT instead of NE.
    // One consequence of doing this now is that it disrupts the count-down
    // optimization. That's not always a bad thing though, because in such
    // cases it may still be worthwhile to avoid a max.
    Cond = OptimizeMax(Cond, CondUse);

    // If this exiting block dominates the latch block, it may also use
    // the post-inc value if it won't be shared with other uses.
    // Check for dominance.
    if (!DT.dominates(ExitingBlock, LatchBlock))
      continue;

    // Conservatively avoid trying to use the post-inc value in non-latch
    // exits if there may be pre-inc users in intervening blocks.
    if (LatchBlock != ExitingBlock)
      for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
        // Test if the use is reachable from the exiting block. This dominator
        // query is a conservative approximation of reachability.
        if (&*UI != CondUse &&
            !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) {
          // Conservatively assume there may be reuse if the quotient of their
          // strides could be a legal scale.
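          // For example, strides of 4 and 8 give a quotient of 2; if 2 is a
          // legal address scale on the target, post-inc use is declined here.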
          const SCEV *A = IU.getStride(*CondUse, L);
          const SCEV *B = IU.getStride(*UI, L);
          if (!A || !B) continue;
          if (SE.getTypeSizeInBits(A->getType()) !=
              SE.getTypeSizeInBits(B->getType())) {
            if (SE.getTypeSizeInBits(A->getType()) >
                SE.getTypeSizeInBits(B->getType()))
              B = SE.getSignExtendExpr(B, A->getType());
            else
              A = SE.getSignExtendExpr(A, B->getType());
          }
          if (const SCEVConstant *D =
                dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) {
            const ConstantInt *C = D->getValue();
            // Stride of one or negative one can have reuse with non-addresses.
            if (C->isOne() || C->isAllOnesValue())
              goto decline_post_inc;
            // Avoid weird situations.
            if (C->getValue().getMinSignedBits() >= 64 ||
                C->getValue().isMinSignedValue())
              goto decline_post_inc;
            // Check for possible scaled-address reuse.
            Type *AccessTy = getAccessType(UI->getUser());
            int64_t Scale = C->getSExtValue();
            if (TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/ nullptr,
                                          /*BaseOffset=*/ 0,
                                          /*HasBaseReg=*/ false, Scale))
              goto decline_post_inc;
            Scale = -Scale;
            if (TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/ nullptr,
                                          /*BaseOffset=*/ 0,
                                          /*HasBaseReg=*/ false, Scale))
              goto decline_post_inc;
          }
        }

    DEBUG(dbgs() << "  Change loop exiting icmp to use postinc iv: "
                 << *Cond << '\n');

    // It's possible for the setcc instruction to be anywhere in the loop, and
    // possible for it to have multiple users.  If it is not immediately before
    // the exiting block branch, move it.
    if (&*++BasicBlock::iterator(Cond) != TermBr) {
      if (Cond->hasOneUse()) {
        Cond->moveBefore(TermBr);
      } else {
        // Clone the terminating condition and insert into the loopend.
        ICmpInst *OldCond = Cond;
        Cond = cast<ICmpInst>(Cond->clone());
        Cond->setName(L->getHeader()->getName() + ".termcond");
        ExitingBlock->getInstList().insert(TermBr, Cond);

        // Clone the IVUse, as the old use still exists!
        CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace());
        TermBr->replaceUsesOfWith(OldCond, Cond);
      }
    }

    // If we get to here, we know that we can transform the setcc instruction to
    // use the post-incremented version of the IV, allowing us to coalesce the
    // live ranges for the IV correctly.
    CondUse->transformToPostInc(L);
    Changed = true;

    PostIncs.insert(Cond);
  decline_post_inc:;
  }

  // Determine an insertion point for the loop induction variable increment. It
  // must dominate all the post-inc comparisons we just set up, and it must
  // dominate the loop latch edge.
  IVIncInsertPos = L->getLoopLatch()->getTerminator();
  for (Instruction *Inst : PostIncs) {
    BasicBlock *BB =
      DT.findNearestCommonDominator(IVIncInsertPos->getParent(),
                                    Inst->getParent());
    if (BB == Inst->getParent())
      IVIncInsertPos = Inst;
    else if (BB != IVIncInsertPos->getParent())
      IVIncInsertPos = BB->getTerminator();
  }
}

/// reconcileNewOffset - Determine if the given use can accommodate a fixup
/// at the given offset and other details. If so, update the use and
/// return true.
bool
LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
                                LSRUse::KindType Kind, Type *AccessTy) {
  int64_t NewMinOffset = LU.MinOffset;
  int64_t NewMaxOffset = LU.MaxOffset;
  Type *NewAccessTy = AccessTy;

  // Check for a mismatched kind. It's tempting to collapse mismatched kinds to
  // something conservative, however this can pessimize in the case that one of
  // the uses will have all its uses outside the loop, for example.
  if (LU.Kind != Kind)
    return false;

  // Check for a mismatched access type, and fall back conservatively as needed.
  // TODO: Be less conservative when the type is similar and can use the same
  // addressing modes.
  if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
    NewAccessTy = Type::getVoidTy(AccessTy->getContext());

  // Conservatively assume HasBaseReg is true for now.
  if (NewOffset < LU.MinOffset) {
    if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr,
                          LU.MaxOffset - NewOffset, HasBaseReg))
      return false;
    NewMinOffset = NewOffset;
  } else if (NewOffset > LU.MaxOffset) {
    if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr,
                          NewOffset - LU.MinOffset, HasBaseReg))
      return false;
    NewMaxOffset = NewOffset;
  }
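  // For example, if the use currently spans offsets [0, 8] and the new fixup
  // is at -4, the use is only widened if an offset of 12 (the would-be
  // MaxOffset - MinOffset span) is still foldable.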

  // Update the use.
  LU.MinOffset = NewMinOffset;
  LU.MaxOffset = NewMaxOffset;
  LU.AccessTy = NewAccessTy;
  if (NewOffset != LU.Offsets.back())
    LU.Offsets.push_back(NewOffset);
  return true;
}

/// getUse - Return an LSRUse index and an offset value for a fixup which
/// needs the given expression, with the given kind and optional access type.
/// Either reuse an existing use or create a new one, as needed.
std::pair<size_t, int64_t>
LSRInstance::getUse(const SCEV *&Expr,
                    LSRUse::KindType Kind, Type *AccessTy) {
  const SCEV *Copy = Expr;
  int64_t Offset = ExtractImmediate(Expr, SE);

  // Basic uses can't accept any offset, for example.
  if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr,
                        Offset, /*HasBaseReg=*/ true)) {
    Expr = Copy;
    Offset = 0;
  }
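  // For example, an address expression (%base + 4) is looked up under the
  // base expression %base with Offset == 4, so fixups at (%base + 4) and
  // (%base + 8) can share a single use.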

  std::pair<UseMapTy::iterator, bool> P =
    UseMap.insert(std::make_pair(LSRUse::SCEVUseKindPair(Expr, Kind), 0));
  if (!P.second) {
    // A use already existed with this base.
    size_t LUIdx = P.first->second;
    LSRUse &LU = Uses[LUIdx];
    if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy))
      // Reuse this use.
      return std::make_pair(LUIdx, Offset);
  }

  // Create a new use.
  size_t LUIdx = Uses.size();
  P.first->second = LUIdx;
  Uses.push_back(LSRUse(Kind, AccessTy));
  LSRUse &LU = Uses[LUIdx];

  // We don't need to track redundant offsets, but we don't need to go out
  // of our way here to avoid them.
  if (LU.Offsets.empty() || Offset != LU.Offsets.back())
    LU.Offsets.push_back(Offset);

  LU.MinOffset = Offset;
  LU.MaxOffset = Offset;
  return std::make_pair(LUIdx, Offset);
}

/// DeleteUse - Delete the given use from the Uses list.
void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) {
  if (&LU != &Uses.back())
    std::swap(LU, Uses.back());
  Uses.pop_back();

  // Update RegUses.
  RegUses.SwapAndDropUse(LUIdx, Uses.size());
}

/// FindUseWithSimilarFormula - Look for a use distinct from OrigLU which has
/// a formula with the same registers as the given formula.
LSRUse *
LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
                                       const LSRUse &OrigLU) {
  // Search all uses for the formula. This could be more clever.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    // Check whether this use is close enough to OrigLU, to see whether it's
    // worthwhile looking through its formulae.
    // Ignore ICmpZero uses because they may contain formulae generated by
    // GenerateICmpZeroScales, in which case adding fixup offsets may
    // be invalid.
    if (&LU != &OrigLU &&
        LU.Kind != LSRUse::ICmpZero &&
        LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy &&
        LU.WidestFixupType == OrigLU.WidestFixupType &&
        LU.HasFormulaWithSameRegs(OrigF)) {
      // Scan through this use's formulae.
      for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
           E = LU.Formulae.end(); I != E; ++I) {
        const Formula &F = *I;
        // Check to see if this formula has the same registers and symbols
        // as OrigF.
        if (F.BaseRegs == OrigF.BaseRegs &&
            F.ScaledReg == OrigF.ScaledReg &&
            F.BaseGV == OrigF.BaseGV &&
            F.Scale == OrigF.Scale &&
            F.UnfoldedOffset == OrigF.UnfoldedOffset) {
          if (F.BaseOffset == 0)
            return &LU;
          // This is the formula where all the registers and symbols matched;
          // there aren't going to be any others. Since we declined it, we
          // can skip the rest of the formulae and proceed to the next LSRUse.
          break;
        }
      }
    }
  }

  // Nothing looked good.
  return nullptr;
}

void LSRInstance::CollectInterestingTypesAndFactors() {
  SmallSetVector<const SCEV *, 4> Strides;

  // Collect interesting types and strides.
  SmallVector<const SCEV *, 4> Worklist;
  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
    const SCEV *Expr = IU.getExpr(*UI);

    // Collect interesting types.
    Types.insert(SE.getEffectiveSCEVType(Expr->getType()));

    // Add strides for mentioned loops.
    Worklist.push_back(Expr);
    do {
      const SCEV *S = Worklist.pop_back_val();
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
        if (AR->getLoop() == L)
          Strides.insert(AR->getStepRecurrence(SE));
        Worklist.push_back(AR->getStart());
      } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
        Worklist.append(Add->op_begin(), Add->op_end());
      }
    } while (!Worklist.empty());
  }

  // Compute interesting factors from the set of interesting strides.
  for (SmallSetVector<const SCEV *, 4>::const_iterator
       I = Strides.begin(), E = Strides.end(); I != E; ++I)
    for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
         std::next(I); NewStrideIter != E; ++NewStrideIter) {
      const SCEV *OldStride = *I;
      const SCEV *NewStride = *NewStrideIter;

      if (SE.getTypeSizeInBits(OldStride->getType()) !=
          SE.getTypeSizeInBits(NewStride->getType())) {
        if (SE.getTypeSizeInBits(OldStride->getType()) >
            SE.getTypeSizeInBits(NewStride->getType()))
          NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType());
        else
          OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType());
      }
      if (const SCEVConstant *Factor =
            dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride,
                                                        SE, true))) {
        if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
          Factors.insert(Factor->getValue()->getValue().getSExtValue());
      } else if (const SCEVConstant *Factor =
                   dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride,
                                                               NewStride,
                                                               SE, true))) {
        if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
          Factors.insert(Factor->getValue()->getValue().getSExtValue());
      }
    }

  // If all uses use the same type, don't bother looking for truncation-based
  // reuse.
  if (Types.size() == 1)
    Types.clear();

  DEBUG(print_factors_and_types(dbgs()));
}

/// findIVOperand - Helper for CollectChains that finds an IV operand (computed
/// by an AddRec in this loop) within [OI,OE) or returns OE. If IVUsers mapped
/// Instructions to IVStrideUses, we could partially skip this.
static User::op_iterator
findIVOperand(User::op_iterator OI, User::op_iterator OE,
              Loop *L, ScalarEvolution &SE) {
  for(; OI != OE; ++OI) {
    if (Instruction *Oper = dyn_cast<Instruction>(*OI)) {
      if (!SE.isSCEVable(Oper->getType()))
        continue;

      if (const SCEVAddRecExpr *AR =
          dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Oper))) {
        if (AR->getLoop() == L)
          break;
      }
    }
  }
  return OI;
}

/// getWideOperand - IVChain logic must consistently peek base TruncInst
/// operands, so wrap it in a convenient helper.
static Value *getWideOperand(Value *Oper) {
  if (TruncInst *Trunc = dyn_cast<TruncInst>(Oper))
    return Trunc->getOperand(0);
  return Oper;
}

/// isCompatibleIVType - Return true if we allow an IV chain to include both
/// types.
static bool isCompatibleIVType(Value *LVal, Value *RVal) {
  Type *LType = LVal->getType();
  Type *RType = RVal->getType();
  return (LType == RType) || (LType->isPointerTy() && RType->isPointerTy());
}

/// getExprBase - Return an approximation of this SCEV expression's "base", or
/// NULL for any constant. Returning the expression itself is
/// conservative. Returning a deeper subexpression is more precise and valid as
/// long as it isn't less complex than another subexpression. For expressions
/// involving multiple unscaled values, we need to return the pointer-type
/// SCEVUnknown. This avoids forming chains across objects, such as:
/// PrevOper==a[i], IVOper==b[i], IVInc==b-a.
///
/// Since SCEVUnknown is the rightmost type, and pointers are the rightmost
/// SCEVUnknown, we simply return the rightmost SCEV operand.
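///
/// For example, the base of ((4 + (8 * %i)) + %a) is %a: the constant and
/// scaled operands are skipped and the rightmost unscaled operand, here a
/// pointer-typed SCEVUnknown, is taken as the base.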
static const SCEV *getExprBase(const SCEV *S) {
  switch (S->getSCEVType()) {
  default: // including scUnknown.
    return S;
  case scConstant:
    return nullptr;
  case scTruncate:
    return getExprBase(cast<SCEVTruncateExpr>(S)->getOperand());
  case scZeroExtend:
    return getExprBase(cast<SCEVZeroExtendExpr>(S)->getOperand());
  case scSignExtend:
    return getExprBase(cast<SCEVSignExtendExpr>(S)->getOperand());
  case scAddExpr: {
    // Skip over scaled operands (scMulExpr) to follow add operands as long as
    // there's nothing more complex.
    // FIXME: not sure if we want to recognize negation.
    const SCEVAddExpr *Add = cast<SCEVAddExpr>(S);
    for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(Add->op_end()),
           E(Add->op_begin()); I != E; ++I) {
      const SCEV *SubExpr = *I;
      if (SubExpr->getSCEVType() == scAddExpr)
        return getExprBase(SubExpr);

      if (SubExpr->getSCEVType() != scMulExpr)
        return SubExpr;
    }
    return S; // all operands are scaled, be conservative.
  }
  case scAddRecExpr:
    return getExprBase(cast<SCEVAddRecExpr>(S)->getStart());
  }
}

/// Return true if the chain increment is profitable to expand into a loop
/// invariant value, which may require its own register. A profitable chain
/// increment will be an offset relative to the same base. We allow such offsets
/// to potentially be used as a chain increment as long as they are not
/// obviously expensive to expand using real instructions.
bool IVChain::isProfitableIncrement(const SCEV *OperExpr,
                                    const SCEV *IncExpr,
                                    ScalarEvolution &SE) {
  // Aggressively form chains when -stress-ivchain.
  if (StressIVChain)
    return true;

  // Do not replace a constant offset from IV head with a nonconstant IV
  // increment.
  if (!isa<SCEVConstant>(IncExpr)) {
    const SCEV *HeadExpr = SE.getSCEV(getWideOperand(Incs[0].IVOperand));
    if (isa<SCEVConstant>(SE.getMinusSCEV(OperExpr, HeadExpr)))
      return false;
  }

  SmallPtrSet<const SCEV*, 8> Processed;
  return !isHighCostExpansion(IncExpr, Processed, SE);
}

/// Return true if the number of registers needed for the chain is estimated to
/// be less than the number required for the individual IV users. First prohibit
/// any IV users that keep the IV live across increments (the Users set should
/// be empty). Next count the number and type of increments in the chain.
///
/// Chaining IVs can lead to considerable code bloat if ISEL doesn't
/// effectively use postinc addressing modes. Only consider it profitable if the
/// increments can be computed in fewer registers when chained.
///
/// TODO: Consider an IVInc free if it's already used in other chains.
static bool
isProfitableChain(IVChain &Chain, SmallPtrSetImpl<Instruction*> &Users,
                  ScalarEvolution &SE, const TargetTransformInfo &TTI) {
  if (StressIVChain)
    return true;

  if (!Chain.hasIncs())
    return false;

  if (!Users.empty()) {
    DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n";
          for (Instruction *Inst : Users) {
            dbgs() << "  " << *Inst << "\n";
          });
    return false;
  }
  assert(!Chain.Incs.empty() && "empty IV chains are not allowed");

  // The chain itself may require a register, so initialize cost to 1.
  int cost = 1;

  // A complete chain likely eliminates the need for keeping the original IV in
  // a register. LSR does not currently know how to form a complete chain unless
  // the header phi already exists.
  if (isa<PHINode>(Chain.tailUserInst())
      && SE.getSCEV(Chain.tailUserInst()) == Chain.Incs[0].IncExpr) {
    --cost;
  }
  const SCEV *LastIncExpr = nullptr;
  unsigned NumConstIncrements = 0;
  unsigned NumVarIncrements = 0;
  unsigned NumReusedIncrements = 0;
  for (IVChain::const_iterator I = Chain.begin(), E = Chain.end();
       I != E; ++I) {

    if (I->IncExpr->isZero())
      continue;

    // Incrementing by zero or some constant is neutral. We assume constants can
    // be folded into an addressing mode or an add's immediate operand.
    if (isa<SCEVConstant>(I->IncExpr)) {
      ++NumConstIncrements;
      continue;
    }

    if (I->IncExpr == LastIncExpr)
      ++NumReusedIncrements;
    else
      ++NumVarIncrements;

    LastIncExpr = I->IncExpr;
  }
  // An IV chain with a single increment is handled by LSR's postinc
  // uses. However, a chain with multiple increments requires keeping the IV's
  // value live longer than it needs to be if chained.
  if (NumConstIncrements > 1)
    --cost;

  // Materializing increment expressions in the preheader that didn't exist in
  // the original code may cost a register. For example, sign-extended array
  // indices can produce ridiculous increments like this:
  // IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64)))
  cost += NumVarIncrements;

  // Reusing variable increments likely saves a register to hold the multiple of
  // the stride.
  cost -= NumReusedIncrements;
2624 
2625   DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost
2626                << "\n");
2627 
2628   return cost < 0;
2629 }
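
// A worked example of the bookkeeping above (hypothetical chain): for the
// increments (+8, +8, +%s, +%s), the two constant increments fold into
// addressing modes and decrement cost once, the first %s increment may cost a
// preheader register (+1), and the reused %s increment earns that back (-1).
// Starting from cost = 1, the total is 0, which is not < 0, so this chain
// would be rejected.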
2630 
2631 /// ChainInstruction - Add this IV user to an existing chain or make it the head
2632 /// of a new chain.
2633 void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
2634                                    SmallVectorImpl<ChainUsers> &ChainUsersVec) {
2635   // When IVs are used as types of varying widths, they are generally converted
2636   // to a wider type with some uses remaining narrow under a (free) trunc.
2637   Value *const NextIV = getWideOperand(IVOper);
2638   const SCEV *const OperExpr = SE.getSCEV(NextIV);
2639   const SCEV *const OperExprBase = getExprBase(OperExpr);
2640 
2641   // Visit all existing chains. Check whether this IVOper can be computed as
2642   // a profitable loop-invariant increment from the last link in the chain.
2643   unsigned ChainIdx = 0, NChains = IVChainVec.size();
2644   const SCEV *LastIncExpr = nullptr;
2645   for (; ChainIdx < NChains; ++ChainIdx) {
2646     IVChain &Chain = IVChainVec[ChainIdx];
2647 
2648     // Prune the solution space aggressively by checking that both IV operands
2649     // are expressions that operate on the same unscaled SCEVUnknown. This
2650     // "base" will be canceled by the subsequent getMinusSCEV call. Checking
2651     // first avoids creating extra SCEV expressions.
2652     if (!StressIVChain && Chain.ExprBase != OperExprBase)
2653       continue;
2654 
2655     Value *PrevIV = getWideOperand(Chain.Incs.back().IVOperand);
2656     if (!isCompatibleIVType(PrevIV, NextIV))
2657       continue;
2658 
2659     // A phi node terminates a chain.
2660     if (isa<PHINode>(UserInst) && isa<PHINode>(Chain.tailUserInst()))
2661       continue;
2662 
2663     // The increment must be loop-invariant so it can be kept in a register.
2664     const SCEV *PrevExpr = SE.getSCEV(PrevIV);
2665     const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr);
2666     if (!SE.isLoopInvariant(IncExpr, L))
2667       continue;
2668 
2669     if (Chain.isProfitableIncrement(OperExpr, IncExpr, SE)) {
2670       LastIncExpr = IncExpr;
2671       break;
2672     }
2673   }
2674   // If we haven't found a chain, create a new one, unless we hit the max. Don't
2675   // bother for phi nodes, because they must be last in the chain.
2676   if (ChainIdx == NChains) {
2677     if (isa<PHINode>(UserInst))
2678       return;
2679     if (NChains >= MaxChains && !StressIVChain) {
2680       DEBUG(dbgs() << "IV Chain Limit\n");
2681       return;
2682     }
2683     LastIncExpr = OperExpr;
2684     // IVUsers may have skipped over sign/zero extensions. We don't currently
2685     // attempt to form chains involving extensions unless they can be hoisted
2686     // into this loop's AddRec.
2687     if (!isa<SCEVAddRecExpr>(LastIncExpr))
2688       return;
2689     ++NChains;
2690     IVChainVec.push_back(IVChain(IVInc(UserInst, IVOper, LastIncExpr),
2691                                  OperExprBase));
2692     ChainUsersVec.resize(NChains);
2693     DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst
2694                  << ") IV=" << *LastIncExpr << "\n");
2695   } else {
2696     DEBUG(dbgs() << "IV Chain#" << ChainIdx << "  Inc: (" << *UserInst
2697                  << ") IV+" << *LastIncExpr << "\n");
2698     // Add this IV user to the end of the chain.
2699     IVChainVec[ChainIdx].add(IVInc(UserInst, IVOper, LastIncExpr));
2700   }
2701   IVChain &Chain = IVChainVec[ChainIdx];
2702 
2703   SmallPtrSet<Instruction*,4> &NearUsers = ChainUsersVec[ChainIdx].NearUsers;
2704   // This chain's NearUsers become FarUsers.
2705   if (!LastIncExpr->isZero()) {
2706     ChainUsersVec[ChainIdx].FarUsers.insert(NearUsers.begin(),
2707                                             NearUsers.end());
2708     NearUsers.clear();
2709   }
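  // (Illustrative note: near users share the chain's current IV value at no
  // extra cost, but once the chain advances past a nonzero increment they
  // would force the previous value to stay live, so they are demoted to far
  // users, which isProfitableChain treats as disqualifying.)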
2710 
2711   // All other uses of IVOperand become near uses of the chain.
2712   // We currently ignore intermediate values within SCEV expressions, assuming
2713   // they will eventually be used by the current chain, or can be computed
2714   // from one of the chain increments. To be more precise we could
2715   // transitively follow the uses and only add leaf IV users to the set.
2716   for (User *U : IVOper->users()) {
2717     Instruction *OtherUse = dyn_cast<Instruction>(U);
2718     if (!OtherUse)
2719       continue;
2720     // Uses in the chain will no longer be uses if the chain is formed.
2721     // Include the head of the chain in this iteration (not Chain.begin()).
2722     IVChain::const_iterator IncIter = Chain.Incs.begin();
2723     IVChain::const_iterator IncEnd = Chain.Incs.end();
2724     for (; IncIter != IncEnd; ++IncIter) {
2725       if (IncIter->UserInst == OtherUse)
2726         break;
2727     }
2728     if (IncIter != IncEnd)
2729       continue;
2730 
2731     if (SE.isSCEVable(OtherUse->getType())
2732         && !isa<SCEVUnknown>(SE.getSCEV(OtherUse))
2733         && IU.isIVUserOrOperand(OtherUse)) {
2734       continue;
2735     }
2736     NearUsers.insert(OtherUse);
2737   }
2738 
2739   // Since this user is part of the chain, it's no longer considered a use
2740   // of the chain.
2741   ChainUsersVec[ChainIdx].FarUsers.erase(UserInst);
2742 }
2743 
2744 /// CollectChains - Populate the vector of Chains.
2745 ///
2746 /// This decreases ILP at the architecture level. Targets with ample registers,
2747 /// multiple memory ports, and no register renaming probably don't want
2748 /// this. However, such targets should probably disable LSR altogether.
2749 ///
2750 /// The job of LSR is to make a reasonable choice of induction variables across
2751 /// the loop. Subsequent passes can easily "unchain" computation exposing more
2752 /// ILP *within the loop* if the target wants it.
2753 ///
2754 /// Finding the best IV chain is potentially a scheduling problem. Since LSR
2755 /// will not reorder memory operations, it will recognize the accesses below as
2756 /// a chain, but will generate redundant IV increments. Ideally this would be
2757 /// corrected later by a smart scheduler:
2758 ///        = A[i]
2759 ///        = A[i+x]
2760 /// A[i]   =
2761 /// A[i+x] =
2762 ///
2763 /// TODO: Walk the entire domtree within this loop, not just the path to the
2764 /// loop latch. This will discover chains on side paths, but requires
2765 /// maintaining multiple copies of the Chains state.
2766 void LSRInstance::CollectChains() {
2767   DEBUG(dbgs() << "Collecting IV Chains.\n");
2768   SmallVector<ChainUsers, 8> ChainUsersVec;
2769 
2770   SmallVector<BasicBlock *,8> LatchPath;
2771   BasicBlock *LoopHeader = L->getHeader();
2772   for (DomTreeNode *Rung = DT.getNode(L->getLoopLatch());
2773        Rung->getBlock() != LoopHeader; Rung = Rung->getIDom()) {
2774     LatchPath.push_back(Rung->getBlock());
2775   }
2776   LatchPath.push_back(LoopHeader);
2777 
2778   // Walk the instruction stream from the loop header to the loop latch.
2779   for (SmallVectorImpl<BasicBlock *>::reverse_iterator
2780          BBIter = LatchPath.rbegin(), BBEnd = LatchPath.rend();
2781        BBIter != BBEnd; ++BBIter) {
2782     for (BasicBlock::iterator I = (*BBIter)->begin(), E = (*BBIter)->end();
2783          I != E; ++I) {
2784       // Skip instructions that weren't seen by IVUsers analysis.
2785       if (isa<PHINode>(I) || !IU.isIVUserOrOperand(I))
2786         continue;
2787 
2788       // Ignore users that are part of a SCEV expression. This way we only
2789       // consider leaf IV Users. This effectively rediscovers a portion of
2790       // IVUsers analysis but in program order this time.
2791       if (SE.isSCEVable(I->getType()) && !isa<SCEVUnknown>(SE.getSCEV(I)))
2792         continue;
2793 
2794       // Remove this instruction from any NearUsers set it may be in.
2795       for (unsigned ChainIdx = 0, NChains = IVChainVec.size();
2796            ChainIdx < NChains; ++ChainIdx) {
2797         ChainUsersVec[ChainIdx].NearUsers.erase(I);
2798       }
2799       // Search for operands that can be chained.
2800       SmallPtrSet<Instruction*, 4> UniqueOperands;
2801       User::op_iterator IVOpEnd = I->op_end();
2802       User::op_iterator IVOpIter = findIVOperand(I->op_begin(), IVOpEnd, L, SE);
2803       while (IVOpIter != IVOpEnd) {
2804         Instruction *IVOpInst = cast<Instruction>(*IVOpIter);
2805         if (UniqueOperands.insert(IVOpInst).second)
2806           ChainInstruction(I, IVOpInst, ChainUsersVec);
2807         IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
2808       }
2809     } // Continue walking down the instructions.
2810   } // Continue walking down the domtree.
2811   // Visit phi backedges to determine if the chain can generate the IV postinc.
2812   for (BasicBlock::iterator I = L->getHeader()->begin();
2813        PHINode *PN = dyn_cast<PHINode>(I); ++I) {
2814     if (!SE.isSCEVable(PN->getType()))
2815       continue;
2816 
2817     Instruction *IncV =
2818       dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch()));
2819     if (IncV)
2820       ChainInstruction(PN, IncV, ChainUsersVec);
2821   }
2822   // Remove any unprofitable chains.
2823   unsigned ChainIdx = 0;
2824   for (unsigned UsersIdx = 0, NChains = IVChainVec.size();
2825        UsersIdx < NChains; ++UsersIdx) {
2826     if (!isProfitableChain(IVChainVec[UsersIdx],
2827                            ChainUsersVec[UsersIdx].FarUsers, SE, TTI))
2828       continue;
2829     // Preserve the chain at UsersIdx.
2830     if (ChainIdx != UsersIdx)
2831       IVChainVec[ChainIdx] = IVChainVec[UsersIdx];
2832     FinalizeChain(IVChainVec[ChainIdx]);
2833     ++ChainIdx;
2834   }
2835   IVChainVec.resize(ChainIdx);
2836 }
2837 
2838 void LSRInstance::FinalizeChain(IVChain &Chain) {
2839   assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
2840   DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n");
2841 
2842   for (IVChain::const_iterator I = Chain.begin(), E = Chain.end();
2843        I != E; ++I) {
2844     DEBUG(dbgs() << "        Inc: " << *I->UserInst << "\n");
2845     User::op_iterator UseI =
2846       std::find(I->UserInst->op_begin(), I->UserInst->op_end(), I->IVOperand);
2847     assert(UseI != I->UserInst->op_end() && "cannot find IV operand");
2848     IVIncSet.insert(UseI);
2849   }
2850 }
2851 
2852 /// Return true if the IVInc can be folded into an addressing mode.
2853 static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
2854                              Value *Operand, const TargetTransformInfo &TTI) {
2855   const SCEVConstant *IncConst = dyn_cast<SCEVConstant>(IncExpr);
2856   if (!IncConst || !isAddressUse(UserInst, Operand))
2857     return false;
2858 
2859   if (IncConst->getValue()->getValue().getMinSignedBits() > 64)
2860     return false;
2861 
2862   int64_t IncOffset = IncConst->getValue()->getSExtValue();
2863   if (!isAlwaysFoldable(TTI, LSRUse::Address,
2864                         getAccessType(UserInst), /*BaseGV=*/ nullptr,
2865                         IncOffset, /*HasBaseReg=*/ false))
2866     return false;
2867 
2868   return true;
2869 }
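
// For example, on a target whose loads support reg+imm addressing (x86-style),
// a chained load with a constant increment of 8 can fold the step into its
// address operand as [IVSrc + 8], so no separate add is needed for that link.
// (Illustrative only; the actual legality is whatever TTI reports above.)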
2870 
2871 /// GenerateIVChain - Generate an add or subtract for each IVInc in a chain to
2872 /// materialize the IV user's operand from the previous IV user's operand.
2873 void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
2874                                   SmallVectorImpl<WeakVH> &DeadInsts) {
2875   // Find the new IVOperand for the head of the chain. It may have been replaced
2876   // by LSR.
2877   const IVInc &Head = Chain.Incs[0];
2878   User::op_iterator IVOpEnd = Head.UserInst->op_end();
2879   // findIVOperand returns IVOpEnd if it can no longer find a valid IV user.
2880   User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(),
2881                                              IVOpEnd, L, SE);
2882   Value *IVSrc = nullptr;
2883   while (IVOpIter != IVOpEnd) {
2884     IVSrc = getWideOperand(*IVOpIter);
2885 
2886     // If this operand computes the expression that the chain needs, we may use
2887     // it. (Check this after setting IVSrc which is used below.)
2888     //
2889     // Note that if Head.IncExpr is wider than IVSrc, then this phi is too
2890     // narrow for the chain, so we can no longer use it. We do allow using a
2891     // wider phi, assuming LSR checked for free truncation. In that case we
2892     // should already have a truncate on this operand such that
2893     // getSCEV(IVSrc) == IncExpr.
2894     if (SE.getSCEV(*IVOpIter) == Head.IncExpr
2895         || SE.getSCEV(IVSrc) == Head.IncExpr) {
2896       break;
2897     }
2898     IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
2899   }
2900   if (IVOpIter == IVOpEnd) {
2901     // Gracefully give up on this chain.
2902     DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n");
2903     return;
2904   }
2905 
2906   DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n");
2907   Type *IVTy = IVSrc->getType();
2908   Type *IntTy = SE.getEffectiveSCEVType(IVTy);
2909   const SCEV *LeftOverExpr = nullptr;
2910   for (IVChain::const_iterator IncI = Chain.begin(),
2911          IncE = Chain.end(); IncI != IncE; ++IncI) {
2912 
2913     Instruction *InsertPt = IncI->UserInst;
2914     if (isa<PHINode>(InsertPt))
2915       InsertPt = L->getLoopLatch()->getTerminator();
2916 
2917     // IVOper will replace the current IV User's operand. IVSrc is the IV
2918     // value currently held in a register.
2919     Value *IVOper = IVSrc;
2920     if (!IncI->IncExpr->isZero()) {
2921       // IncExpr was the result of subtraction of two narrow values, so must
2922       // be signed.
2923       const SCEV *IncExpr = SE.getNoopOrSignExtend(IncI->IncExpr, IntTy);
2924       LeftOverExpr = LeftOverExpr ?
2925         SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr;
2926     }
2927     if (LeftOverExpr && !LeftOverExpr->isZero()) {
2928       // Expand the IV increment.
2929       Rewriter.clearPostInc();
2930       Value *IncV = Rewriter.expandCodeFor(LeftOverExpr, IntTy, InsertPt);
2931       const SCEV *IVOperExpr = SE.getAddExpr(SE.getUnknown(IVSrc),
2932                                              SE.getUnknown(IncV));
2933       IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt);
2934 
2935       // If an IV increment can't be folded, use it as the next IV value.
2936       if (!canFoldIVIncExpr(LeftOverExpr, IncI->UserInst, IncI->IVOperand,
2937                             TTI)) {
2938         assert(IVTy == IVOper->getType() && "inconsistent IV increment type");
2939         IVSrc = IVOper;
2940         LeftOverExpr = nullptr;
2941       }
2942     }
2943     Type *OperTy = IncI->IVOperand->getType();
2944     if (IVTy != OperTy) {
2945       assert(SE.getTypeSizeInBits(IVTy) >= SE.getTypeSizeInBits(OperTy) &&
2946              "cannot extend a chained IV");
2947       IRBuilder<> Builder(InsertPt);
2948       IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain");
2949     }
2950     IncI->UserInst->replaceUsesOfWith(IncI->IVOperand, IVOper);
2951     DeadInsts.push_back(IncI->IVOperand);
2952   }
2953   // If LSR created a new, wider phi, we may also replace its postinc. We only
2954   // do this if we also found a wide value for the head of the chain.
2955   if (isa<PHINode>(Chain.tailUserInst())) {
2956     for (BasicBlock::iterator I = L->getHeader()->begin();
2957          PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
2958       if (!isCompatibleIVType(Phi, IVSrc))
2959         continue;
2960       Instruction *PostIncV = dyn_cast<Instruction>(
2961         Phi->getIncomingValueForBlock(L->getLoopLatch()));
2962       if (!PostIncV || (SE.getSCEV(PostIncV) != SE.getSCEV(IVSrc)))
2963         continue;
2964       Value *IVOper = IVSrc;
2965       Type *PostIncTy = PostIncV->getType();
2966       if (IVTy != PostIncTy) {
2967         assert(PostIncTy->isPointerTy() && "mixing int/ptr IV types");
2968         IRBuilder<> Builder(L->getLoopLatch()->getTerminator());
2969         Builder.SetCurrentDebugLocation(PostIncV->getDebugLoc());
2970         IVOper = Builder.CreatePointerCast(IVSrc, PostIncTy, "lsr.chain");
2971       }
2972       Phi->replaceUsesOfWith(PostIncV, IVOper);
2973       DeadInsts.push_back(PostIncV);
2974     }
2975   }
2976 }
2977 
2978 void LSRInstance::CollectFixupsAndInitialFormulae() {
2979   for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
2980     Instruction *UserInst = UI->getUser();
2981     // Skip IV users that are part of profitable IV Chains.
2982     User::op_iterator UseI = std::find(UserInst->op_begin(), UserInst->op_end(),
2983                                        UI->getOperandValToReplace());
2984     assert(UseI != UserInst->op_end() && "cannot find IV operand");
2985     if (IVIncSet.count(UseI))
2986       continue;
2987 
2988     // Record the uses.
2989     LSRFixup &LF = getNewFixup();
2990     LF.UserInst = UserInst;
2991     LF.OperandValToReplace = UI->getOperandValToReplace();
2992     LF.PostIncLoops = UI->getPostIncLoops();
2993 
2994     LSRUse::KindType Kind = LSRUse::Basic;
2995     Type *AccessTy = nullptr;
2996     if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) {
2997       Kind = LSRUse::Address;
2998       AccessTy = getAccessType(LF.UserInst);
2999     }
3000 
3001     const SCEV *S = IU.getExpr(*UI);
3002 
3003     // Equality (== and !=) ICmps are special. We can rewrite (i == N) as
3004     // (N - i == 0), and this allows (N - i) to be the expression that we work
3005     // with rather than just N or i, so we can consider the register
3006     // requirements for both N and i at the same time. Limiting this code to
3007     // equality icmps is not a problem because all interesting loops use
3008     // equality icmps, thanks to IndVarSimplify.
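    // For example (illustrative), with S = {0,+,1}<%L> for i and a
    // loop-invariant N, the rewrite below yields the ICmpZero expression
    // N - {0,+,1}<%L> = {N,+,-1}<%L>, so a single down-counting register can
    // cover the exit test instead of tracking N and i separately.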
3009     if (ICmpInst *CI = dyn_cast<ICmpInst>(LF.UserInst))
3010       if (CI->isEquality()) {
3011         // Swap the operands if needed to put the OperandValToReplace on the
3012         // left, for consistency.
3013         Value *NV = CI->getOperand(1);
3014         if (NV == LF.OperandValToReplace) {
3015           CI->setOperand(1, CI->getOperand(0));
3016           CI->setOperand(0, NV);
3017           NV = CI->getOperand(1);
3018           Changed = true;
3019         }
3020 
3021         // x == y  -->  x - y == 0
3022         const SCEV *N = SE.getSCEV(NV);
3023         if (SE.isLoopInvariant(N, L) && isSafeToExpand(N, SE)) {
3024           // S is normalized, so normalize N before folding it into S
3025           // to keep the result normalized.
3026           N = TransformForPostIncUse(Normalize, N, CI, nullptr,
3027                                      LF.PostIncLoops, SE, DT);
3028           Kind = LSRUse::ICmpZero;
3029           S = SE.getMinusSCEV(N, S);
3030         }
3031 
3032         // -1 and the negations of all interesting strides (except the negation
3033         // of -1) are now also interesting.
3034         for (size_t i = 0, e = Factors.size(); i != e; ++i)
3035           if (Factors[i] != -1)
3036             Factors.insert(-(uint64_t)Factors[i]);
3037         Factors.insert(-1);
3038       }
3039 
3040     // Set up the initial formula for this use.
3041     std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy);
3042     LF.LUIdx = P.first;
3043     LF.Offset = P.second;
3044     LSRUse &LU = Uses[LF.LUIdx];
3045     LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
3046     if (!LU.WidestFixupType ||
3047         SE.getTypeSizeInBits(LU.WidestFixupType) <
3048         SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
3049       LU.WidestFixupType = LF.OperandValToReplace->getType();
3050 
3051     // If this is the first use of this LSRUse, give it a formula.
3052     if (LU.Formulae.empty()) {
3053       InsertInitialFormula(S, LU, LF.LUIdx);
3054       CountRegisters(LU.Formulae.back(), LF.LUIdx);
3055     }
3056   }
3057 
3058   DEBUG(print_fixups(dbgs()));
3059 }
3060 
3061 /// InsertInitialFormula - Insert a formula for the given expression into
3062 /// the given use, separating out loop-variant portions from loop-invariant
3063 /// and loop-computable portions.
3064 void
3065 LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) {
3066   // Mark uses whose expressions cannot be expanded.
3067   if (!isSafeToExpand(S, SE))
3068     LU.RigidFormula = true;
3069 
3070   Formula F;
3071   F.InitialMatch(S, L, SE);
3072   bool Inserted = InsertFormula(LU, LUIdx, F);
3073   assert(Inserted && "Initial formula already exists!"); (void)Inserted;
3074 }
3075 
3076 /// InsertSupplementalFormula - Insert a simple single-register formula for
3077 /// the given expression into the given use.
3078 void
3079 LSRInstance::InsertSupplementalFormula(const SCEV *S,
3080                                        LSRUse &LU, size_t LUIdx) {
3081   Formula F;
3082   F.BaseRegs.push_back(S);
3083   F.HasBaseReg = true;
3084   bool Inserted = InsertFormula(LU, LUIdx, F);
3085   assert(Inserted && "Supplemental formula already exists!"); (void)Inserted;
3086 }
3087 
3088 /// CountRegisters - Note which registers are used by the given formula,
3089 /// updating RegUses.
3090 void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) {
3091   if (F.ScaledReg)
3092     RegUses.CountRegister(F.ScaledReg, LUIdx);
3093   for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
3094        E = F.BaseRegs.end(); I != E; ++I)
3095     RegUses.CountRegister(*I, LUIdx);
3096 }
3097 
3098 /// InsertFormula - If the given formula has not yet been inserted, add it to
3099 /// the list, and return true. Return false otherwise.
3100 bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) {
3101   // Do not insert a formula that we will not be able to expand.
3102   assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F) &&
3103          "Formula is illegal");
3104   if (!LU.InsertFormula(F))
3105     return false;
3106 
3107   CountRegisters(F, LUIdx);
3108   return true;
3109 }
3110 
3111 /// CollectLoopInvariantFixupsAndFormulae - Check for other uses of
3112 /// loop-invariant values which we're tracking. These other uses will pin these
3113 /// values in registers, making them less profitable for elimination.
3114 /// TODO: This currently misses non-constant addrec step registers.
3115 /// TODO: Should this give more weight to users inside the loop?
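///
/// For example (illustrative), if %n is defined before the loop and is also
/// used by a store after it, that outside use pins %n in a register; recording
/// it as a fixup here lets the cost model see that folding %n out of the loop
/// formulae would not actually free a register.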
3116 void
3117 LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
3118   SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end());
3119   SmallPtrSet<const SCEV *, 32> Visited;
3120 
3121   while (!Worklist.empty()) {
3122     const SCEV *S = Worklist.pop_back_val();
3123 
3124     // Don't process the same SCEV twice.
3125     if (!Visited.insert(S).second)
3126       continue;
3127 
3128     if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S))
3129       Worklist.append(N->op_begin(), N->op_end());
3130     else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
3131       Worklist.push_back(C->getOperand());
3132     else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
3133       Worklist.push_back(D->getLHS());
3134       Worklist.push_back(D->getRHS());
3135     } else if (const SCEVUnknown *US = dyn_cast<SCEVUnknown>(S)) {
3136       const Value *V = US->getValue();
3137       if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
3138         // Look for instructions defined outside the loop.
3139         if (L->contains(Inst)) continue;
3140       } else if (isa<UndefValue>(V))
3141         // Undef doesn't have a live range, so it doesn't matter.
3142         continue;
3143       for (const Use &U : V->uses()) {
3144         const Instruction *UserInst = dyn_cast<Instruction>(U.getUser());
3145         // Ignore non-instructions.
3146         if (!UserInst)
3147           continue;
3148         // Ignore instructions in other functions (as can happen with
3149         // Constants).
3150         if (UserInst->getParent()->getParent() != L->getHeader()->getParent())
3151           continue;
3152         // Ignore instructions not dominated by the loop.
3153         const BasicBlock *UseBB = !isa<PHINode>(UserInst) ?
3154           UserInst->getParent() :
3155           cast<PHINode>(UserInst)->getIncomingBlock(
3156             PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
3157         if (!DT.dominates(L->getHeader(), UseBB))
3158           continue;
3159         // Ignore uses which are part of other SCEV expressions, to avoid
3160         // analyzing them multiple times.
3161         if (SE.isSCEVable(UserInst->getType())) {
3162           const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst));
3163           // If the user is a no-op, look through to its uses.
3164           if (!isa<SCEVUnknown>(UserS))
3165             continue;
3166           if (UserS == US) {
3167             Worklist.push_back(
3168               SE.getUnknown(const_cast<Instruction *>(UserInst)));
3169             continue;
3170           }
3171         }
3172         // Ignore icmp instructions which are already being analyzed.
3173         if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
3174           unsigned OtherIdx = !U.getOperandNo();
3175           Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx));
3176           if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L))
3177             continue;
3178         }
3179 
3180         LSRFixup &LF = getNewFixup();
3181         LF.UserInst = const_cast<Instruction *>(UserInst);
3182         LF.OperandValToReplace = U;
3183         std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, nullptr);
3184         LF.LUIdx = P.first;
3185         LF.Offset = P.second;
3186         LSRUse &LU = Uses[LF.LUIdx];
3187         LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
3188         if (!LU.WidestFixupType ||
3189             SE.getTypeSizeInBits(LU.WidestFixupType) <
3190             SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
3191           LU.WidestFixupType = LF.OperandValToReplace->getType();
3192         InsertSupplementalFormula(US, LU, LF.LUIdx);
3193         CountRegisters(LU.Formulae.back(), Uses.size() - 1);
3194         break;
3195       }
3196     }
3197   }
3198 }
3199 
3200 /// CollectSubexprs - Split S into subexpressions which can be pulled out into
3201 /// separate registers. If C is non-null, multiply each subexpression by C.
3202 ///
3203 /// Return remainder expression after factoring the subexpressions captured by
3204 /// Ops. If Ops is complete, return NULL.
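///
/// For example (illustrative), S = ((2 * %a) + {0,+,4}<%L> + 7) yields
/// Ops = [(2 * %a), {0,+,4}<%L>, 7] and a null remainder; with a non-null
/// C = 3, each captured subexpression would instead be multiplied through
/// by 3 before being pushed onto Ops.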
3205 static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
3206                                    SmallVectorImpl<const SCEV *> &Ops,
3207                                    const Loop *L,
3208                                    ScalarEvolution &SE,
3209                                    unsigned Depth = 0) {
3210   // Arbitrarily cap recursion to protect compile time.
3211   if (Depth >= 3)
3212     return S;
3213 
3214   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
3215     // Break out add operands.
3216     for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
3217          I != E; ++I) {
3218       const SCEV *Remainder = CollectSubexprs(*I, C, Ops, L, SE, Depth+1);
3219       if (Remainder)
3220         Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
3221     }
3222     return nullptr;
3223   } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
3224     // Split a non-zero base out of an addrec.
3225     if (AR->getStart()->isZero())
3226       return S;
3227 
3228     const SCEV *Remainder = CollectSubexprs(AR->getStart(),
3229                                             C, Ops, L, SE, Depth+1);
3230     // Split the non-zero AddRec unless it is part of a nested recurrence that
3231     // does not pertain to this loop.
3232     if (Remainder && (AR->getLoop() == L || !isa<SCEVAddRecExpr>(Remainder))) {
3233       Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
3234       Remainder = nullptr;
3235     }
3236     if (Remainder != AR->getStart()) {
3237       if (!Remainder)
3238         Remainder = SE.getConstant(AR->getType(), 0);
3239       return SE.getAddRecExpr(Remainder,
3240                               AR->getStepRecurrence(SE),
3241                               AR->getLoop(),
3242                               //FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
3243                               SCEV::FlagAnyWrap);
3244     }
3245   } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
3246     // Break (C * (a + b + c)) into C*a + C*b + C*c.
3247     if (Mul->getNumOperands() != 2)
3248       return S;
3249     if (const SCEVConstant *Op0 =
3250         dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
3251       C = C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0;
3252       const SCEV *Remainder =
3253         CollectSubexprs(Mul->getOperand(1), C, Ops, L, SE, Depth+1);
3254       if (Remainder)
3255         Ops.push_back(SE.getMulExpr(C, Remainder));
3256       return nullptr;
3257     }
3258   }
3259   return S;
3260 }
3261 
3262 /// \brief Helper function for LSRInstance::GenerateReassociations.
3263 void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
3264                                              const Formula &Base,
3265                                              unsigned Depth, size_t Idx,
3266                                              bool IsScaledReg) {
3267   const SCEV *BaseReg = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
3268   SmallVector<const SCEV *, 8> AddOps;
3269   const SCEV *Remainder = CollectSubexprs(BaseReg, nullptr, AddOps, L, SE);
3270   if (Remainder)
3271     AddOps.push_back(Remainder);
3272 
3273   if (AddOps.size() == 1)
3274     return;
3275 
3276   for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
3277                                                      JE = AddOps.end();
3278        J != JE; ++J) {
3279 
3280     // Loop-variant "unknown" values are uninteresting; we won't be able to
3281     // do anything meaningful with them.
3282     if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L))
3283       continue;
3284 
3285     // Don't pull a constant into a register if the constant could be folded
3286     // into an immediate field.
3287     if (isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
3288                          LU.AccessTy, *J, Base.getNumRegs() > 1))
3289       continue;
3290 
3291     // Collect all operands except *J.
3292     SmallVector<const SCEV *, 8> InnerAddOps(
3293         ((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J);
3294     InnerAddOps.append(std::next(J),
3295                        ((const SmallVector<const SCEV *, 8> &)AddOps).end());
3296 
3297     // Don't leave just a constant behind in a register if the constant could
3298     // be folded into an immediate field.
3299     if (InnerAddOps.size() == 1 &&
3300         isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
3301                          LU.AccessTy, InnerAddOps[0], Base.getNumRegs() > 1))
3302       continue;
3303 
3304     const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
3305     if (InnerSum->isZero())
3306       continue;
3307     Formula F = Base;
3308 
3309     // Add the remaining pieces of the add back into the new formula.
3310     const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum);
3311     if (InnerSumSC && SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 &&
3312         TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
3313                                 InnerSumSC->getValue()->getZExtValue())) {
3314       F.UnfoldedOffset =
3315           (uint64_t)F.UnfoldedOffset + InnerSumSC->getValue()->getZExtValue();
3316       if (IsScaledReg)
3317         F.ScaledReg = nullptr;
3318       else
3319         F.BaseRegs.erase(F.BaseRegs.begin() + Idx);
3320     } else if (IsScaledReg)
3321       F.ScaledReg = InnerSum;
3322     else
3323       F.BaseRegs[Idx] = InnerSum;
3324 
3325     // Add J as its own register, or an unfolded immediate.
3326     const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J);
3327     if (SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
3328         TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
3329                                 SC->getValue()->getZExtValue()))
3330       F.UnfoldedOffset =
3331           (uint64_t)F.UnfoldedOffset + SC->getValue()->getZExtValue();
3332     else
3333       F.BaseRegs.push_back(*J);
3334     // We may have changed the number of registers in base regs, adjust the
3335     // formula accordingly.
3336     F.Canonicalize();
3337 
3338     if (InsertFormula(LU, LUIdx, F))
3339       // If that formula hadn't been seen before, recurse to find more like
3340       // it.
3341       GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth + 1);
3342   }
3343 }
3344 
3345 /// GenerateReassociations - Split out subexpressions from adds and the bases of
3346 /// addrecs.
3347 void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
3348                                          Formula Base, unsigned Depth) {
3349   assert(Base.isCanonical() && "Input must be in the canonical form");
3350   // Arbitrarily cap recursion to protect compile time.
3351   if (Depth >= 3)
3352     return;
3353 
3354   for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
3355     GenerateReassociationsImpl(LU, LUIdx, Base, Depth, i);
3356 
3357   if (Base.Scale == 1)
3358     GenerateReassociationsImpl(LU, LUIdx, Base, Depth,
3359                                /* Idx */ -1, /* IsScaledReg */ true);
3360 }
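
// Illustrative example: a formula whose single base register is
// ((4 * %n) + {0,+,1}<%L>) can be reassociated into two registers,
// (4 * %n) and {0,+,1}<%L>, giving the solver the option of sharing
// (4 * %n) with other uses at the price of an extra register.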
3361 
3362 /// GenerateCombinations - Generate a formula consisting of all of the
3363 /// loop-dominating registers added into a single register.
3364 void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
3365                                        Formula Base) {
3366   // This method is only interesting on a plurality of registers.
3367   if (Base.BaseRegs.size() + (Base.Scale == 1) <= 1)
3368     return;
3369 
3370   // Flatten the representation, i.e., reg1 + 1*reg2 => reg1 + reg2, before
3371   // processing the formula.
3372   Base.Unscale();
3373   Formula F = Base;
3374   F.BaseRegs.clear();
3375   SmallVector<const SCEV *, 4> Ops;
3376   for (SmallVectorImpl<const SCEV *>::const_iterator
3377        I = Base.BaseRegs.begin(), E = Base.BaseRegs.end(); I != E; ++I) {
3378     const SCEV *BaseReg = *I;
3379     if (SE.properlyDominates(BaseReg, L->getHeader()) &&
3380         !SE.hasComputableLoopEvolution(BaseReg, L))
3381       Ops.push_back(BaseReg);
3382     else
3383       F.BaseRegs.push_back(BaseReg);
3384   }
3385   if (Ops.size() > 1) {
3386     const SCEV *Sum = SE.getAddExpr(Ops);
3387     // TODO: If Sum is zero, it probably means ScalarEvolution missed an
3388     // opportunity to fold something. For now, just ignore such cases
3389     // rather than proceed with zero in a register.
3390     if (!Sum->isZero()) {
3391       F.BaseRegs.push_back(Sum);
3392       F.Canonicalize();
3393       (void)InsertFormula(LU, LUIdx, F);
3394     }
3395   }
3396 }
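
// Illustrative example: a formula reg(%a) + reg(%b) + reg({0,+,1}<%L>) with
// loop-invariant %a and %b becomes reg(%a + %b) + reg({0,+,1}<%L>); the
// invariant sum is computed once outside the loop and occupies one register
// instead of two.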
3397 
3398 /// \brief Helper function for LSRInstance::GenerateSymbolicOffsets.
3399 void LSRInstance::GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
3400                                               const Formula &Base, size_t Idx,
3401                                               bool IsScaledReg) {
3402   const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
3403   GlobalValue *GV = ExtractSymbol(G, SE);
3404   if (G->isZero() || !GV)
3405     return;
3406   Formula F = Base;
3407   F.BaseGV = GV;
3408   if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
3409     return;
3410   if (IsScaledReg)
3411     F.ScaledReg = G;
3412   else
3413     F.BaseRegs[Idx] = G;
3414   (void)InsertFormula(LU, LUIdx, F);
3415 }
3416 
3417 /// GenerateSymbolicOffsets - Generate reuse formulae using symbolic offsets.
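///
/// For example (illustrative), a base register (@global + {0,+,1}<%L>) can
/// have @global extracted into the formula's BaseGV field, leaving only
/// {0,+,1}<%L> in a register on targets with symbol+register addressing.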
3418 void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
3419                                           Formula Base) {
3420   // We can't add a symbolic offset if the address already contains one.
3421   if (Base.BaseGV) return;
3422 
3423   for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
3424     GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, i);
3425   if (Base.Scale == 1)
3426     GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, /* Idx */ -1,
3427                                 /* IsScaledReg */ true);
3428 }
3429 
3430 /// \brief Helper function for LSRInstance::GenerateConstantOffsets.
3431 void LSRInstance::GenerateConstantOffsetsImpl(
3432     LSRUse &LU, unsigned LUIdx, const Formula &Base,
3433     const SmallVectorImpl<int64_t> &Worklist, size_t Idx, bool IsScaledReg) {
3434   const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
3435   for (SmallVectorImpl<int64_t>::const_iterator I = Worklist.begin(),
3436                                                 E = Worklist.end();
3437        I != E; ++I) {
3438     Formula F = Base;
3439     F.BaseOffset = (uint64_t)Base.BaseOffset - *I;
3440     if (isLegalUse(TTI, LU.MinOffset - *I, LU.MaxOffset - *I, LU.Kind,
3441                    LU.AccessTy, F)) {
3442       // Add the offset to the base register.
3443       const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), *I), G);
3444       // If it cancelled out, drop the base register, otherwise update it.
3445       if (NewG->isZero()) {
3446         if (IsScaledReg) {
3447           F.Scale = 0;
3448           F.ScaledReg = nullptr;
3449         } else
3450           F.DeleteBaseReg(F.BaseRegs[Idx]);
3451         F.Canonicalize();
3452       } else if (IsScaledReg)
3453         F.ScaledReg = NewG;
3454       else
3455         F.BaseRegs[Idx] = NewG;
3456 
3457       (void)InsertFormula(LU, LUIdx, F);
3458     }
3459   }
3460 
3461   int64_t Imm = ExtractImmediate(G, SE);
3462   if (G->isZero() || Imm == 0)
3463     return;
3464   Formula F = Base;
3465   F.BaseOffset = (uint64_t)F.BaseOffset + Imm;
3466   if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
3467     return;
3468   if (IsScaledReg)
3469     F.ScaledReg = G;
3470   else
3471     F.BaseRegs[Idx] = G;
3472   (void)InsertFormula(LU, LUIdx, F);
3473 }
3474 
3475 /// GenerateConstantOffsets - Generate reuse formulae using constant offsets.
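///
/// For example (illustrative), with LU.MinOffset = -8, a base register
/// {0,+,4}<%L> may be rewritten as reg({-8,+,4}<%L>) with the formula's
/// BaseOffset raised by 8, trading immediate range for register reuse when
/// the target's addressing modes allow it.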
3476 void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
3477                                           Formula Base) {
3478   // TODO: For now, just add the min and max offset, because it usually isn't
3479   // worthwhile looking at everything in between.
3480   SmallVector<int64_t, 2> Worklist;
3481   Worklist.push_back(LU.MinOffset);
3482   if (LU.MaxOffset != LU.MinOffset)
3483     Worklist.push_back(LU.MaxOffset);
3484 
3485   for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
3486     GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, i);
3487   if (Base.Scale == 1)
3488     GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, /* Idx */ -1,
3489                                 /* IsScaledReg */ true);
3490 }
3491 
3492 /// GenerateICmpZeroScales - For ICmpZero, check to see if we can scale up
3493 /// the comparison. For example, x == y -> x*c == y*c.
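///
/// Concretely (illustrative), if the stride 4 is interesting and the use is
/// ICmpZero on ({0,+,1}<%L> - %n), scaling everything by 4 gives
/// ({0,+,4}<%L> - (4 * %n)), which lets the comparison share the existing
/// {0,+,4}<%L> register with the loop's address uses.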
3494 void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
3495                                          Formula Base) {
3496   if (LU.Kind != LSRUse::ICmpZero) return;
3497 
3498   // Determine the integer type for the base formula.
3499   Type *IntTy = Base.getType();
3500   if (!IntTy) return;
3501   if (SE.getTypeSizeInBits(IntTy) > 64) return;
3502 
3503   // Don't do this if there is more than one offset.
3504   if (LU.MinOffset != LU.MaxOffset) return;
3505 
3506   assert(!Base.BaseGV && "ICmpZero use is not legal!");
3507 
3508   // Check each interesting stride.
3509   for (SmallSetVector<int64_t, 8>::const_iterator
3510        I = Factors.begin(), E = Factors.end(); I != E; ++I) {
3511     int64_t Factor = *I;
3512 
3513     // Check that the multiplication doesn't overflow.
3514     if (Base.BaseOffset == INT64_MIN && Factor == -1)
3515       continue;
3516     int64_t NewBaseOffset = (uint64_t)Base.BaseOffset * Factor;
3517     if (NewBaseOffset / Factor != Base.BaseOffset)
3518       continue;
3519     // If the offset will be truncated at this use, check that it is in bounds.
3520     if (!IntTy->isPointerTy() &&
3521         !ConstantInt::isValueValidForType(IntTy, NewBaseOffset))
3522       continue;
3523 
3524     // Check that multiplying with the use offset doesn't overflow.
3525     int64_t Offset = LU.MinOffset;
3526     if (Offset == INT64_MIN && Factor == -1)
3527       continue;
3528     Offset = (uint64_t)Offset * Factor;
3529     if (Offset / Factor != LU.MinOffset)
3530       continue;
3531     // If the offset will be truncated at this use, check that it is in bounds.
3532     if (!IntTy->isPointerTy() &&
3533         !ConstantInt::isValueValidForType(IntTy, Offset))
3534       continue;
3535 
3536     Formula F = Base;
3537     F.BaseOffset = NewBaseOffset;
3538 
3539     // Check that this scale is legal.
3540     if (!isLegalUse(TTI, Offset, Offset, LU.Kind, LU.AccessTy, F))
3541       continue;
3542 
3543     // Compensate for the use having MinOffset built into it.
3544     F.BaseOffset = (uint64_t)F.BaseOffset + Offset - LU.MinOffset;
3545 
3546     const SCEV *FactorS = SE.getConstant(IntTy, Factor);
3547 
3548     // Check that multiplying with each base register doesn't overflow.
3549     for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
3550       F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS);
3551       if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
3552         goto next;
3553     }
3554 
3555     // Check that multiplying with the scaled register doesn't overflow.
3556     if (F.ScaledReg) {
3557       F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS);
3558       if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
3559         continue;
3560     }
3561 
3562     // Check that multiplying with the unfolded offset doesn't overflow.
3563     if (F.UnfoldedOffset != 0) {
3564       if (F.UnfoldedOffset == INT64_MIN && Factor == -1)
3565         continue;
3566       F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset * Factor;
3567       if (F.UnfoldedOffset / Factor != Base.UnfoldedOffset)
3568         continue;
3569       // If the offset will be truncated, check that it is in bounds.
3570       if (!IntTy->isPointerTy() &&
3571           !ConstantInt::isValueValidForType(IntTy, F.UnfoldedOffset))
3572         continue;
3573     }
3574 
3575     // If we make it here and it's legal, add it.
3576     (void)InsertFormula(LU, LUIdx, F);
3577   next:;
3578   }
3579 }
3580 
3581 /// GenerateScales - Generate stride factor reuse formulae by making use of
3582 /// scaled-offset address modes, for example.
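///
/// For example (illustrative), on a target supporting base + 4*index
/// addressing, the base register {0,+,4}<%L> can be re-expressed as
/// 4 * {0,+,1}<%L>, letting the narrower addrec serve several strides
/// through the formula's scale field.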
3583 void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
3584   // Determine the integer type for the base formula.
3585   Type *IntTy = Base.getType();
3586   if (!IntTy) return;
3587 
3588   // If this Formula already has a scaled register, we can't add another one.
3589   // Try to unscale the formula to generate a better scale.
3590   if (Base.Scale != 0 && !Base.Unscale())
3591     return;
3592 
3593   assert(Base.Scale == 0 && "Unscale did not do its job!");
3594 
3595   // Check each interesting stride.
3596   for (SmallSetVector<int64_t, 8>::const_iterator
3597        I = Factors.begin(), E = Factors.end(); I != E; ++I) {
3598     int64_t Factor = *I;
3599 
3600     Base.Scale = Factor;
3601     Base.HasBaseReg = Base.BaseRegs.size() > 1;
3602     // Check whether this scale is going to be legal.
3603     if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
3604                     Base)) {
3605       // As a special case, convert out-of-loop Basic users to Special uses.
3606       // TODO: Reconsider this special case.
3607       if (LU.Kind == LSRUse::Basic &&
3608           isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LSRUse::Special,
3609                      LU.AccessTy, Base) &&
3610           LU.AllFixupsOutsideLoop)
3611         LU.Kind = LSRUse::Special;
3612       else
3613         continue;
3614     }
3615     // For an ICmpZero, negating a solitary base register won't lead to
3616     // new solutions.
3617     if (LU.Kind == LSRUse::ICmpZero &&
3618         !Base.HasBaseReg && Base.BaseOffset == 0 && !Base.BaseGV)
3619       continue;
3620     // For each addrec base reg, apply the scale, if possible.
3621     for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
3622       if (const SCEVAddRecExpr *AR =
3623             dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i])) {
3624         const SCEV *FactorS = SE.getConstant(IntTy, Factor);
3625         if (FactorS->isZero())
3626           continue;
3627         // Divide out the factor, ignoring high bits, since we'll be
3628         // scaling the value back up in the end.
3629         if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) {
3630           // TODO: This could be optimized to avoid all the copying.
3631           Formula F = Base;
3632           F.ScaledReg = Quotient;
3633           F.DeleteBaseReg(F.BaseRegs[i]);
3634           // The canonical representation of 1*reg is reg, which is already in
3635           // Base. In that case, do not try to insert the formula, it will be
3636           // rejected anyway.
3637           if (F.Scale == 1 && F.BaseRegs.empty())
3638             continue;
3639           (void)InsertFormula(LU, LUIdx, F);
3640         }
3641       }
3642   }
3643 }
3644 
3645 /// GenerateTruncates - Generate reuse formulae from different IV types.
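///
/// For example (illustrative), if i64 registers already exist and the target
/// reports i64 -> i32 truncation as free, an i32 use's formula can be
/// any-extended to i64 so that it shares those registers, with a trunc
/// inserted at the use.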
3646 void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
3647   // Don't bother truncating symbolic values.
3648   if (Base.BaseGV) return;
3649 
3650   // Determine the integer type for the base formula.
3651   Type *DstTy = Base.getType();
3652   if (!DstTy) return;
3653   DstTy = SE.getEffectiveSCEVType(DstTy);
3654 
3655   for (SmallSetVector<Type *, 4>::const_iterator
3656        I = Types.begin(), E = Types.end(); I != E; ++I) {
3657     Type *SrcTy = *I;
3658     if (SrcTy != DstTy && TTI.isTruncateFree(SrcTy, DstTy)) {
3659       Formula F = Base;
3660 
3661       if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I);
3662       for (SmallVectorImpl<const SCEV *>::iterator J = F.BaseRegs.begin(),
3663            JE = F.BaseRegs.end(); J != JE; ++J)
3664         *J = SE.getAnyExtendExpr(*J, SrcTy);
3665 
3666       // TODO: This assumes we've done basic processing on all uses and
3667       // have an idea what the register usage is.
3668       if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses))
3669         continue;
3670 
3671       (void)InsertFormula(LU, LUIdx, F);
3672     }
3673   }
3674 }
3675 
3676 namespace {
3677 
3678 /// WorkItem - Helper class for GenerateCrossUseConstantOffsets. It's used to
3679 /// defer modifications so that the search phase doesn't have to worry about
3680 /// the data structures moving underneath it.
3681 struct WorkItem {
3682   size_t LUIdx;
3683   int64_t Imm;
3684   const SCEV *OrigReg;
3685 
3686   WorkItem(size_t LI, int64_t I, const SCEV *R)
3687     : LUIdx(LI), Imm(I), OrigReg(R) {}
3688 
3689   void print(raw_ostream &OS) const;
3690   void dump() const;
3691 };
3692 
3693 }
3694 
3695 void WorkItem::print(raw_ostream &OS) const {
3696   OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
3697      << ", add offset " << Imm;
3698 }
3699 
3700 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3701 void WorkItem::dump() const {
3702   print(errs()); errs() << '\n';
3703 }
3704 #endif
3705 
3706 /// GenerateCrossUseConstantOffsets - Look for registers which are a constant
3707 /// distance apart and try to form reuse opportunities between them.
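///
/// For example (illustrative), if one use needs reg({0,+,4}<%L>) and another
/// needs reg({16,+,4}<%L>), the two registers differ by the constant 16, so
/// the second use can share reg({0,+,4}<%L>) and fold the 16 into its
/// immediate offset instead of materializing a second induction register.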
3708 void LSRInstance::GenerateCrossUseConstantOffsets() {
3709   // Group the registers by their value without any added constant offset.
3710   typedef std::map<int64_t, const SCEV *> ImmMapTy;
3711   typedef DenseMap<const SCEV *, ImmMapTy> RegMapTy;
3712   RegMapTy Map;
3713   DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
3714   SmallVector<const SCEV *, 8> Sequence;
3715   for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
3716        I != E; ++I) {
3717     const SCEV *Reg = *I;
3718     int64_t Imm = ExtractImmediate(Reg, SE);
3719     std::pair<RegMapTy::iterator, bool> Pair =
3720       Map.insert(std::make_pair(Reg, ImmMapTy()));
3721     if (Pair.second)
3722       Sequence.push_back(Reg);
3723     Pair.first->second.insert(std::make_pair(Imm, *I));
3724     UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(*I);
3725   }
3726 
3727   // Now examine each set of registers with the same base value. Build up
3728   // a list of work to do and do the work in a separate step so that we're
3729   // not adding formulae and register counts while we're searching.
3730   SmallVector<WorkItem, 32> WorkItems;
3731   SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
3732   for (SmallVectorImpl<const SCEV *>::const_iterator I = Sequence.begin(),
3733        E = Sequence.end(); I != E; ++I) {
3734     const SCEV *Reg = *I;
3735     const ImmMapTy &Imms = Map.find(Reg)->second;
3736 
3737     // It's not worthwhile looking for reuse if there's only one offset.
3738     if (Imms.size() == 1)
3739       continue;
3740 
3741     DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
3742           for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
3743                J != JE; ++J)
3744             dbgs() << ' ' << J->first;
3745           dbgs() << '\n');
3746 
3747     // Examine each offset.
3748     for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
3749          J != JE; ++J) {
3750       const SCEV *OrigReg = J->second;
3751 
3752       int64_t JImm = J->first;
3753       const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);
3754 
3755       if (!isa<SCEVConstant>(OrigReg) &&
3756           UsedByIndicesMap[Reg].count() == 1) {
3757         DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg << '\n');
3758         continue;
3759       }
3760 
3761       // Conservatively examine offsets between this orig reg and a few
3762       // selected other orig regs.
3763       ImmMapTy::const_iterator OtherImms[] = {
3764         Imms.begin(), std::prev(Imms.end()),
3765         Imms.lower_bound((Imms.begin()->first + std::prev(Imms.end())->first) /
3766                          2)
3767       };
3768       for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) {
3769         ImmMapTy::const_iterator M = OtherImms[i];
3770         if (M == J || M == JE) continue;
3771 
3772         // Compute the difference between the two.
3773         int64_t Imm = (uint64_t)JImm - M->first;
3774         for (int LUIdx = UsedByIndices.find_first(); LUIdx != -1;
3775              LUIdx = UsedByIndices.find_next(LUIdx))
3776           // Make a memo of this use, offset, and register tuple.
3777           if (UniqueItems.insert(std::make_pair(LUIdx, Imm)).second)
3778             WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
3779       }
3780     }
3781   }
3782 
3783   Map.clear();
3784   Sequence.clear();
3785   UsedByIndicesMap.clear();
3786   UniqueItems.clear();
3787 
3788   // Now iterate through the worklist and add new formulae.
3789   for (SmallVectorImpl<WorkItem>::const_iterator I = WorkItems.begin(),
3790        E = WorkItems.end(); I != E; ++I) {
3791     const WorkItem &WI = *I;
3792     size_t LUIdx = WI.LUIdx;
3793     LSRUse &LU = Uses[LUIdx];
3794     int64_t Imm = WI.Imm;
3795     const SCEV *OrigReg = WI.OrigReg;
3796 
3797     Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
3798     const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
3799     unsigned BitWidth = SE.getTypeSizeInBits(IntTy);
3800 
3801     // TODO: Use a more targeted data structure.
3802     for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
3803       Formula F = LU.Formulae[L];
3804       // FIXME: The code for the scaled and unscaled registers looks
3805       // very similar but slightly different. Investigate if they
3806       // could be merged. That way, we would not have to unscale the
3807       // Formula.
3808       F.Unscale();
3809       // Use the immediate in the scaled register.
3810       if (F.ScaledReg == OrigReg) {
3811         int64_t Offset = (uint64_t)F.BaseOffset + Imm * (uint64_t)F.Scale;
3812         // Don't create 50 + reg(-50).
3813         if (F.referencesReg(SE.getSCEV(
3814                    ConstantInt::get(IntTy, -(uint64_t)Offset))))
3815           continue;
3816         Formula NewF = F;
3817         NewF.BaseOffset = Offset;
3818         if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
3819                         NewF))
3820           continue;
3821         NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);
3822 
3823         // If the new scale is a constant in a register, and adding the constant
3824         // value to the immediate would produce a value closer to zero than the
3825         // immediate itself, then the formula isn't worthwhile.
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg))
          if (C->getValue()->isNegative() !=
                (NewF.BaseOffset < 0) &&
              (C->getValue()->getValue().abs() * APInt(BitWidth, F.Scale))
                .ule(abs64(NewF.BaseOffset)))
            continue;

        // OK, looks good.
        NewF.Canonicalize();
        (void)InsertFormula(LU, LUIdx, NewF);
      } else {
        // Use the immediate in a base register.
        for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) {
          const SCEV *BaseReg = F.BaseRegs[N];
          if (BaseReg != OrigReg)
            continue;
          Formula NewF = F;
          NewF.BaseOffset = (uint64_t)NewF.BaseOffset + Imm;
          if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset,
                          LU.Kind, LU.AccessTy, NewF)) {
            if (!TTI.isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm))
              continue;
            NewF = F;
            NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm;
          }
          NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg);

          // If the new formula has a constant in a register, and adding the
          // constant value to the immediate would produce a value closer to
          // zero than the immediate itself, then the formula isn't worthwhile.
          for (SmallVectorImpl<const SCEV *>::const_iterator
               J = NewF.BaseRegs.begin(), JE = NewF.BaseRegs.end();
               J != JE; ++J)
            if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*J))
              if ((C->getValue()->getValue() + NewF.BaseOffset).abs().slt(
                   abs64(NewF.BaseOffset)) &&
                  (C->getValue()->getValue() +
                   NewF.BaseOffset).countTrailingZeros() >=
                   countTrailingZeros<uint64_t>(NewF.BaseOffset))
                goto skip_formula;

          // Ok, looks good.
          NewF.Canonicalize();
          (void)InsertFormula(LU, LUIdx, NewF);
          break;
        skip_formula:;
        }
      }
    }
  }
}

/// GenerateAllReuseFormulae - Generate formulae for each use.
void
LSRInstance::GenerateAllReuseFormulae() {
  // This is split into multiple loops so that hasRegsUsedByUsesOtherThan
  // queries are more precise.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateReassociations(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateCombinations(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateScales(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
  }

  GenerateCrossUseConstantOffsets();

  DEBUG(dbgs() << "\n"
                  "After generating reuse formulae:\n";
        print_uses(dbgs()));
}

/// If there are multiple formulae with the same set of registers used
/// by other uses, pick the best one and delete the others.
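/// For example, if two formulae for a use share only reg(A) with other uses,
/// they are interchangeable as far as cross-use register pressure goes, so
/// only the cheaper of the two needs to survive.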
void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
  DenseSet<const SCEV *> VisitedRegs;
  SmallPtrSet<const SCEV *, 16> Regs;
  SmallPtrSet<const SCEV *, 16> LoserRegs;
#ifndef NDEBUG
  bool ChangedFormulae = false;
#endif

  // Collect the best formula for each unique set of shared registers. This
  // is reset for each use.
  typedef DenseMap<SmallVector<const SCEV *, 4>, size_t, UniquifierDenseMapInfo>
    BestFormulaeTy;
  BestFormulaeTy BestFormulae;

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << '\n');

    bool Any = false;
    for (size_t FIdx = 0, NumForms = LU.Formulae.size();
         FIdx != NumForms; ++FIdx) {
      Formula &F = LU.Formulae[FIdx];

      // Some formulas are instant losers. For example, they may depend on
      // nonexistent AddRecs from other loops. These need to be filtered
      // immediately, otherwise heuristics could choose them over others leading
      // to an unsatisfactory solution. Passing LoserRegs into RateFormula here
      // avoids the need to recompute this information across formulae using the
      // same bad AddRec. Passing LoserRegs is also essential unless we remove
      // the corresponding bad register from the Regs set.
      Cost CostF;
      Regs.clear();
      CostF.RateFormula(TTI, F, Regs, VisitedRegs, L, LU.Offsets, SE, DT, LU,
                        &LoserRegs);
      if (CostF.isLoser()) {
        // During initial formula generation, undesirable formulae are generated
        // by uses within other loops that have some non-trivial address mode or
        // use the postinc form of the IV. LSR needs to provide these formulae
        // as the basis of rediscovering the desired formula that uses an AddRec
        // corresponding to the existing phi. Once all formulae have been
        // generated, these initial losers may be pruned.
        DEBUG(dbgs() << "  Filtering loser "; F.print(dbgs());
              dbgs() << "\n");
      }
      else {
        SmallVector<const SCEV *, 4> Key;
        for (SmallVectorImpl<const SCEV *>::const_iterator J = F.BaseRegs.begin(),
               JE = F.BaseRegs.end(); J != JE; ++J) {
          const SCEV *Reg = *J;
          if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
            Key.push_back(Reg);
        }
        if (F.ScaledReg &&
            RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
          Key.push_back(F.ScaledReg);
        // Unstable sort by host order ok, because this is only used for
        // uniquifying.
        std::sort(Key.begin(), Key.end());

        std::pair<BestFormulaeTy::const_iterator, bool> P =
          BestFormulae.insert(std::make_pair(Key, FIdx));
        if (P.second)
          continue;

        Formula &Best = LU.Formulae[P.first->second];

        Cost CostBest;
        Regs.clear();
        CostBest.RateFormula(TTI, Best, Regs, VisitedRegs, L, LU.Offsets, SE,
                             DT, LU);
        if (CostF < CostBest)
          std::swap(F, Best);
        DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs());
              dbgs() << "\n"
                        "    in favor of formula "; Best.print(dbgs());
              dbgs() << '\n');
      }
#ifndef NDEBUG
      ChangedFormulae = true;
#endif
      LU.DeleteFormula(F);
      --FIdx;
      --NumForms;
      Any = true;
    }

    // Now that we've filtered out some formulae, recompute the Regs set.
    if (Any)
      LU.RecomputeRegs(LUIdx, RegUses);

    // Reset this to prepare for the next use.
    BestFormulae.clear();
  }

  DEBUG(if (ChangedFormulae) {
          dbgs() << "\n"
                    "After filtering out undesirable candidates:\n";
          print_uses(dbgs());
        });
}

// This is a rough guess that seems to work fairly well.
static const size_t ComplexityLimit = UINT16_MAX;

/// EstimateSearchSpaceComplexity - Estimate the worst-case number of
/// solutions the solver might have to consider. It almost never considers
/// this many solutions because it prunes the search space, but the pruning
/// isn't always sufficient.
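/// For example, three uses with 10, 20, and 40 formulae respectively give a
/// worst-case estimate of 10 * 20 * 40 = 8000 candidate solutions, since the
/// solver picks one formula per use.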
size_t LSRInstance::EstimateSearchSpaceComplexity() const {
  size_t Power = 1;
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    size_t FSize = I->Formulae.size();
    if (FSize >= ComplexityLimit) {
      Power = ComplexityLimit;
      break;
    }
    Power *= FSize;
    if (Power >= ComplexityLimit)
      break;
  }
  return Power;
}

/// NarrowSearchSpaceByDetectingSupersets - When one formula uses a superset
/// of the registers of another formula, it won't help reduce register
/// pressure (though it may not necessarily hurt register pressure); remove
/// it to simplify the system.
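/// For example, if a use has both reg(A) + reg(12) and reg(A) with a base
/// offset of 12 (the constant folded into the immediate), the former ties up
/// an extra register just to hold the constant 12 and can be deleted in
/// favor of the latter.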
void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
  if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    DEBUG(dbgs() << "The search space is too complex.\n");

    DEBUG(dbgs() << "Narrowing the search space by eliminating formulae "
                    "which use a superset of registers used by other "
                    "formulae.\n");

    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      bool Any = false;
      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        // Look for a formula with a constant or GV in a register. If the use
        // also has a formula with that same value in an immediate field,
        // delete the one that uses a register.
        for (SmallVectorImpl<const SCEV *>::const_iterator
             I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) {
          if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) {
            Formula NewF = F;
            NewF.BaseOffset += C->getValue()->getSExtValue();
            NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
                                (I - F.BaseRegs.begin()));
            if (LU.HasFormulaWithSameRegs(NewF)) {
              DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
              LU.DeleteFormula(F);
              --i;
              --e;
              Any = true;
              break;
            }
          } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) {
            if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue()))
              if (!F.BaseGV) {
                Formula NewF = F;
                NewF.BaseGV = GV;
                NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
                                    (I - F.BaseRegs.begin()));
                if (LU.HasFormulaWithSameRegs(NewF)) {
                  DEBUG(dbgs() << "  Deleting "; F.print(dbgs());
                        dbgs() << '\n');
                  LU.DeleteFormula(F);
                  --i;
                  --e;
                  Any = true;
                  break;
                }
              }
          }
        }
      }
      if (Any)
        LU.RecomputeRegs(LUIdx, RegUses);
    }

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}

/// NarrowSearchSpaceByCollapsingUnrolledCode - When there are many registers
/// for expressions like A, A+1, A+2, etc., allocate a single register for
/// them.
void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
  if (EstimateSearchSpaceComplexity() < ComplexityLimit)
    return;

  DEBUG(dbgs() << "The search space is too complex.\n"
                  "Narrowing the search space by assuming that uses separated "
                  "by a constant offset will use the same registers.\n");

  // This is especially useful for unrolled loops.

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
         E = LU.Formulae.end(); I != E; ++I) {
      const Formula &F = *I;
      if (F.BaseOffset == 0 || (F.Scale != 0 && F.Scale != 1))
        continue;

      LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU);
      if (!LUThatHas)
        continue;

      if (!reconcileNewOffset(*LUThatHas, F.BaseOffset, /*HasBaseReg=*/ false,
                              LU.Kind, LU.AccessTy))
        continue;

      DEBUG(dbgs() << "  Deleting use "; LU.print(dbgs()); dbgs() << '\n');

      LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;

      // Update the relocs to reference the new use.
      for (SmallVectorImpl<LSRFixup>::iterator I = Fixups.begin(),
           E = Fixups.end(); I != E; ++I) {
        LSRFixup &Fixup = *I;
        if (Fixup.LUIdx == LUIdx) {
          Fixup.LUIdx = LUThatHas - &Uses.front();
          Fixup.Offset += F.BaseOffset;
          // Add the new offset to LUThatHas' offset list.
          if (LUThatHas->Offsets.back() != Fixup.Offset) {
            LUThatHas->Offsets.push_back(Fixup.Offset);
            if (Fixup.Offset > LUThatHas->MaxOffset)
              LUThatHas->MaxOffset = Fixup.Offset;
            if (Fixup.Offset < LUThatHas->MinOffset)
              LUThatHas->MinOffset = Fixup.Offset;
          }
          DEBUG(dbgs() << "New fixup has offset " << Fixup.Offset << '\n');
        }
        if (Fixup.LUIdx == NumUses-1)
          Fixup.LUIdx = LUIdx;
      }

      // Delete formulae from the new use which are no longer legal.
      bool Any = false;
      for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) {
        Formula &F = LUThatHas->Formulae[i];
        if (!isLegalUse(TTI, LUThatHas->MinOffset, LUThatHas->MaxOffset,
                        LUThatHas->Kind, LUThatHas->AccessTy, F)) {
          DEBUG(dbgs() << "  Deleting "; F.print(dbgs());
                dbgs() << '\n');
          LUThatHas->DeleteFormula(F);
          --i;
          --e;
          Any = true;
        }
      }

      if (Any)
        LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses);

      // Delete the old use.
      DeleteUse(LU, LUIdx);
      --LUIdx;
      --NumUses;
      break;
    }
  }

  DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
}

/// NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters - Call
/// FilterOutUndesirableDedicatedRegisters again, if necessary, now that
/// we've done more filtering, as it may be able to find more formulae to
/// eliminate.
void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
  if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    DEBUG(dbgs() << "The search space is too complex.\n");

    DEBUG(dbgs() << "Narrowing the search space by re-filtering out "
                    "undesirable dedicated registers.\n");

    FilterOutUndesirableDedicatedRegisters();

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}

/// NarrowSearchSpaceByPickingWinnerRegs - Pick a register which seems likely
/// to be profitable, and then in any use which has any reference to that
/// register, delete all formulae which do not reference that register.
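/// For example, if reg({0,+,4}<%L>) is referenced by most uses, assume the
/// final solution will contain it anyway, and prune every formula in those
/// uses that fails to mention it.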
void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
  // With all other options exhausted, loop until the system is simple
  // enough to handle.
  SmallPtrSet<const SCEV *, 4> Taken;
  while (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    // Ok, we have too many formulae on our hands to conveniently handle.
    // Use a rough heuristic to thin out the list.
    DEBUG(dbgs() << "The search space is too complex.\n");

    // Pick the register which is used by the most LSRUses, which is likely
    // to be a good reuse register candidate.
    const SCEV *Best = nullptr;
    unsigned BestNum = 0;
    for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
         I != E; ++I) {
      const SCEV *Reg = *I;
      if (Taken.count(Reg))
        continue;
      if (!Best)
        Best = Reg;
      else {
        unsigned Count = RegUses.getUsedByIndices(Reg).count();
        if (Count > BestNum) {
          Best = Reg;
          BestNum = Count;
        }
      }
    }

    DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
                 << " will yield profitable reuse.\n");
    Taken.insert(Best);

    // In any use with formulae which reference this register, delete formulae
    // which don't reference it.
    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      if (!LU.Regs.count(Best)) continue;

      bool Any = false;
      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        if (!F.referencesReg(Best)) {
          DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
          LU.DeleteFormula(F);
          --e;
          --i;
          Any = true;
          assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?");
          continue;
        }
      }

      if (Any)
        LU.RecomputeRegs(LUIdx, RegUses);
    }

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}

/// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of
/// formulae to choose from, use some rough heuristics to prune down the number
/// of formulae. This keeps the main solver from taking an extraordinary amount
/// of time in some worst-case scenarios.
void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
  NarrowSearchSpaceByDetectingSupersets();
  NarrowSearchSpaceByCollapsingUnrolledCode();
  NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  NarrowSearchSpaceByPickingWinnerRegs();
}

/// SolveRecurse - This is the recursive solver.
void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                               Cost &SolutionCost,
                               SmallVectorImpl<const Formula *> &Workspace,
                               const Cost &CurCost,
                               const SmallPtrSet<const SCEV *, 16> &CurRegs,
                               DenseSet<const SCEV *> &VisitedRegs) const {
  // Some ideas:
  //  - prune more:
  //    - use more aggressive filtering
  //    - sort the formulae so that the most profitable solutions are found first
  //    - sort the uses too
  //  - search faster:
  //    - don't compute a cost and then compare; compare while computing the cost
  //      and bail out early.
  //    - track register sets with SmallBitVector

  const LSRUse &LU = Uses[Workspace.size()];

  // If this use references any register that's already a part of the
  // in-progress solution, consider it a requirement that a formula must
  // reference that register in order to be considered. This prunes out
  // unprofitable searching.
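  // For example, if the partial solution already uses reg({0,+,1}<%L>) and
  // some formula for this use references it, formulae for this use that
  // introduce a different register instead are not worth considering.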
  SmallSetVector<const SCEV *, 4> ReqRegs;
  for (const SCEV *S : CurRegs)
    if (LU.Regs.count(S))
      ReqRegs.insert(S);

  SmallPtrSet<const SCEV *, 16> NewRegs;
  Cost NewCost;
  for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
       E = LU.Formulae.end(); I != E; ++I) {
    const Formula &F = *I;

    // Ignore formulae which may not be ideal in terms of register reuse of
    // ReqRegs.  The formula should use all required registers before
    // introducing new ones.
    int NumReqRegsToFind = std::min(F.getNumRegs(), ReqRegs.size());
    for (SmallSetVector<const SCEV *, 4>::const_iterator J = ReqRegs.begin(),
         JE = ReqRegs.end(); J != JE; ++J) {
      const SCEV *Reg = *J;
      if ((F.ScaledReg && F.ScaledReg == Reg) ||
          std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) !=
          F.BaseRegs.end()) {
        --NumReqRegsToFind;
        if (NumReqRegsToFind == 0)
          break;
      }
    }
    if (NumReqRegsToFind != 0) {
      // If none of the formulae satisfied the required registers, then we could
      // clear ReqRegs and try again. Currently, we simply give up in this case.
      continue;
    }

    // Evaluate the cost of the current formula. If it's already worse than
    // the current best, prune the search at that point.
    NewCost = CurCost;
    NewRegs = CurRegs;
    NewCost.RateFormula(TTI, F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT,
                        LU);
    if (NewCost < SolutionCost) {
      Workspace.push_back(&F);
      if (Workspace.size() != Uses.size()) {
        SolveRecurse(Solution, SolutionCost, Workspace, NewCost,
                     NewRegs, VisitedRegs);
        if (F.getNumRegs() == 1 && Workspace.size() == 1)
          VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
      } else {
        DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
              dbgs() << ".\n Regs:";
              for (const SCEV *S : NewRegs)
                dbgs() << ' ' << *S;
              dbgs() << '\n');

        SolutionCost = NewCost;
        Solution = Workspace;
      }
      Workspace.pop_back();
    }
  }
}

/// Solve - Choose one formula from each use. Return the results in the given
/// Solution vector.
void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
  SmallVector<const Formula *, 8> Workspace;
  Cost SolutionCost;
  SolutionCost.Lose();
  Cost CurCost;
  SmallPtrSet<const SCEV *, 16> CurRegs;
  DenseSet<const SCEV *> VisitedRegs;
  Workspace.reserve(Uses.size());

  // SolveRecurse does all the work.
  SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
               CurRegs, VisitedRegs);
  if (Solution.empty()) {
    DEBUG(dbgs() << "\nNo Satisfactory Solution\n");
    return;
  }

  // Ok, we've now made all our decisions.
  DEBUG(dbgs() << "\n"
                  "The chosen solution requires "; SolutionCost.print(dbgs());
        dbgs() << ":\n";
        for (size_t i = 0, e = Uses.size(); i != e; ++i) {
          dbgs() << "  ";
          Uses[i].print(dbgs());
          dbgs() << "\n"
                    "    ";
          Solution[i]->print(dbgs());
          dbgs() << '\n';
        });

  assert(Solution.size() == Uses.size() && "Malformed solution!");
}

/// HoistInsertPosition - Helper for AdjustInsertPositionForExpand. Climb up
/// the dominator tree as far as we can go while still being dominated by the
/// input positions. This helps canonicalize the insert position, which
/// encourages sharing.
BasicBlock::iterator
LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
                                 const SmallVectorImpl<Instruction *> &Inputs)
                                                                         const {
  for (;;) {
    const Loop *IPLoop = LI.getLoopFor(IP->getParent());
    unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0;

    BasicBlock *IDom;
    for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) {
      if (!Rung) return IP;
      Rung = Rung->getIDom();
      if (!Rung) return IP;
      IDom = Rung->getBlock();

      // Don't climb into a loop though.
      const Loop *IDomLoop = LI.getLoopFor(IDom);
      unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0;
      if (IDomDepth <= IPLoopDepth &&
          (IDomDepth != IPLoopDepth || IDomLoop == IPLoop))
        break;
    }

    bool AllDominate = true;
    Instruction *BetterPos = nullptr;
    Instruction *Tentative = IDom->getTerminator();
    for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(),
         E = Inputs.end(); I != E; ++I) {
      Instruction *Inst = *I;
      if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
        AllDominate = false;
        break;
      }
      // Attempt to find an insert position in the middle of the block,
      // instead of at the end, so that it can be used for other expansions.
      if (IDom == Inst->getParent() &&
          (!BetterPos || !DT.dominates(Inst, BetterPos)))
        BetterPos = std::next(BasicBlock::iterator(Inst));
    }
    if (!AllDominate)
      break;
    if (BetterPos)
      IP = BetterPos;
    else
      IP = Tentative;
  }

  return IP;
}

/// AdjustInsertPositionForExpand - Determine an input position which will be
/// dominated by the operands and which will dominate the result.
BasicBlock::iterator
LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,
                                           const LSRFixup &LF,
                                           const LSRUse &LU,
                                           SCEVExpander &Rewriter) const {
  // Collect some instructions which must be dominated by the
  // expanding replacement. These must be dominated by any operands that
  // will be required in the expansion.
  SmallVector<Instruction *, 4> Inputs;
  if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
    Inputs.push_back(I);
  if (LU.Kind == LSRUse::ICmpZero)
    if (Instruction *I =
          dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
      Inputs.push_back(I);
  if (LF.PostIncLoops.count(L)) {
    if (LF.isUseFullyOutsideLoop(L))
      Inputs.push_back(L->getLoopLatch()->getTerminator());
    else
      Inputs.push_back(IVIncInsertPos);
  }
  // The expansion must also be dominated by the increment positions of any
  // loops for which it is using post-inc mode.
  for (PostIncLoopSet::const_iterator I = LF.PostIncLoops.begin(),
       E = LF.PostIncLoops.end(); I != E; ++I) {
    const Loop *PIL = *I;
    if (PIL == L) continue;

    // Be dominated by the loop exit.
    SmallVector<BasicBlock *, 4> ExitingBlocks;
    PIL->getExitingBlocks(ExitingBlocks);
    if (!ExitingBlocks.empty()) {
      BasicBlock *BB = ExitingBlocks[0];
      for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i)
        BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]);
      Inputs.push_back(BB->getTerminator());
    }
  }

  assert(!isa<PHINode>(LowestIP) && !isa<LandingPadInst>(LowestIP)
         && !isa<DbgInfoIntrinsic>(LowestIP) &&
         "Insertion point must be a normal instruction");

  // Then, climb up the immediate dominator tree as far as we can go while
  // still being dominated by the input positions.
  BasicBlock::iterator IP = HoistInsertPosition(LowestIP, Inputs);

  // Don't insert instructions before PHI nodes.
  while (isa<PHINode>(IP)) ++IP;

  // Ignore landingpad instructions.
  while (isa<LandingPadInst>(IP)) ++IP;

  // Ignore debug intrinsics.
  while (isa<DbgInfoIntrinsic>(IP)) ++IP;

  // Set IP below instructions recently inserted by SCEVExpander. This keeps the
  // IP consistent across expansions and allows the previously inserted
  // instructions to be reused by subsequent expansion.
  while (Rewriter.isInsertedInstruction(IP) && IP != LowestIP) ++IP;

  return IP;
}

/// Expand - Emit instructions for the leading candidate expression for this
/// LSRUse (this is called "expanding").
Value *LSRInstance::Expand(const LSRFixup &LF,
                           const Formula &F,
                           BasicBlock::iterator IP,
                           SCEVExpander &Rewriter,
                           SmallVectorImpl<WeakVH> &DeadInsts) const {
  const LSRUse &LU = Uses[LF.LUIdx];
  if (LU.RigidFormula)
    return LF.OperandValToReplace;

  // Determine an input position which will be dominated by the operands and
  // which will dominate the result.
  IP = AdjustInsertPositionForExpand(IP, LF, LU, Rewriter);

  // Inform the Rewriter if we have a post-increment use, so that it can
  // perform an advantageous expansion.
  Rewriter.setPostInc(LF.PostIncLoops);

  // This is the type that the user actually needs.
  Type *OpTy = LF.OperandValToReplace->getType();
  // This will be the type that we'll initially expand to.
  Type *Ty = F.getType();
  if (!Ty)
    // No type known; just expand directly to the ultimate type.
    Ty = OpTy;
  else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy))
    // Expand directly to the ultimate type if it's the right size.
    Ty = OpTy;
  // This is the type to do integer arithmetic in.
  Type *IntTy = SE.getEffectiveSCEVType(Ty);

  // Build up a list of operands to add together to form the full base.
  SmallVector<const SCEV *, 8> Ops;

  // Expand the BaseRegs portion.
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *Reg = *I;
    assert(!Reg->isZero() && "Zero allocated in a base register!");

    // If we're expanding for a post-inc user, make the post-inc adjustment.
    PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
    Reg = TransformForPostIncUse(Denormalize, Reg,
                                 LF.UserInst, LF.OperandValToReplace,
                                 Loops, SE, DT);

    Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, nullptr, IP)));
  }

  // Expand the ScaledReg portion.
  Value *ICmpScaledV = nullptr;
  if (F.Scale != 0) {
    const SCEV *ScaledS = F.ScaledReg;

    // If we're expanding for a post-inc user, make the post-inc adjustment.
    PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
    ScaledS = TransformForPostIncUse(Denormalize, ScaledS,
                                     LF.UserInst, LF.OperandValToReplace,
                                     Loops, SE, DT);

    if (LU.Kind == LSRUse::ICmpZero) {
      // Expand ScaleReg as if it was part of the base regs.
      if (F.Scale == 1)
        Ops.push_back(
            SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr, IP)));
      else {
        // An interesting way of "folding" with an icmp is to use a negated
        // scale, which we'll implement by inserting it into the other operand
        // of the icmp.
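        // For example, rather than materializing -1 * %x and comparing it
        // against zero, the expansion of the rest of the formula becomes one
        // icmp operand and %x itself becomes the other.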
        assert(F.Scale == -1 &&
               "The only scale supported by ICmpZero uses is -1!");
        ICmpScaledV = Rewriter.expandCodeFor(ScaledS, nullptr, IP);
      }
    } else {
      // Otherwise just expand the scaled register and an explicit scale,
      // which is expected to be matched as part of the address.

      // Flush the operand list to suppress SCEVExpander hoisting address modes,
      // unless the addressing mode will not be folded.
      if (!Ops.empty() && LU.Kind == LSRUse::Address &&
          isAMCompletelyFolded(TTI, LU, F)) {
        Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
        Ops.clear();
        Ops.push_back(SE.getUnknown(FullV));
      }
      ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr, IP));
      if (F.Scale != 1)
        ScaledS =
            SE.getMulExpr(ScaledS, SE.getConstant(ScaledS->getType(), F.Scale));
      Ops.push_back(ScaledS);
    }
  }

  // Expand the GV portion.
  if (F.BaseGV) {
    // Flush the operand list to suppress SCEVExpander hoisting.
    if (!Ops.empty()) {
      Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
      Ops.clear();
      Ops.push_back(SE.getUnknown(FullV));
    }
    Ops.push_back(SE.getUnknown(F.BaseGV));
  }

  // Flush the operand list to suppress SCEVExpander hoisting of both folded and
  // unfolded offsets. LSR assumes they both live next to their uses.
  if (!Ops.empty()) {
    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
    Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
  }

  // Expand the immediate portion.
  int64_t Offset = (uint64_t)F.BaseOffset + LF.Offset;
  if (Offset != 0) {
    if (LU.Kind == LSRUse::ICmpZero) {
      // The other interesting way of "folding" with an ICmpZero is to use a
      // negated immediate.
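      // For example, instead of emitting (reg + 5) == 0, emit reg == -5 and
      // let the immediate live in the comparison itself.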
      if (!ICmpScaledV)
        ICmpScaledV = ConstantInt::get(IntTy, -(uint64_t)Offset);
      else {
        Ops.push_back(SE.getUnknown(ICmpScaledV));
        ICmpScaledV = ConstantInt::get(IntTy, Offset);
      }
    } else {
      // Just add the immediate values. These again are expected to be matched
      // as part of the address.
      Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset)));
    }
  }

  // Expand the unfolded offset portion.
  int64_t UnfoldedOffset = F.UnfoldedOffset;
  if (UnfoldedOffset != 0) {
    // Just add the immediate values.
    Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy,
                                                       UnfoldedOffset)));
  }

  // Emit instructions summing all the operands.
  const SCEV *FullS = Ops.empty() ?
                      SE.getConstant(IntTy, 0) :
                      SE.getAddExpr(Ops);
  Value *FullV = Rewriter.expandCodeFor(FullS, Ty, IP);

  // We're done expanding now, so reset the rewriter.
  Rewriter.clearPostInc();

  // An ICmpZero Formula represents an ICmp which we're handling as a
  // comparison against zero. Now that we've expanded an expression for that
  // form, update the ICmp's other operand.
  if (LU.Kind == LSRUse::ICmpZero) {
    ICmpInst *CI = cast<ICmpInst>(LF.UserInst);
    DeadInsts.push_back(CI->getOperand(1));
    assert(!F.BaseGV && "ICmp does not support folding a global value and "
                        "a scale at the same time!");
    if (F.Scale == -1) {
      if (ICmpScaledV->getType() != OpTy) {
        Instruction *Cast =
          CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false,
                                                   OpTy, false),
                           ICmpScaledV, OpTy, "tmp", CI);
        ICmpScaledV = Cast;
      }
      CI->setOperand(1, ICmpScaledV);
    } else {
      // A scale of 1 means that the scale has been expanded as part of the
      // base regs.
      assert((F.Scale == 0 || F.Scale == 1) &&
             "ICmp does not support folding a global value and "
             "a scale at the same time!");
      Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
                                           -(uint64_t)Offset);
      if (C->getType() != OpTy)
        C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                          OpTy, false),
                                  C, OpTy);

      CI->setOperand(1, C);
    }
  }

  return FullV;
}

/// RewriteForPHI - Helper for Rewrite. PHI nodes are special because the use
/// of their operands effectively happens in their predecessor blocks, so the
/// expression may need to be expanded in multiple places.
void LSRInstance::RewriteForPHI(PHINode *PN,
                                const LSRFixup &LF,
                                const Formula &F,
                                SCEVExpander &Rewriter,
                                SmallVectorImpl<WeakVH> &DeadInsts,
                                Pass *P) const {
  DenseMap<BasicBlock *, Value *> Inserted;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
      BasicBlock *BB = PN->getIncomingBlock(i);

      // If this is a critical edge, split the edge so that we do not insert
      // the code on all predecessor/successor paths.  We do this unless this
      // is the canonical backedge for this loop, which complicates post-inc
      // users.
      if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 &&
          !isa<IndirectBrInst>(BB->getTerminator())) {
        BasicBlock *Parent = PN->getParent();
        Loop *PNLoop = LI.getLoopFor(Parent);
        if (!PNLoop || Parent != PNLoop->getHeader()) {
          // Split the critical edge.
          BasicBlock *NewBB = nullptr;
          if (!Parent->isLandingPad()) {
            NewBB = SplitCriticalEdge(BB, Parent, P,
                                      /*MergeIdenticalEdges=*/true,
                                      /*DontDeleteUselessPhis=*/true);
          } else {
            SmallVector<BasicBlock*, 2> NewBBs;
            SplitLandingPadPredecessors(Parent, BB, "", "", P, NewBBs);
            NewBB = NewBBs[0];
          }
          // If NewBB==NULL, then SplitCriticalEdge refused to split because all
          // phi predecessors are identical. The simple thing to do is skip
          // splitting in this case rather than complicate the API.
          if (NewBB) {
            // If PN is outside of the loop and BB is in the loop, we want to
            // move the block to be immediately before the PHI block, not
            // immediately after BB.
            if (L->contains(BB) && !L->contains(PN))
              NewBB->moveBefore(PN->getParent());

            // Splitting the edge can reduce the number of PHI entries we have.
            e = PN->getNumIncomingValues();
            BB = NewBB;
            i = PN->getBasicBlockIndex(BB);
          }
        }
      }

      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
        Inserted.insert(std::make_pair(BB, static_cast<Value *>(nullptr)));
      if (!Pair.second)
        PN->setIncomingValue(i, Pair.first->second);
      else {
        Value *FullV = Expand(LF, F, BB->getTerminator(), Rewriter, DeadInsts);

        // If this is reuse-by-noop-cast, insert the noop cast.
        Type *OpTy = LF.OperandValToReplace->getType();
        if (FullV->getType() != OpTy)
          FullV =
            CastInst::Create(CastInst::getCastOpcode(FullV, false,
                                                     OpTy, false),
                             FullV, LF.OperandValToReplace->getType(),
                             "tmp", BB->getTerminator());

        PN->setIncomingValue(i, FullV);
        Pair.first->second = FullV;
      }
    }
}

/// Rewrite - Emit instructions for the leading candidate expression for this
/// LSRUse (this is called "expanding"), and update the UserInst to reference
/// the newly expanded value.
void LSRInstance::Rewrite(const LSRFixup &LF,
                          const Formula &F,
                          SCEVExpander &Rewriter,
                          SmallVectorImpl<WeakVH> &DeadInsts,
                          Pass *P) const {
  // First, find an insertion point that dominates UserInst. For PHI nodes,
  // find the nearest block which dominates all the relevant uses.
  if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
    RewriteForPHI(PN, LF, F, Rewriter, DeadInsts, P);
  } else {
    Value *FullV = Expand(LF, F, LF.UserInst, Rewriter, DeadInsts);

    // If this is reuse-by-noop-cast, insert the noop cast.
    Type *OpTy = LF.OperandValToReplace->getType();
    if (FullV->getType() != OpTy) {
      Instruction *Cast =
        CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
                         FullV, OpTy, "tmp", LF.UserInst);
      FullV = Cast;
    }

    // Update the user. ICmpZero is handled specially here (for now) because
    // Expand may have updated one of the operands of the icmp already, and
    // its new value may happen to be equal to LF.OperandValToReplace, in
    // which case doing replaceUsesOfWith leads to replacing both operands
    // with the same value. TODO: Reorganize this.
    if (Uses[LF.LUIdx].Kind == LSRUse::ICmpZero)
      LF.UserInst->setOperand(0, FullV);
    else
      LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV);
  }

  DeadInsts.push_back(LF.OperandValToReplace);
}

/// ImplementSolution - Rewrite all the fixup locations with new values,
/// following the chosen solution.
void
LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                               Pass *P) {
  // Keep track of instructions we may have made dead, so that
  // we can remove them after we are done working.
  SmallVector<WeakVH, 16> DeadInsts;

  SCEVExpander Rewriter(SE, "lsr");
#ifndef NDEBUG
  Rewriter.setDebugType(DEBUG_TYPE);
#endif
  Rewriter.disableCanonicalMode();
  Rewriter.enableLSRMode();
  Rewriter.setIVIncInsertPos(L, IVIncInsertPos);

  // Mark phi nodes that terminate chains so the expander tries to reuse them.
  for (SmallVectorImpl<IVChain>::const_iterator ChainI = IVChainVec.begin(),
         ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) {
    if (PHINode *PN = dyn_cast<PHINode>(ChainI->tailUserInst()))
      Rewriter.setChainedPhi(PN);
  }

  // Expand the new value definitions and update the users.
  for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
       E = Fixups.end(); I != E; ++I) {
    const LSRFixup &Fixup = *I;

    Rewrite(Fixup, *Solution[Fixup.LUIdx], Rewriter, DeadInsts, P);

    Changed = true;
  }

  for (SmallVectorImpl<IVChain>::const_iterator ChainI = IVChainVec.begin(),
         ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) {
    GenerateIVChain(*ChainI, Rewriter, DeadInsts);
    Changed = true;
  }
  // Clean up after ourselves. This must be done before deleting any
  // instructions.
  Rewriter.clear();

  Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
}

LSRInstance::LSRInstance(Loop *L, Pass *P)
    : IU(P->getAnalysis<IVUsers>()), SE(P->getAnalysis<ScalarEvolution>()),
      DT(P->getAnalysis<DominatorTreeWrapperPass>().getDomTree()),
      LI(P->getAnalysis<LoopInfo>()),
      TTI(P->getAnalysis<TargetTransformInfo>()), L(L), Changed(false),
      IVIncInsertPos(nullptr) {
  // If LoopSimplify form is not available, stay out of trouble.
  if (!L->isLoopSimplifyForm())
    return;

  // If there's no interesting work to be done, bail early.
  if (IU.empty()) return;

  // If there's too much analysis to be done, bail early. We won't be able to
  // model the problem anyway.
  unsigned NumUsers = 0;
  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
    if (++NumUsers > MaxIVUsers) {
      DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << *L
            << "\n");
      return;
    }
  }

#ifndef NDEBUG
  // All dominating loops must have preheaders, or SCEVExpander may not be able
  // to materialize an AddRecExpr whose Start is an outer AddRecExpr.
  //
  // IVUsers analysis should only create users that are dominated by simple loop
  // headers. Since this loop should dominate all of its users, its user list
  // should be empty if this loop itself is not within a simple loop nest.
  for (DomTreeNode *Rung = DT.getNode(L->getLoopPreheader());
       Rung; Rung = Rung->getIDom()) {
    BasicBlock *BB = Rung->getBlock();
    const Loop *DomLoop = LI.getLoopFor(BB);
    if (DomLoop && DomLoop->getHeader() == BB) {
      assert(DomLoop->getLoopPreheader() && "LSR needs a simplified loop nest");
    }
  }
#endif // NDEBUG

  DEBUG(dbgs() << "\nLSR on loop ";
        L->getHeader()->printAsOperand(dbgs(), /*PrintType=*/false);
        dbgs() << ":\n");

  // First, perform some low-level loop optimizations.
  OptimizeShadowIV();
  OptimizeLoopTermCond();

  // If loop preparation eliminates all interesting IV users, bail.
  if (IU.empty()) return;

  // Skip nested loops until we can model them better with formulae.
  if (!L->empty()) {
    DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n");
    return;
  }

  // Start collecting data and preparing for the solver.
  CollectChains();
  CollectInterestingTypesAndFactors();
  CollectFixupsAndInitialFormulae();
  CollectLoopInvariantFixupsAndFormulae();

  assert(!Uses.empty() && "IVUsers reported at least one use");
  DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
        print_uses(dbgs()));

  // Now use the reuse data to generate a bunch of interesting ways
  // to formulate the values needed for the uses.
  GenerateAllReuseFormulae();

  FilterOutUndesirableDedicatedRegisters();
  NarrowSearchSpaceUsingHeuristics();

  SmallVector<const Formula *, 8> Solution;
  Solve(Solution);

  // Release memory that is no longer needed.
  Factors.clear();
  Types.clear();
  RegUses.clear();

  if (Solution.empty())
    return;

#ifndef NDEBUG
  // Formulae should be legal.
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(), E = Uses.end();
       I != E; ++I) {
    const LSRUse &LU = *I;
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
                                                  JE = LU.Formulae.end();
         J != JE; ++J)
      assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
                        *J) && "Illegal formula generated!");
  }
#endif

  // Now that we've decided what we want, make it so.
  ImplementSolution(Solution, P);
}

void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
  if (Factors.empty() && Types.empty()) return;

  OS << "LSR has identified the following interesting factors and types: ";
  bool First = true;

  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '*' << *I;
  }

  for (SmallSetVector<Type *, 4>::const_iterator
       I = Types.begin(), E = Types.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '(' << **I << ')';
  }
  OS << '\n';
}

void LSRInstance::print_fixups(raw_ostream &OS) const {
  OS << "LSR is examining the following fixup sites:\n";
  for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
       E = Fixups.end(); I != E; ++I) {
    OS << "  ";
    I->print(OS);
    OS << '\n';
  }
}

void LSRInstance::print_uses(raw_ostream &OS) const {
  OS << "LSR is examining the following uses:\n";
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    OS << "  ";
    LU.print(OS);
    OS << '\n';
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J) {
      OS << "    ";
      J->print(OS);
      OS << '\n';
    }
  }
}

void LSRInstance::print(raw_ostream &OS) const {
  print_factors_and_types(OS);
  print_fixups(OS);
  print_uses(OS);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LSRInstance::dump() const {
  print(errs()); errs() << '\n';
}
#endif

namespace {

class LoopStrengthReduce : public LoopPass {
public:
  static char ID; // Pass ID, replacement for typeid
  LoopStrengthReduce();

private:
  bool runOnLoop(Loop *L, LPPassManager &LPM) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};

}

char LoopStrengthReduce::ID = 0;
INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
                "Loop Strength Reduction", false, false)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(IVUsers)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce",
                "Loop Strength Reduction", false, false)


Pass *llvm::createLoopStrengthReducePass() {
  return new LoopStrengthReduce();
}

LoopStrengthReduce::LoopStrengthReduce() : LoopPass(ID) {
  initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry());
}

void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
  // We split critical edges, so we change the CFG.  However, we do update
  // many analyses if they are around.
  AU.addPreservedID(LoopSimplifyID);

  AU.addRequired<LoopInfo>();
  AU.addPreserved<LoopInfo>();
  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addRequired<ScalarEvolution>();
  AU.addPreserved<ScalarEvolution>();
  // Requiring LoopSimplify a second time here prevents IVUsers from running
  // twice, since LoopSimplify was invalidated by running ScalarEvolution.
  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<IVUsers>();
  AU.addPreserved<IVUsers>();
  AU.addRequired<TargetTransformInfo>();
}

bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
  if (skipOptnoneFunction(L))
    return false;

  bool Changed = false;

  // Run the main LSR transformation.
  Changed |= LSRInstance(L, this).getChanged();

  // Remove any extra phis created by processing inner loops.
  Changed |= DeleteDeadPHIs(L->getHeader());
  if (EnablePhiElim && L->isLoopSimplifyForm()) {
    SmallVector<WeakVH, 16> DeadInsts;
    SCEVExpander Rewriter(getAnalysis<ScalarEvolution>(), "lsr");
#ifndef NDEBUG
    Rewriter.setDebugType(DEBUG_TYPE);
#endif
    unsigned numFolded = Rewriter.replaceCongruentIVs(
        L, &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), DeadInsts,
        &getAnalysis<TargetTransformInfo>());
    if (numFolded) {
      Changed = true;
      DeleteTriviallyDeadInstructions(DeadInsts);
      DeleteDeadPHIs(L->getHeader());
    }
  }
  return Changed;
}