1 //===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This transformation analyzes and transforms the induction variables (and
10 // computations derived from them) into forms suitable for efficient execution
11 // on the target.
12 //
13 // This pass performs strength reduction on array references inside loops that
14 // use the loop induction variable as one or more of their components. It
15 // rewrites expressions to take advantage of scaled-index addressing modes
16 // available on the target, and it performs a variety of other optimizations
17 // related to loop induction variables.
18 //
19 // Terminology note: this code has a lot of handling for "post-increment" or
20 // "post-inc" users. This is not talking about post-increment addressing modes;
21 // it is instead talking about code like this:
22 //
23 // %i = phi [ 0, %entry ], [ %i.next, %latch ]
24 // ...
25 // %i.next = add %i, 1
26 // %c = icmp eq %i.next, %n
27 //
28 // The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however
29 // it's useful to think about these as the same register, with some uses using
30 // the value of the register before the add and some using it after. In this
31 // example, the icmp is a post-increment user, since it uses %i.next, which is
32 // the value of the induction variable after the increment. The other common
33 // case of post-increment users is users outside the loop.
34 //
35 // TODO: More sophistication in the way Formulae are generated and filtered.
36 //
37 // TODO: Handle multiple loops at a time.
38 //
39 // TODO: Should the addressing mode BaseGV be changed to a ConstantExpr instead
40 // of a GlobalValue?
41 //
42 // TODO: When truncation is free, truncate ICmp users' operands to make it a
43 // smaller encoding (on x86 at least).
44 //
45 // TODO: When a negated register is used by an add (such as in a list of
46 // multiple base registers, or as the increment expression in an addrec),
47 // we may not actually need both reg and (-1 * reg) in registers; the
48 // negation can be implemented by using a sub instead of an add. The
49 // lack of support for taking this into consideration when making
50 // register pressure decisions is partly worked around by the "Special"
51 // use kind.
52 //
53 //===----------------------------------------------------------------------===//
54
55 #include "llvm/Transforms/Scalar/LoopStrengthReduce.h"
56 #include "llvm/ADT/APInt.h"
57 #include "llvm/ADT/DenseMap.h"
58 #include "llvm/ADT/DenseSet.h"
59 #include "llvm/ADT/Hashing.h"
60 #include "llvm/ADT/PointerIntPair.h"
61 #include "llvm/ADT/STLExtras.h"
62 #include "llvm/ADT/SetOperations.h"
63 #include "llvm/ADT/SetVector.h"
64 #include "llvm/ADT/SmallBitVector.h"
65 #include "llvm/ADT/SmallPtrSet.h"
66 #include "llvm/ADT/SmallSet.h"
67 #include "llvm/ADT/SmallVector.h"
68 #include "llvm/ADT/iterator_range.h"
69 #include "llvm/Analysis/AssumptionCache.h"
70 #include "llvm/Analysis/IVUsers.h"
71 #include "llvm/Analysis/LoopAnalysisManager.h"
72 #include "llvm/Analysis/LoopInfo.h"
73 #include "llvm/Analysis/LoopPass.h"
74 #include "llvm/Analysis/MemorySSA.h"
75 #include "llvm/Analysis/MemorySSAUpdater.h"
76 #include "llvm/Analysis/ScalarEvolution.h"
77 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
78 #include "llvm/Analysis/ScalarEvolutionNormalization.h"
79 #include "llvm/Analysis/TargetTransformInfo.h"
80 #include "llvm/Config/llvm-config.h"
81 #include "llvm/IR/BasicBlock.h"
82 #include "llvm/IR/Constant.h"
83 #include "llvm/IR/Constants.h"
84 #include "llvm/IR/DebugInfoMetadata.h"
85 #include "llvm/IR/DerivedTypes.h"
86 #include "llvm/IR/Dominators.h"
87 #include "llvm/IR/GlobalValue.h"
88 #include "llvm/IR/IRBuilder.h"
89 #include "llvm/IR/InstrTypes.h"
90 #include "llvm/IR/Instruction.h"
91 #include "llvm/IR/Instructions.h"
92 #include "llvm/IR/IntrinsicInst.h"
93 #include "llvm/IR/Intrinsics.h"
94 #include "llvm/IR/Module.h"
95 #include "llvm/IR/OperandTraits.h"
96 #include "llvm/IR/Operator.h"
97 #include "llvm/IR/PassManager.h"
98 #include "llvm/IR/Type.h"
99 #include "llvm/IR/Use.h"
100 #include "llvm/IR/User.h"
101 #include "llvm/IR/Value.h"
102 #include "llvm/IR/ValueHandle.h"
103 #include "llvm/InitializePasses.h"
104 #include "llvm/Pass.h"
105 #include "llvm/Support/Casting.h"
106 #include "llvm/Support/CommandLine.h"
107 #include "llvm/Support/Compiler.h"
108 #include "llvm/Support/Debug.h"
109 #include "llvm/Support/ErrorHandling.h"
110 #include "llvm/Support/MathExtras.h"
111 #include "llvm/Support/raw_ostream.h"
112 #include "llvm/Transforms/Scalar.h"
113 #include "llvm/Transforms/Utils.h"
114 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
115 #include "llvm/Transforms/Utils/Local.h"
116 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
117 #include <algorithm>
118 #include <cassert>
119 #include <cstddef>
120 #include <cstdint>
121 #include <cstdlib>
122 #include <iterator>
123 #include <limits>
124 #include <map>
125 #include <numeric>
126 #include <utility>
127
128 using namespace llvm;
129
130 #define DEBUG_TYPE "loop-reduce"
131
132 /// MaxIVUsers is an arbitrary threshold that provides an early opportunity to
133 /// bail out. This threshold is far beyond the number of users that LSR can
134 /// conceivably solve, so it should not affect generated code, but catches the
135 /// worst cases before LSR burns too much compile time and stack space.
136 static const unsigned MaxIVUsers = 200;
137
138 // Temporary flag to cleanup congruent phis after LSR phi expansion.
139 // It's currently disabled until we can determine whether it's truly useful or
140 // not. The flag should be removed after the v3.0 release.
141 // This is now needed for ivchains.
142 static cl::opt<bool> EnablePhiElim(
143 "enable-lsr-phielim", cl::Hidden, cl::init(true),
144 cl::desc("Enable LSR phi elimination"));
145
146 // This flag adds the instruction count to the solution cost comparison.
147 static cl::opt<bool> InsnsCost(
148 "lsr-insns-cost", cl::Hidden, cl::init(true),
149 cl::desc("Add instruction count to a LSR cost model"));
150
151 // Flag to choose how to narrow a complex LSR solution.
152 static cl::opt<bool> LSRExpNarrow(
153 "lsr-exp-narrow", cl::Hidden, cl::init(false),
154 cl::desc("Narrow LSR complex solution using"
155 " expectation of registers number"));
156
157 // Flag to narrow search space by filtering non-optimal formulae with
158 // the same ScaledReg and Scale.
159 static cl::opt<bool> FilterSameScaledReg(
160 "lsr-filter-same-scaled-reg", cl::Hidden, cl::init(true),
161 cl::desc("Narrow LSR search space by filtering non-optimal formulae"
162 " with the same ScaledReg and Scale"));
163
164 static cl::opt<bool> EnableBackedgeIndexing(
165 "lsr-backedge-indexing", cl::Hidden, cl::init(true),
166 cl::desc("Enable the generation of cross iteration indexed memops"));
167
168 static cl::opt<unsigned> ComplexityLimit(
169 "lsr-complexity-limit", cl::Hidden,
170 cl::init(std::numeric_limits<uint16_t>::max()),
171 cl::desc("LSR search space complexity limit"));
172
173 static cl::opt<unsigned> SetupCostDepthLimit(
174 "lsr-setupcost-depth-limit", cl::Hidden, cl::init(7),
175 cl::desc("The limit on recursion depth for LSRs setup cost"));
176
177 #ifndef NDEBUG
178 // Stress test IV chain generation.
179 static cl::opt<bool> StressIVChain(
180 "stress-ivchain", cl::Hidden, cl::init(false),
181 cl::desc("Stress test LSR IV chains"));
182 #else
183 static bool StressIVChain = false;
184 #endif
185
186 namespace {
187
188 struct MemAccessTy {
189 /// Used in situations where the accessed memory type is unknown.
190 static const unsigned UnknownAddressSpace =
191 std::numeric_limits<unsigned>::max();
192
193 Type *MemTy = nullptr;
194 unsigned AddrSpace = UnknownAddressSpace;
195
196 MemAccessTy() = default;
197 MemAccessTy(Type *Ty, unsigned AS) : MemTy(Ty), AddrSpace(AS) {}
198
199 bool operator==(MemAccessTy Other) const {
200 return MemTy == Other.MemTy && AddrSpace == Other.AddrSpace;
201 }
202
203 bool operator!=(MemAccessTy Other) const { return !(*this == Other); }
204
205 static MemAccessTy getUnknown(LLVMContext &Ctx,
206 unsigned AS = UnknownAddressSpace) {
207 return MemAccessTy(Type::getVoidTy(Ctx), AS);
208 }
209
210 Type *getType() { return MemTy; }
211 };
212
213 /// This class holds data which is used to order reuse candidates.
214 class RegSortData {
215 public:
216 /// This represents the set of LSRUse indices which reference
217 /// a particular register.
218 SmallBitVector UsedByIndices;
219
220 void print(raw_ostream &OS) const;
221 void dump() const;
222 };
223
224 } // end anonymous namespace
225
226 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
227 void RegSortData::print(raw_ostream &OS) const {
228 OS << "[NumUses=" << UsedByIndices.count() << ']';
229 }
230
231 LLVM_DUMP_METHOD void RegSortData::dump() const {
232 print(errs()); errs() << '\n';
233 }
234 #endif
235
236 namespace {
237
238 /// Map register candidates to information about how they are used.
239 class RegUseTracker {
240 using RegUsesTy = DenseMap<const SCEV *, RegSortData>;
241
242 RegUsesTy RegUsesMap;
243 SmallVector<const SCEV *, 16> RegSequence;
244
245 public:
246 void countRegister(const SCEV *Reg, size_t LUIdx);
247 void dropRegister(const SCEV *Reg, size_t LUIdx);
248 void swapAndDropUse(size_t LUIdx, size_t LastLUIdx);
249
250 bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;
251
252 const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;
253
254 void clear();
255
256 using iterator = SmallVectorImpl<const SCEV *>::iterator;
257 using const_iterator = SmallVectorImpl<const SCEV *>::const_iterator;
258
259 iterator begin() { return RegSequence.begin(); }
260 iterator end() { return RegSequence.end(); }
261 const_iterator begin() const { return RegSequence.begin(); }
262 const_iterator end() const { return RegSequence.end(); }
263 };
264
265 } // end anonymous namespace
266
267 void
268 RegUseTracker::countRegister(const SCEV *Reg, size_t LUIdx) {
269 std::pair<RegUsesTy::iterator, bool> Pair =
270 RegUsesMap.insert(std::make_pair(Reg, RegSortData()));
271 RegSortData &RSD = Pair.first->second;
272 if (Pair.second)
273 RegSequence.push_back(Reg);
274 RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
275 RSD.UsedByIndices.set(LUIdx);
276 }
277
278 void
279 RegUseTracker::dropRegister(const SCEV *Reg, size_t LUIdx) {
280 RegUsesTy::iterator It = RegUsesMap.find(Reg);
281 assert(It != RegUsesMap.end());
282 RegSortData &RSD = It->second;
283 assert(RSD.UsedByIndices.size() > LUIdx);
284 RSD.UsedByIndices.reset(LUIdx);
285 }
286
287 void
288 RegUseTracker::swapAndDropUse(size_t LUIdx, size_t LastLUIdx) {
289 assert(LUIdx <= LastLUIdx);
290
291 // Update RegUses. The data structure is not optimized for this purpose;
292 // we must iterate through it and update each of the bit vectors.
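// Illustrative example (hypothetical indices): with LUIdx = 3 and
// LastLUIdx = 9, each register's bit 3 takes the old value of bit 9 and the
// bit vectors shrink to 9 entries, matching the removal of use 3 by swapping
// it with the last use.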
293 for (auto &Pair : RegUsesMap) {
294 SmallBitVector &UsedByIndices = Pair.second.UsedByIndices;
295 if (LUIdx < UsedByIndices.size())
296 UsedByIndices[LUIdx] =
297 LastLUIdx < UsedByIndices.size() ? UsedByIndices[LastLUIdx] : false;
298 UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx));
299 }
300 }
301
302 bool
303 RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
304 RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
305 if (I == RegUsesMap.end())
306 return false;
307 const SmallBitVector &UsedByIndices = I->second.UsedByIndices;
308 int i = UsedByIndices.find_first();
309 if (i == -1) return false;
310 if ((size_t)i != LUIdx) return true;
311 return UsedByIndices.find_next(i) != -1;
312 }
313
314 const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
315 RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
316 assert(I != RegUsesMap.end() && "Unknown register!");
317 return I->second.UsedByIndices;
318 }
319
320 void RegUseTracker::clear() {
321 RegUsesMap.clear();
322 RegSequence.clear();
323 }
324
325 namespace {
326
327 /// This class holds information that describes a formula for computing a
328 /// value that satisfies a use. It may include broken-out immediates and scaled registers.
329 struct Formula {
330 /// Global base address used for complex addressing.
331 GlobalValue *BaseGV = nullptr;
332
333 /// Base offset for complex addressing.
334 int64_t BaseOffset = 0;
335
336 /// Whether any complex addressing has a base register.
337 bool HasBaseReg = false;
338
339 /// The scale of any complex addressing.
340 int64_t Scale = 0;
341
342 /// The list of "base" registers for this use. When this is non-empty, the
343 /// canonical representation of a formula is
344 /// 1. BaseRegs.size > 1 implies ScaledReg != NULL and
345 /// 2. ScaledReg != NULL implies Scale != 1 || !BaseRegs.empty().
346 /// 3. The reg containing the recurrent expr related to the current loop in the
347 /// formula should be put in the ScaledReg.
348 /// #1 enforces that the scaled register is always used when at least two
349 /// registers are needed by the formula: e.g., reg1 + reg2 is reg1 + 1 * reg2.
350 /// #2 enforces that 1 * reg is reg.
351 /// #3 ensures invariant regs with respect to the current loop can be combined
352 /// together in LSR codegen.
353 /// This invariant can be temporarily broken while building a formula.
354 /// However, every formula inserted into the LSRInstance must be in canonical
355 /// form.
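/// An illustrative (hypothetical) example: the non-canonical
///   reg(%base) + reg({0,+,4}<%L>)
/// becomes reg(%base) + 1*reg({0,+,4}<%L>) in canonical form, with the
/// current-loop addrec held in ScaledReg per rule #3.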
356 SmallVector<const SCEV *, 4> BaseRegs;
357
358 /// The 'scaled' register for this use. This should be non-null when Scale is
359 /// not zero.
360 const SCEV *ScaledReg = nullptr;
361
362 /// An additional constant offset which is added near the use. This requires a
363 /// temporary register, but the offset itself can live in an add immediate
364 /// field rather than a register.
365 int64_t UnfoldedOffset = 0;
366
367 Formula() = default;
368
369 void initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);
370
371 bool isCanonical(const Loop &L) const;
372
373 void canonicalize(const Loop &L);
374
375 bool unscale();
376
377 bool hasZeroEnd() const;
378
379 size_t getNumRegs() const;
380 Type *getType() const;
381
382 void deleteBaseReg(const SCEV *&S);
383
384 bool referencesReg(const SCEV *S) const;
385 bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
386 const RegUseTracker &RegUses) const;
387
388 void print(raw_ostream &OS) const;
389 void dump() const;
390 };
391
392 } // end anonymous namespace
393
394 /// Recursion helper for initialMatch.
395 static void DoInitialMatch(const SCEV *S, Loop *L,
396 SmallVectorImpl<const SCEV *> &Good,
397 SmallVectorImpl<const SCEV *> &Bad,
398 ScalarEvolution &SE) {
399 // Collect expressions which properly dominate the loop header.
400 if (SE.properlyDominates(S, L->getHeader())) {
401 Good.push_back(S);
402 return;
403 }
404
405 // Look at add operands.
406 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
407 for (const SCEV *S : Add->operands())
408 DoInitialMatch(S, L, Good, Bad, SE);
409 return;
410 }
411
412 // Look at addrec operands.
413 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
414 if (!AR->getStart()->isZero() && AR->isAffine()) {
415 DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
416 DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
417 AR->getStepRecurrence(SE),
418 // FIXME: AR->getNoWrapFlags()
419 AR->getLoop(), SCEV::FlagAnyWrap),
420 L, Good, Bad, SE);
421 return;
422 }
423
424 // Handle a multiplication by -1 (negation) if it didn't fold.
425 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
426 if (Mul->getOperand(0)->isAllOnesValue()) {
427 SmallVector<const SCEV *, 4> Ops(Mul->op_begin()+1, Mul->op_end());
428 const SCEV *NewMul = SE.getMulExpr(Ops);
429
430 SmallVector<const SCEV *, 4> MyGood;
431 SmallVector<const SCEV *, 4> MyBad;
432 DoInitialMatch(NewMul, L, MyGood, MyBad, SE);
433 const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
434 SE.getEffectiveSCEVType(NewMul->getType())));
435 for (const SCEV *S : MyGood)
436 Good.push_back(SE.getMulExpr(NegOne, S));
437 for (const SCEV *S : MyBad)
438 Bad.push_back(SE.getMulExpr(NegOne, S));
439 return;
440 }
441
442 // Ok, we can't do anything interesting. Just stuff the whole thing into a
443 // register and hope for the best.
444 Bad.push_back(S);
445 }
446
447 /// Incorporate loop-variant parts of S into this Formula, attempting to keep
448 /// all loop-invariant and loop-computable values in a single base register.
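/// Illustrative example (assuming %base is loop-invariant): for
/// S = {%base,+,4}<%L>, the start %base and the zero-start addrec
/// {0,+,4}<%L> each land in their own base register, and canonicalize()
/// then moves the current-loop addrec into ScaledReg.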
449 void Formula::initialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
450 SmallVector<const SCEV *, 4> Good;
451 SmallVector<const SCEV *, 4> Bad;
452 DoInitialMatch(S, L, Good, Bad, SE);
453 if (!Good.empty()) {
454 const SCEV *Sum = SE.getAddExpr(Good);
455 if (!Sum->isZero())
456 BaseRegs.push_back(Sum);
457 HasBaseReg = true;
458 }
459 if (!Bad.empty()) {
460 const SCEV *Sum = SE.getAddExpr(Bad);
461 if (!Sum->isZero())
462 BaseRegs.push_back(Sum);
463 HasBaseReg = true;
464 }
465 canonicalize(*L);
466 }
467
468 /// Check whether or not this formula satisfies the canonical
469 /// representation.
470 /// \see Formula::BaseRegs.
471 bool Formula::isCanonical(const Loop &L) const {
472 if (!ScaledReg)
473 return BaseRegs.size() <= 1;
474
475 if (Scale != 1)
476 return true;
477
478 if (Scale == 1 && BaseRegs.empty())
479 return false;
480
481 const SCEVAddRecExpr *SAR = dyn_cast<const SCEVAddRecExpr>(ScaledReg);
482 if (SAR && SAR->getLoop() == &L)
483 return true;
484
485 // If ScaledReg is not a recurrent expr, or it is one but its loop is not the
486 // current loop, while BaseRegs contains a recurrent expr reg related to the
487 // current loop, we want to swap the reg in BaseRegs with ScaledReg.
488 auto I =
489 find_if(make_range(BaseRegs.begin(), BaseRegs.end()), [&](const SCEV *S) {
490 return isa<const SCEVAddRecExpr>(S) &&
491 (cast<SCEVAddRecExpr>(S)->getLoop() == &L);
492 });
493 return I == BaseRegs.end();
494 }
495
496 /// Helper method to morph a formula into its canonical representation.
497 /// \see Formula::BaseRegs.
498 /// Every formula having more than one base register must use the ScaledReg
499 /// field. Otherwise, we would have to do special cases everywhere in LSR
500 /// to treat reg1 + reg2 + ... the same way as reg1 + 1*reg2 + ...
501 /// On the other hand, 1*reg should be canonicalized into reg.
502 void Formula::canonicalize(const Loop &L) {
503 if (isCanonical(L))
504 return;
505 // So far we did not need this case. This is easy to implement but it is
506 // useless to maintain dead code. Besides, it could hurt compile time.
507 assert(!BaseRegs.empty() && "1*reg => reg, should not be needed.");
508
509 // Keep the invariant sum in BaseRegs and one of the variant sums in ScaledReg.
510 if (!ScaledReg) {
511 ScaledReg = BaseRegs.back();
512 BaseRegs.pop_back();
513 Scale = 1;
514 }
515
516 // If ScaledReg is an invariant with respect to L, find the reg from
517 // BaseRegs containing the recurrent expr related to Loop L. Swap the
518 // reg with ScaledReg.
519 const SCEVAddRecExpr *SAR = dyn_cast<const SCEVAddRecExpr>(ScaledReg);
520 if (!SAR || SAR->getLoop() != &L) {
521 auto I = find_if(make_range(BaseRegs.begin(), BaseRegs.end()),
522 [&](const SCEV *S) {
523 return isa<const SCEVAddRecExpr>(S) &&
524 (cast<SCEVAddRecExpr>(S)->getLoop() == &L);
525 });
526 if (I != BaseRegs.end())
527 std::swap(ScaledReg, *I);
528 }
529 }
530
531 /// Get rid of the scale in the formula.
532 /// In other words, this method morphs reg1 + 1*reg2 into reg1 + reg2.
533 /// \return true if it was possible to get rid of the scale, false otherwise.
534 /// \note After this operation the formula may not be in the canonical form.
535 bool Formula::unscale() {
536 if (Scale != 1)
537 return false;
538 Scale = 0;
539 BaseRegs.push_back(ScaledReg);
540 ScaledReg = nullptr;
541 return true;
542 }
543
544 bool Formula::hasZeroEnd() const {
545 if (UnfoldedOffset || BaseOffset)
546 return false;
547 if (BaseRegs.size() != 1 || ScaledReg)
548 return false;
549 return true;
550 }
551
552 /// Return the total number of register operands used by this formula. This does
553 /// not include register uses implied by non-constant addrec strides.
554 size_t Formula::getNumRegs() const {
555 return !!ScaledReg + BaseRegs.size();
556 }
557
558 /// Return the type of this formula, if it has one, or null otherwise. This type
559 /// is meaningless except for the bit size.
560 Type *Formula::getType() const {
561 return !BaseRegs.empty() ? BaseRegs.front()->getType() :
562 ScaledReg ? ScaledReg->getType() :
563 BaseGV ? BaseGV->getType() :
564 nullptr;
565 }
566
567 /// Delete the given base reg from the BaseRegs list.
568 void Formula::deleteBaseReg(const SCEV *&S) {
569 if (&S != &BaseRegs.back())
570 std::swap(S, BaseRegs.back());
571 BaseRegs.pop_back();
572 }
573
574 /// Test if this formula references the given register.
575 bool Formula::referencesReg(const SCEV *S) const {
576 return S == ScaledReg || is_contained(BaseRegs, S);
577 }
578
579 /// Test whether this formula uses registers which are used by uses other than
580 /// the use with the given index.
581 bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
582 const RegUseTracker &RegUses) const {
583 if (ScaledReg)
584 if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
585 return true;
586 for (const SCEV *BaseReg : BaseRegs)
587 if (RegUses.isRegUsedByUsesOtherThan(BaseReg, LUIdx))
588 return true;
589 return false;
590 }
591
592 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
593 void Formula::print(raw_ostream &OS) const {
594 bool First = true;
595 if (BaseGV) {
596 if (!First) OS << " + "; else First = false;
597 BaseGV->printAsOperand(OS, /*PrintType=*/false);
598 }
599 if (BaseOffset != 0) {
600 if (!First) OS << " + "; else First = false;
601 OS << BaseOffset;
602 }
603 for (const SCEV *BaseReg : BaseRegs) {
604 if (!First) OS << " + "; else First = false;
605 OS << "reg(" << *BaseReg << ')';
606 }
607 if (HasBaseReg && BaseRegs.empty()) {
608 if (!First) OS << " + "; else First = false;
609 OS << "**error: HasBaseReg**";
610 } else if (!HasBaseReg && !BaseRegs.empty()) {
611 if (!First) OS << " + "; else First = false;
612 OS << "**error: !HasBaseReg**";
613 }
614 if (Scale != 0) {
615 if (!First) OS << " + "; else First = false;
616 OS << Scale << "*reg(";
617 if (ScaledReg)
618 OS << *ScaledReg;
619 else
620 OS << "<unknown>";
621 OS << ')';
622 }
623 if (UnfoldedOffset != 0) {
624 if (!First) OS << " + ";
625 OS << "imm(" << UnfoldedOffset << ')';
626 }
627 }
628
629 LLVM_DUMP_METHOD void Formula::dump() const {
630 print(errs()); errs() << '\n';
631 }
632 #endif
633
634 /// Return true if the given addrec can be sign-extended without changing its
635 /// value.
636 static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
637 Type *WideTy =
638 IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1);
639 return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
640 }
641
642 /// Return true if the given add can be sign-extended without changing its
643 /// value.
644 static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
645 Type *WideTy =
646 IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1);
647 return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
648 }
649
650 /// Return true if the given mul can be sign-extended without changing its
651 /// value.
652 static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
653 Type *WideTy =
654 IntegerType::get(SE.getContext(),
655 SE.getTypeSizeInBits(M->getType()) * M->getNumOperands());
656 return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
657 }
658
659 /// Return an expression for LHS /s RHS, if it can be determined and if the
660 /// remainder is known to be zero, or null otherwise. If IgnoreSignificantBits
661 /// is true, expressions like (X * Y) /s Y are simplified to X, ignoring that
662 /// the multiplication may overflow, which is useful when the result will be
663 /// used in a context where the most significant bits are ignored.
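/// Illustrative examples: {8,+,4}<%L> /s 4 yields {2,+,1}<%L> when the addrec
/// can be sign-extended without changing its value, and (X * Y) /s Y yields X
/// only when IgnoreSignificantBits is set.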
664 static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
665 ScalarEvolution &SE,
666 bool IgnoreSignificantBits = false) {
667 // Handle the trivial case, which works for any SCEV type.
668 if (LHS == RHS)
669 return SE.getConstant(LHS->getType(), 1);
670
671 // Handle a few RHS special cases.
672 const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
673 if (RC) {
674 const APInt &RA = RC->getAPInt();
675 // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do
676 // some folding.
677 if (RA.isAllOnesValue())
678 return SE.getMulExpr(LHS, RC);
679 // Handle x /s 1 as x.
680 if (RA == 1)
681 return LHS;
682 }
683
684 // Check for a division of a constant by a constant.
685 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
686 if (!RC)
687 return nullptr;
688 const APInt &LA = C->getAPInt();
689 const APInt &RA = RC->getAPInt();
690 if (LA.srem(RA) != 0)
691 return nullptr;
692 return SE.getConstant(LA.sdiv(RA));
693 }
694
695 // Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
696 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
697 if ((IgnoreSignificantBits || isAddRecSExtable(AR, SE)) && AR->isAffine()) {
698 const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
699 IgnoreSignificantBits);
700 if (!Step) return nullptr;
701 const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
702 IgnoreSignificantBits);
703 if (!Start) return nullptr;
704 // FlagNW is independent of the start value, step direction, and is
705 // preserved with smaller magnitude steps.
706 // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
707 return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap);
708 }
709 return nullptr;
710 }
711
712 // Distribute the sdiv over add operands, if the add doesn't overflow.
713 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
714 if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
715 SmallVector<const SCEV *, 8> Ops;
716 for (const SCEV *S : Add->operands()) {
717 const SCEV *Op = getExactSDiv(S, RHS, SE, IgnoreSignificantBits);
718 if (!Op) return nullptr;
719 Ops.push_back(Op);
720 }
721 return SE.getAddExpr(Ops);
722 }
723 return nullptr;
724 }
725
726 // Check for a multiply operand that we can pull RHS out of.
727 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) {
728 if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
729 SmallVector<const SCEV *, 4> Ops;
730 bool Found = false;
731 for (const SCEV *S : Mul->operands()) {
732 if (!Found)
733 if (const SCEV *Q = getExactSDiv(S, RHS, SE,
734 IgnoreSignificantBits)) {
735 S = Q;
736 Found = true;
737 }
738 Ops.push_back(S);
739 }
740 return Found ? SE.getMulExpr(Ops) : nullptr;
741 }
742 return nullptr;
743 }
744
745 // Otherwise we don't know.
746 return nullptr;
747 }
748
749 /// If S involves the addition of a constant integer value, return that integer
750 /// value, and mutate S to point to a new SCEV with that value excluded.
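/// Illustrative example: for S = (4 + %a), this returns 4 and rewrites S to
/// %a; for {4,+,1}<%L>, it returns 4 and rewrites S to {0,+,1}<%L>.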
751 static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
752 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
753 if (C->getAPInt().getMinSignedBits() <= 64) {
754 S = SE.getConstant(C->getType(), 0);
755 return C->getValue()->getSExtValue();
756 }
757 } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
758 SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
759 int64_t Result = ExtractImmediate(NewOps.front(), SE);
760 if (Result != 0)
761 S = SE.getAddExpr(NewOps);
762 return Result;
763 } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
764 SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
765 int64_t Result = ExtractImmediate(NewOps.front(), SE);
766 if (Result != 0)
767 S = SE.getAddRecExpr(NewOps, AR->getLoop(),
768 // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
769 SCEV::FlagAnyWrap);
770 return Result;
771 }
772 return 0;
773 }
774
775 /// If S involves the addition of a GlobalValue address, return that symbol, and
776 /// mutate S to point to a new SCEV with that value excluded.
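/// Illustrative example: for S = (16 + @g), this returns @g and rewrites S to
/// the constant 16.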
777 static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
778 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
779 if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
780 S = SE.getConstant(GV->getType(), 0);
781 return GV;
782 }
783 } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
784 SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
785 GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
786 if (Result)
787 S = SE.getAddExpr(NewOps);
788 return Result;
789 } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
790 SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
791 GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
792 if (Result)
793 S = SE.getAddRecExpr(NewOps, AR->getLoop(),
794 // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
795 SCEV::FlagAnyWrap);
796 return Result;
797 }
798 return nullptr;
799 }
800
801 /// Returns true if the specified instruction is using the specified value as an
802 /// address.
803 static bool isAddressUse(const TargetTransformInfo &TTI,
804 Instruction *Inst, Value *OperandVal) {
805 bool isAddress = isa<LoadInst>(Inst);
806 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
807 if (SI->getPointerOperand() == OperandVal)
808 isAddress = true;
809 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
810 // Addressing modes can also be folded into prefetches and a variety
811 // of intrinsics.
812 switch (II->getIntrinsicID()) {
813 case Intrinsic::memset:
814 case Intrinsic::prefetch:
815 case Intrinsic::masked_load:
816 if (II->getArgOperand(0) == OperandVal)
817 isAddress = true;
818 break;
819 case Intrinsic::masked_store:
820 if (II->getArgOperand(1) == OperandVal)
821 isAddress = true;
822 break;
823 case Intrinsic::memmove:
824 case Intrinsic::memcpy:
825 if (II->getArgOperand(0) == OperandVal ||
826 II->getArgOperand(1) == OperandVal)
827 isAddress = true;
828 break;
829 default: {
830 MemIntrinsicInfo IntrInfo;
831 if (TTI.getTgtMemIntrinsic(II, IntrInfo)) {
832 if (IntrInfo.PtrVal == OperandVal)
833 isAddress = true;
834 }
835 }
836 }
837 } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) {
838 if (RMW->getPointerOperand() == OperandVal)
839 isAddress = true;
840 } else if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) {
841 if (CmpX->getPointerOperand() == OperandVal)
842 isAddress = true;
843 }
844 return isAddress;
845 }
846
847 /// Return the type of the memory being accessed.
848 static MemAccessTy getAccessType(const TargetTransformInfo &TTI,
849 Instruction *Inst, Value *OperandVal) {
850 MemAccessTy AccessTy(Inst->getType(), MemAccessTy::UnknownAddressSpace);
851 if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
852 AccessTy.MemTy = SI->getOperand(0)->getType();
853 AccessTy.AddrSpace = SI->getPointerAddressSpace();
854 } else if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
855 AccessTy.AddrSpace = LI->getPointerAddressSpace();
856 } else if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) {
857 AccessTy.AddrSpace = RMW->getPointerAddressSpace();
858 } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) {
859 AccessTy.AddrSpace = CmpX->getPointerAddressSpace();
860 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
861 switch (II->getIntrinsicID()) {
862 case Intrinsic::prefetch:
863 case Intrinsic::memset:
864 AccessTy.AddrSpace = II->getArgOperand(0)->getType()->getPointerAddressSpace();
865 AccessTy.MemTy = OperandVal->getType();
866 break;
867 case Intrinsic::memmove:
868 case Intrinsic::memcpy:
869 AccessTy.AddrSpace = OperandVal->getType()->getPointerAddressSpace();
870 AccessTy.MemTy = OperandVal->getType();
871 break;
872 case Intrinsic::masked_load:
873 AccessTy.AddrSpace =
874 II->getArgOperand(0)->getType()->getPointerAddressSpace();
875 break;
876 case Intrinsic::masked_store:
877 AccessTy.MemTy = II->getOperand(0)->getType();
878 AccessTy.AddrSpace =
879 II->getArgOperand(1)->getType()->getPointerAddressSpace();
880 break;
881 default: {
882 MemIntrinsicInfo IntrInfo;
883 if (TTI.getTgtMemIntrinsic(II, IntrInfo) && IntrInfo.PtrVal) {
884 AccessTy.AddrSpace
885 = IntrInfo.PtrVal->getType()->getPointerAddressSpace();
886 }
887
888 break;
889 }
890 }
891 }
892
893 // All pointers have the same requirements, so canonicalize them to an
894 // arbitrary pointer type to minimize variation.
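// Illustrative example: both i32 addrspace(1)* and float addrspace(1)* are
// rewritten to i1 addrspace(1)* here, since only the address space of the
// access matters to LSR.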
895 if (PointerType *PTy = dyn_cast<PointerType>(AccessTy.MemTy))
896 AccessTy.MemTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
897 PTy->getAddressSpace());
898
899 return AccessTy;
900 }
901
902 /// Return true if this AddRec is already a phi in its loop.
903 static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
904 for (PHINode &PN : AR->getLoop()->getHeader()->phis()) {
905 if (SE.isSCEVable(PN.getType()) &&
906 (SE.getEffectiveSCEVType(PN.getType()) ==
907 SE.getEffectiveSCEVType(AR->getType())) &&
908 SE.getSCEV(&PN) == AR)
909 return true;
910 }
911 return false;
912 }
913
914 /// Check if expanding this expression is likely to incur significant cost. This
915 /// is tricky because SCEV doesn't track which expressions are actually computed
916 /// by the current IR.
917 ///
918 /// We currently allow expansion of IV increments that involve adds,
919 /// multiplication by constants, and AddRecs from existing phis.
920 ///
921 /// TODO: Allow UDivExpr if we can find an existing IV increment that is an
922 /// obvious multiple of the UDivExpr.
923 static bool isHighCostExpansion(const SCEV *S,
924 SmallPtrSetImpl<const SCEV*> &Processed,
925 ScalarEvolution &SE) {
926 // Zero/One operand expressions
927 switch (S->getSCEVType()) {
928 case scUnknown:
929 case scConstant:
930 return false;
931 case scTruncate:
932 return isHighCostExpansion(cast<SCEVTruncateExpr>(S)->getOperand(),
933 Processed, SE);
934 case scZeroExtend:
935 return isHighCostExpansion(cast<SCEVZeroExtendExpr>(S)->getOperand(),
936 Processed, SE);
937 case scSignExtend:
938 return isHighCostExpansion(cast<SCEVSignExtendExpr>(S)->getOperand(),
939 Processed, SE);
940 default:
941 break;
942 }
943
944 if (!Processed.insert(S).second)
945 return false;
946
947 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
948 for (const SCEV *S : Add->operands()) {
949 if (isHighCostExpansion(S, Processed, SE))
950 return true;
951 }
952 return false;
953 }
954
955 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
956 if (Mul->getNumOperands() == 2) {
957 // Multiplication by a constant is ok
958 if (isa<SCEVConstant>(Mul->getOperand(0)))
959 return isHighCostExpansion(Mul->getOperand(1), Processed, SE);
960
961 // If we have the value of one operand, check if an existing
962 // multiplication already generates this expression.
963 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Mul->getOperand(1))) {
964 Value *UVal = U->getValue();
965 for (User *UR : UVal->users()) {
966 // If U is a constant, it may be used by a ConstantExpr.
967 Instruction *UI = dyn_cast<Instruction>(UR);
968 if (UI && UI->getOpcode() == Instruction::Mul &&
969 SE.isSCEVable(UI->getType())) {
970 return SE.getSCEV(UI) == Mul;
971 }
972 }
973 }
974 }
975 }
976
977 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
978 if (isExistingPhi(AR, SE))
979 return false;
980 }
981
982 // For now, consider any other type of expression (div/mul/min/max) high cost.
983 return true;
984 }
985
986 namespace {
987
988 class LSRUse;
989
990 } // end anonymous namespace
991
992 /// Check if the addressing mode defined by \p F is completely
993 /// folded in \p LU at isel time.
994 /// This includes address-mode folding and special icmp tricks.
995 /// This function returns true if \p LU can accommodate what \p F
996 /// defines and up to 1 base + 1 scaled + offset.
997 /// In other words, if \p F has several base registers, this function may
998 /// still return true. Therefore, users still need to account for
999 /// additional base registers and/or unfolded offsets to derive an
1000 /// accurate cost model.
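/// Illustrative example (hypothetical target): with base + scale*index +
/// displacement addressing, a formula such as reg(%base) + 4*reg({0,+,1}<%L>) + 8
/// for an Address use can be folded entirely into the memory operand.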
1001 static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
1002 const LSRUse &LU, const Formula &F);
1003
1004 // Get the cost of the scaling factor used in F for LU.
1005 static unsigned getScalingFactorCost(const TargetTransformInfo &TTI,
1006 const LSRUse &LU, const Formula &F,
1007 const Loop &L);
1008
1009 namespace {
1010
1011 /// This class is used to measure and compare candidate formulae.
1012 class Cost {
1013 const Loop *L = nullptr;
1014 ScalarEvolution *SE = nullptr;
1015 const TargetTransformInfo *TTI = nullptr;
1016 TargetTransformInfo::LSRCost C;
1017
1018 public:
1019 Cost() = delete;
1020 Cost(const Loop *L, ScalarEvolution &SE, const TargetTransformInfo &TTI) :
1021 L(L), SE(&SE), TTI(&TTI) {
1022 C.Insns = 0;
1023 C.NumRegs = 0;
1024 C.AddRecCost = 0;
1025 C.NumIVMuls = 0;
1026 C.NumBaseAdds = 0;
1027 C.ImmCost = 0;
1028 C.SetupCost = 0;
1029 C.ScaleCost = 0;
1030 }
1031
1032 bool isLess(Cost &Other);
1033
1034 void Lose();
1035
1036 #ifndef NDEBUG
1037 // Once any of the metrics loses, they must all remain losers.
1038 bool isValid() {
1039 return ((C.Insns | C.NumRegs | C.AddRecCost | C.NumIVMuls | C.NumBaseAdds
1040 | C.ImmCost | C.SetupCost | C.ScaleCost) != ~0u)
1041 || ((C.Insns & C.NumRegs & C.AddRecCost & C.NumIVMuls & C.NumBaseAdds
1042 & C.ImmCost & C.SetupCost & C.ScaleCost) == ~0u);
1043 }
1044 #endif
1045
1046 bool isLoser() {
1047 assert(isValid() && "invalid cost");
1048 return C.NumRegs == ~0u;
1049 }
1050
1051 void RateFormula(const Formula &F,
1052 SmallPtrSetImpl<const SCEV *> &Regs,
1053 const DenseSet<const SCEV *> &VisitedRegs,
1054 const LSRUse &LU,
1055 SmallPtrSetImpl<const SCEV *> *LoserRegs = nullptr);
1056
1057 void print(raw_ostream &OS) const;
1058 void dump() const;
1059
1060 private:
1061 void RateRegister(const Formula &F, const SCEV *Reg,
1062 SmallPtrSetImpl<const SCEV *> &Regs);
1063 void RatePrimaryRegister(const Formula &F, const SCEV *Reg,
1064 SmallPtrSetImpl<const SCEV *> &Regs,
1065 SmallPtrSetImpl<const SCEV *> *LoserRegs);
1066 };
1067
1068 /// An operand value in an instruction which is to be replaced with some
1069 /// equivalent, possibly strength-reduced, replacement.
1070 struct LSRFixup {
1071 /// The instruction which will be updated.
1072 Instruction *UserInst = nullptr;
1073
1074 /// The operand of the instruction which will be replaced. The operand may be
1075 /// used more than once; every instance will be replaced.
1076 Value *OperandValToReplace = nullptr;
1077
1078 /// If this user is to use the post-incremented value of an induction
1079 /// variable, this set is non-empty and holds the loops associated with the
1080 /// induction variable.
1081 PostIncLoopSet PostIncLoops;
1082
1083 /// A constant offset to be added to the LSRUse expression. This allows
1084 /// multiple fixups to share the same LSRUse with different offsets, for
1085 /// example in an unrolled loop.
1086 int64_t Offset = 0;
1087
1088 LSRFixup() = default;
1089
1090 bool isUseFullyOutsideLoop(const Loop *L) const;
1091
1092 void print(raw_ostream &OS) const;
1093 void dump() const;
1094 };
1095
1096 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of sorted
1097 /// SmallVectors of const SCEV*.
1098 struct UniquifierDenseMapInfo {
1099 static SmallVector<const SCEV *, 4> getEmptyKey() {
1100 SmallVector<const SCEV *, 4> V;
1101 V.push_back(reinterpret_cast<const SCEV *>(-1));
1102 return V;
1103 }
1104
1105 static SmallVector<const SCEV *, 4> getTombstoneKey() {
1106 SmallVector<const SCEV *, 4> V;
1107 V.push_back(reinterpret_cast<const SCEV *>(-2));
1108 return V;
1109 }
1110
1111 static unsigned getHashValue(const SmallVector<const SCEV *, 4> &V) {
1112 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
1113 }
1114
1115 static bool isEqual(const SmallVector<const SCEV *, 4> &LHS,
1116 const SmallVector<const SCEV *, 4> &RHS) {
1117 return LHS == RHS;
1118 }
1119 };
1120
1121 /// This class holds the state that LSR keeps for each use in IVUsers, as well
1122 /// as uses invented by LSR itself. It includes information about what kinds of
1123 /// things can be folded into the user, information about the user itself, and
1124 /// information about how the use may be satisfied. TODO: Represent multiple
1125 /// users of the same expression in common?
1126 class LSRUse {
1127 DenseSet<SmallVector<const SCEV *, 4>, UniquifierDenseMapInfo> Uniquifier;
1128
1129 public:
1130 /// An enum for a kind of use, indicating what types of scaled and immediate
1131 /// operands it might support.
1132 enum KindType {
1133 Basic, ///< A normal use, with no folding.
1134 Special, ///< A special case of basic, allowing -1 scales.
1135 Address, ///< An address use; folding according to TargetLowering
1136 ICmpZero ///< An equality icmp with both operands folded into one.
1137 // TODO: Add a generic icmp too?
1138 };
1139
1140 using SCEVUseKindPair = PointerIntPair<const SCEV *, 2, KindType>;
1141
1142 KindType Kind;
1143 MemAccessTy AccessTy;
1144
1145 /// The list of operands which are to be replaced.
1146 SmallVector<LSRFixup, 8> Fixups;
1147
1148 /// Keep track of the min and max offsets of the fixups.
1149 int64_t MinOffset = std::numeric_limits<int64_t>::max();
1150 int64_t MaxOffset = std::numeric_limits<int64_t>::min();
1151
1152 /// This records whether all of the fixups using this LSRUse are outside of
1153 /// the loop, in which case some special-case heuristics may be used.
1154 bool AllFixupsOutsideLoop = true;
1155
1156 /// RigidFormula is set to true to guarantee that this use will be associated
1157 /// with a single formula--the one that initially matched. Some SCEV
1158 /// expressions cannot be expanded. This allows LSR to consider the registers
1159 /// used by those expressions without the need to expand them later after
1160 /// changing the formula.
1161 bool RigidFormula = false;
1162
1163 /// This records the widest use type for any fixup using this
1164 /// LSRUse. FindUseWithSimilarFormula can't consider uses with different max
1165 /// fixup widths to be equivalent, because the narrower one may be relying on
1166 /// the implicit truncation to truncate away bogus bits.
1167 Type *WidestFixupType = nullptr;
1168
1169 /// A list of ways to build a value that can satisfy this user. After the
1170 /// list is populated, one of these is selected heuristically and used to
1171 /// formulate a replacement for OperandValToReplace in UserInst.
1172 SmallVector<Formula, 12> Formulae;
1173
1174 /// The set of register candidates used by all formulae in this LSRUse.
1175 SmallPtrSet<const SCEV *, 4> Regs;
1176
1177 LSRUse(KindType K, MemAccessTy AT) : Kind(K), AccessTy(AT) {}
1178
1179 LSRFixup &getNewFixup() {
1180 Fixups.push_back(LSRFixup());
1181 return Fixups.back();
1182 }
1183
1184 void pushFixup(LSRFixup &f) {
1185 Fixups.push_back(f);
1186 if (f.Offset > MaxOffset)
1187 MaxOffset = f.Offset;
1188 if (f.Offset < MinOffset)
1189 MinOffset = f.Offset;
1190 }
1191
1192 bool HasFormulaWithSameRegs(const Formula &F) const;
1193 float getNotSelectedProbability(const SCEV *Reg) const;
1194 bool InsertFormula(const Formula &F, const Loop &L);
1195 void DeleteFormula(Formula &F);
1196 void RecomputeRegs(size_t LUIdx, RegUseTracker &Reguses);
1197
1198 void print(raw_ostream &OS) const;
1199 void dump() const;
1200 };
1201
1202 } // end anonymous namespace
1203
1204 static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
1205 LSRUse::KindType Kind, MemAccessTy AccessTy,
1206 GlobalValue *BaseGV, int64_t BaseOffset,
1207 bool HasBaseReg, int64_t Scale,
1208 Instruction *Fixup = nullptr);
1209
1210 static unsigned getSetupCost(const SCEV *Reg, unsigned Depth) {
1211 if (isa<SCEVUnknown>(Reg) || isa<SCEVConstant>(Reg))
1212 return 1;
1213 if (Depth == 0)
1214 return 0;
1215 if (const auto *S = dyn_cast<SCEVAddRecExpr>(Reg))
1216 return getSetupCost(S->getStart(), Depth - 1);
1217 if (auto S = dyn_cast<SCEVIntegralCastExpr>(Reg))
1218 return getSetupCost(S->getOperand(), Depth - 1);
1219 if (auto S = dyn_cast<SCEVNAryExpr>(Reg))
1220 return std::accumulate(S->op_begin(), S->op_end(), 0,
1221 [&](unsigned i, const SCEV *Reg) {
1222 return i + getSetupCost(Reg, Depth - 1);
1223 });
1224 if (auto S = dyn_cast<SCEVUDivExpr>(Reg))
1225 return getSetupCost(S->getLHS(), Depth - 1) +
1226 getSetupCost(S->getRHS(), Depth - 1);
1227 return 0;
1228 }
1229
1230 /// Tally up interesting quantities from the given register.
1231 void Cost::RateRegister(const Formula &F, const SCEV *Reg,
1232 SmallPtrSetImpl<const SCEV *> &Regs) {
1233 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
1234 // If this is an addrec for another loop, it should be an invariant
1235 // with respect to L since L is the innermost loop (at least
1236 // for now LSR only handles innermost loops).
1237 if (AR->getLoop() != L) {
1238 // If the AddRec exists, consider its register free and leave it alone.
1239 if (isExistingPhi(AR, *SE) && !TTI->shouldFavorPostInc())
1240 return;
1241
1242 // It is bad to allow LSR for the current loop to add induction variables
1243 // for its sibling loops.
1244 if (!AR->getLoop()->contains(L)) {
1245 Lose();
1246 return;
1247 }
1248
1249 // Otherwise, it will be an invariant with respect to Loop L.
1250 ++C.NumRegs;
1251 return;
1252 }
1253
1254 unsigned LoopCost = 1;
1255 if (TTI->isIndexedLoadLegal(TTI->MIM_PostInc, AR->getType()) ||
1256 TTI->isIndexedStoreLegal(TTI->MIM_PostInc, AR->getType())) {
1257
1258 // If the step size matches the base offset, we could use pre-indexed
1259 // addressing.
1260 if (TTI->shouldFavorBackedgeIndex(L)) {
1261 if (auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE)))
1262 if (Step->getAPInt() == F.BaseOffset)
1263 LoopCost = 0;
1264 }
1265
1266 if (TTI->shouldFavorPostInc()) {
1267 const SCEV *LoopStep = AR->getStepRecurrence(*SE);
1268 if (isa<SCEVConstant>(LoopStep)) {
1269 const SCEV *LoopStart = AR->getStart();
1270 if (!isa<SCEVConstant>(LoopStart) &&
1271 SE->isLoopInvariant(LoopStart, L))
1272 LoopCost = 0;
1273 }
1274 }
1275 }
1276 C.AddRecCost += LoopCost;
1277
1278 // Add the step value register, if it needs one.
1279 // TODO: The non-affine case isn't precisely modeled here.
1280 if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1))) {
1281 if (!Regs.count(AR->getOperand(1))) {
1282 RateRegister(F, AR->getOperand(1), Regs);
1283 if (isLoser())
1284 return;
1285 }
1286 }
1287 }
1288 ++C.NumRegs;
1289
1290 // Rough heuristic; favor registers which don't require extra setup
1291 // instructions in the preheader.
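// Illustrative example: getSetupCost rates a plain argument or constant as a
// single trivial setup, while an expression such as (%a + 4*%b) accumulates
// cost from each operand and is penalized accordingly.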
1292 C.SetupCost += getSetupCost(Reg, SetupCostDepthLimit);
1293 // Ensure we don't, even with the recursion limit, produce invalid costs.
1294 C.SetupCost = std::min<unsigned>(C.SetupCost, 1 << 16);
1295
1296 C.NumIVMuls += isa<SCEVMulExpr>(Reg) &&
1297 SE->hasComputableLoopEvolution(Reg, L);
1298 }
1299
1300 /// Record this register in the set. If we haven't seen it before, rate
1301 /// it. Optional LoserRegs provides a way to declare any formula that refers to
1302 /// one of those regs an instant loser.
1303 void Cost::RatePrimaryRegister(const Formula &F, const SCEV *Reg,
1304 SmallPtrSetImpl<const SCEV *> &Regs,
1305 SmallPtrSetImpl<const SCEV *> *LoserRegs) {
1306 if (LoserRegs && LoserRegs->count(Reg)) {
1307 Lose();
1308 return;
1309 }
1310 if (Regs.insert(Reg).second) {
1311 RateRegister(F, Reg, Regs);
1312 if (LoserRegs && isLoser())
1313 LoserRegs->insert(Reg);
1314 }
1315 }
1316
1317 void Cost::RateFormula(const Formula &F,
1318 SmallPtrSetImpl<const SCEV *> &Regs,
1319 const DenseSet<const SCEV *> &VisitedRegs,
1320 const LSRUse &LU,
1321 SmallPtrSetImpl<const SCEV *> *LoserRegs) {
1322 assert(F.isCanonical(*L) && "Cost is accurate only for canonical formula");
1323 // Tally up the registers.
1324 unsigned PrevAddRecCost = C.AddRecCost;
1325 unsigned PrevNumRegs = C.NumRegs;
1326 unsigned PrevNumBaseAdds = C.NumBaseAdds;
1327 if (const SCEV *ScaledReg = F.ScaledReg) {
1328 if (VisitedRegs.count(ScaledReg)) {
1329 Lose();
1330 return;
1331 }
1332 RatePrimaryRegister(F, ScaledReg, Regs, LoserRegs);
1333 if (isLoser())
1334 return;
1335 }
1336 for (const SCEV *BaseReg : F.BaseRegs) {
1337 if (VisitedRegs.count(BaseReg)) {
1338 Lose();
1339 return;
1340 }
1341 RatePrimaryRegister(F, BaseReg, Regs, LoserRegs);
1342 if (isLoser())
1343 return;
1344 }
1345
1346 // Determine how many (unfolded) adds we'll need inside the loop.
1347 size_t NumBaseParts = F.getNumRegs();
1348 if (NumBaseParts > 1)
1349 // Do not count the base and a possible second register if the target
1350 // allows folding 2 registers.
1351 C.NumBaseAdds +=
1352 NumBaseParts - (1 + (F.Scale && isAMCompletelyFolded(*TTI, LU, F)));
1353 C.NumBaseAdds += (F.UnfoldedOffset != 0);
1354
1355 // Accumulate non-free scaling amounts.
1356 C.ScaleCost += getScalingFactorCost(*TTI, LU, F, *L);
1357
1358 // Tally up the non-zero immediates.
1359 for (const LSRFixup &Fixup : LU.Fixups) {
1360 int64_t O = Fixup.Offset;
1361 int64_t Offset = (uint64_t)O + F.BaseOffset;
1362 if (F.BaseGV)
1363 C.ImmCost += 64; // Handle symbolic values conservatively.
1364 // TODO: This should probably be the pointer size.
1365 else if (Offset != 0)
1366 C.ImmCost += APInt(64, Offset, true).getMinSignedBits();
1367
1368 // Check with target if this offset with this instruction is
1369 // specifically not supported.
1370 if (LU.Kind == LSRUse::Address && Offset != 0 &&
1371 !isAMCompletelyFolded(*TTI, LSRUse::Address, LU.AccessTy, F.BaseGV,
1372 Offset, F.HasBaseReg, F.Scale, Fixup.UserInst))
1373 C.NumBaseAdds++;
1374 }
1375
1376 // If we don't count instruction cost, exit here.
1377 if (!InsnsCost) {
1378 assert(isValid() && "invalid cost");
1379 return;
1380 }
1381
1382 // Treat every new register that exceeds TTI.getNumberOfRegisters() - 1 as an
1383 // additional instruction (at least a fill).
1384 // TODO: Do we need to distinguish register classes?
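// Illustrative example: with TTIRegNum = 7, growing from 6 to 9 registers adds
// 9 - 7 = 2 instructions, while growing from 8 to 9 adds 9 - 8 = 1.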
1385 unsigned TTIRegNum = TTI->getNumberOfRegisters(
1386 TTI->getRegisterClassForType(false, F.getType())) - 1;
1387 if (C.NumRegs > TTIRegNum) {
1388 // If the cost already exceeded TTIRegNum, then only the newly added
1389 // registers can add new instructions.
1390 if (PrevNumRegs > TTIRegNum)
1391 C.Insns += (C.NumRegs - PrevNumRegs);
1392 else
1393 C.Insns += (C.NumRegs - TTIRegNum);
1394 }
1395
1396 // If an ICmpZero formula does not end in 0, it cannot be replaced by
1397 // just an add or sub. We'll need to compare the final result of the AddRec.
1398 // That means we'll need an additional instruction. But if the target can
1399 // macro-fuse a compare with a branch, don't count this extra instruction.
1400 // For -10 + {0, +, 1}:
1401 // i = i + 1;
1402 // cmp i, 10
1403 //
1404 // For {-10, +, 1}:
1405 // i = i + 1;
1406 if (LU.Kind == LSRUse::ICmpZero && !F.hasZeroEnd() &&
1407 !TTI->canMacroFuseCmp())
1408 C.Insns++;
1409 // Each new AddRec adds 1 instruction to the calculation.
1410 C.Insns += (C.AddRecCost - PrevAddRecCost);
1411
1412 // BaseAdds adds instructions for unfolded registers.
1413 if (LU.Kind != LSRUse::ICmpZero)
1414 C.Insns += C.NumBaseAdds - PrevNumBaseAdds;
1415 assert(isValid() && "invalid cost");
1416 }
1417
1418 /// Set this cost to a losing value.
1419 void Cost::Lose() {
1420 C.Insns = std::numeric_limits<unsigned>::max();
1421 C.NumRegs = std::numeric_limits<unsigned>::max();
1422 C.AddRecCost = std::numeric_limits<unsigned>::max();
1423 C.NumIVMuls = std::numeric_limits<unsigned>::max();
1424 C.NumBaseAdds = std::numeric_limits<unsigned>::max();
1425 C.ImmCost = std::numeric_limits<unsigned>::max();
1426 C.SetupCost = std::numeric_limits<unsigned>::max();
1427 C.ScaleCost = std::numeric_limits<unsigned>::max();
1428 }
1429
1430 /// Choose the lower cost.
1431 bool Cost::isLess(Cost &Other) {
1432 if (InsnsCost.getNumOccurrences() > 0 && InsnsCost &&
1433 C.Insns != Other.C.Insns)
1434 return C.Insns < Other.C.Insns;
1435 return TTI->isLSRCostLess(C, Other.C);
1436 }
1437
1438 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1439 void Cost::print(raw_ostream &OS) const {
1440 if (InsnsCost)
1441 OS << C.Insns << " instruction" << (C.Insns == 1 ? " " : "s ");
1442 OS << C.NumRegs << " reg" << (C.NumRegs == 1 ? "" : "s");
1443 if (C.AddRecCost != 0)
1444 OS << ", with addrec cost " << C.AddRecCost;
1445 if (C.NumIVMuls != 0)
1446 OS << ", plus " << C.NumIVMuls << " IV mul"
1447 << (C.NumIVMuls == 1 ? "" : "s");
1448 if (C.NumBaseAdds != 0)
1449 OS << ", plus " << C.NumBaseAdds << " base add"
1450 << (C.NumBaseAdds == 1 ? "" : "s");
1451 if (C.ScaleCost != 0)
1452 OS << ", plus " << C.ScaleCost << " scale cost";
1453 if (C.ImmCost != 0)
1454 OS << ", plus " << C.ImmCost << " imm cost";
1455 if (C.SetupCost != 0)
1456 OS << ", plus " << C.SetupCost << " setup cost";
1457 }
1458
1459 LLVM_DUMP_METHOD void Cost::dump() const {
1460 print(errs()); errs() << '\n';
1461 }
1462 #endif
1463
1464 /// Test whether this fixup always uses its value outside of the given loop.
1465 bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const {
1466 // PHI nodes use their value in their incoming blocks.
1467 if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) {
1468 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
1469 if (PN->getIncomingValue(i) == OperandValToReplace &&
1470 L->contains(PN->getIncomingBlock(i)))
1471 return false;
1472 return true;
1473 }
1474
1475 return !L->contains(UserInst);
1476 }
1477
1478 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1479 void LSRFixup::print(raw_ostream &OS) const {
1480 OS << "UserInst=";
1481 // Store is common and interesting enough to be worth special-casing.
1482 if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) {
1483 OS << "store ";
1484 Store->getOperand(0)->printAsOperand(OS, /*PrintType=*/false);
1485 } else if (UserInst->getType()->isVoidTy())
1486 OS << UserInst->getOpcodeName();
1487 else
1488 UserInst->printAsOperand(OS, /*PrintType=*/false);
1489
1490 OS << ", OperandValToReplace=";
1491 OperandValToReplace->printAsOperand(OS, /*PrintType=*/false);
1492
1493 for (const Loop *PIL : PostIncLoops) {
1494 OS << ", PostIncLoop=";
1495 PIL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
1496 }
1497
1498 if (Offset != 0)
1499 OS << ", Offset=" << Offset;
1500 }
1501
1502 LLVM_DUMP_METHOD void LSRFixup::dump() const {
1503 print(errs()); errs() << '\n';
1504 }
1505 #endif
1506
1507 /// Test whether this use has a formula with the same registers as the given
1508 /// formula.
1509 bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
1510 SmallVector<const SCEV *, 4> Key = F.BaseRegs;
1511 if (F.ScaledReg) Key.push_back(F.ScaledReg);
1512 // Unstable sort by host order ok, because this is only used for uniquifying.
1513 llvm::sort(Key);
1514 return Uniquifier.count(Key);
1515 }
1516
1517 /// Return the probability of selecting a formula that does not reference Reg.
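/// For example (illustrative), if 3 of 4 formulae reference Reg, the
/// probability of selecting one without Reg is (4 - 3) / 4 = 0.25.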
1518 float LSRUse::getNotSelectedProbability(const SCEV *Reg) const {
1519 unsigned FNum = 0;
1520 for (const Formula &F : Formulae)
1521 if (F.referencesReg(Reg))
1522 FNum++;
1523 return ((float)(Formulae.size() - FNum)) / Formulae.size();
1524 }
1525
1526 /// If the given formula has not yet been inserted, add it to the list, and
1527 /// return true. Return false otherwise. The formula must be in canonical form.
1528 bool LSRUse::InsertFormula(const Formula &F, const Loop &L) {
1529 assert(F.isCanonical(L) && "Invalid canonical representation");
1530
1531 if (!Formulae.empty() && RigidFormula)
1532 return false;
1533
1534 SmallVector<const SCEV *, 4> Key = F.BaseRegs;
1535 if (F.ScaledReg) Key.push_back(F.ScaledReg);
1536 // Unstable sort by host order ok, because this is only used for uniquifying.
1537 llvm::sort(Key);
1538
1539 if (!Uniquifier.insert(Key).second)
1540 return false;
1541
1542 // Using a register to hold the value of 0 is not profitable.
1543 assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
1544 "Zero allocated in a scaled register!");
1545 #ifndef NDEBUG
1546 for (const SCEV *BaseReg : F.BaseRegs)
1547 assert(!BaseReg->isZero() && "Zero allocated in a base register!");
1548 #endif
1549
1550 // Add the formula to the list.
1551 Formulae.push_back(F);
1552
1553 // Record registers now being used by this use.
1554 Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
1555 if (F.ScaledReg)
1556 Regs.insert(F.ScaledReg);
1557
1558 return true;
1559 }
1560
1561 /// Remove the given formula from this use's list.
1562 void LSRUse::DeleteFormula(Formula &F) {
1563 if (&F != &Formulae.back())
1564 std::swap(F, Formulae.back());
1565 Formulae.pop_back();
1566 }
1567
1568 /// Recompute the Regs field, and update RegUses.
1569 void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) {
1570 // Now that we've filtered out some formulae, recompute the Regs set.
1571 SmallPtrSet<const SCEV *, 4> OldRegs = std::move(Regs);
1572 Regs.clear();
1573 for (const Formula &F : Formulae) {
1574 if (F.ScaledReg) Regs.insert(F.ScaledReg);
1575 Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
1576 }
1577
1578 // Update the RegTracker.
1579 for (const SCEV *S : OldRegs)
1580 if (!Regs.count(S))
1581 RegUses.dropRegister(S, LUIdx);
1582 }
1583
1584 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1585 void LSRUse::print(raw_ostream &OS) const {
1586 OS << "LSR Use: Kind=";
1587 switch (Kind) {
1588 case Basic: OS << "Basic"; break;
1589 case Special: OS << "Special"; break;
1590 case ICmpZero: OS << "ICmpZero"; break;
1591 case Address:
1592 OS << "Address of ";
1593 if (AccessTy.MemTy->isPointerTy())
1594 OS << "pointer"; // the full pointer type could be really verbose
1595 else {
1596 OS << *AccessTy.MemTy;
1597 }
1598
1599 OS << " in addrspace(" << AccessTy.AddrSpace << ')';
1600 }
1601
1602 OS << ", Offsets={";
1603 bool NeedComma = false;
1604 for (const LSRFixup &Fixup : Fixups) {
1605 if (NeedComma) OS << ',';
1606 OS << Fixup.Offset;
1607 NeedComma = true;
1608 }
1609 OS << '}';
1610
1611 if (AllFixupsOutsideLoop)
1612 OS << ", all-fixups-outside-loop";
1613
1614 if (WidestFixupType)
1615 OS << ", widest fixup type: " << *WidestFixupType;
1616 }
1617
1618 LLVM_DUMP_METHOD void LSRUse::dump() const {
1619 print(errs()); errs() << '\n';
1620 }
1621 #endif
1622
1623 static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
1624 LSRUse::KindType Kind, MemAccessTy AccessTy,
1625 GlobalValue *BaseGV, int64_t BaseOffset,
1626 bool HasBaseReg, int64_t Scale,
1627 Instruction *Fixup/*= nullptr*/) {
1628 switch (Kind) {
1629 case LSRUse::Address:
1630 return TTI.isLegalAddressingMode(AccessTy.MemTy, BaseGV, BaseOffset,
1631 HasBaseReg, Scale, AccessTy.AddrSpace, Fixup);
1632
1633 case LSRUse::ICmpZero:
1634 // There's not even a target hook for querying whether it would be legal to
1635 // fold a GV into an ICmp.
1636 if (BaseGV)
1637 return false;
1638
1639 // ICmp only has two operands; don't allow more than two non-trivial parts.
1640 if (Scale != 0 && HasBaseReg && BaseOffset != 0)
1641 return false;
1642
1643 // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale by
1644 // putting the scaled register in the other operand of the icmp.
1645 if (Scale != 0 && Scale != -1)
1646 return false;
1647
1648 // If we have low-level target information, ask the target if it can fold an
1649 // integer immediate on an icmp.
1650 if (BaseOffset != 0) {
1651 // We have one of:
1652 // ICmpZero BaseReg + BaseOffset => ICmp BaseReg, -BaseOffset
1653 // ICmpZero -1*ScaleReg + BaseOffset => ICmp ScaleReg, BaseOffset
1654 // In either case, the resulting BaseOffset is the ICmp immediate.
1655 if (Scale == 0)
1656 // The cast does the right thing with
1657 // std::numeric_limits<int64_t>::min().
1658 BaseOffset = -(uint64_t)BaseOffset;
1659 return TTI.isLegalICmpImmediate(BaseOffset);
1660 }
1661
1662 // ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg
1663 return true;
1664
1665 case LSRUse::Basic:
1666 // Only handle single-register values.
1667 return !BaseGV && Scale == 0 && BaseOffset == 0;
1668
1669 case LSRUse::Special:
1670 // Special case Basic to handle -1 scales.
1671 return !BaseGV && (Scale == 0 || Scale == -1) && BaseOffset == 0;
1672 }
1673
1674 llvm_unreachable("Invalid LSRUse Kind!");
1675 }
1676
1677 static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
1678 int64_t MinOffset, int64_t MaxOffset,
1679 LSRUse::KindType Kind, MemAccessTy AccessTy,
1680 GlobalValue *BaseGV, int64_t BaseOffset,
1681 bool HasBaseReg, int64_t Scale) {
1682 // Check for overflow.
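// (The sum BaseOffset + MinOffset wrapped iff comparing the sum against
// BaseOffset disagrees with the sign of MinOffset; the same check is applied
// to MaxOffset below.)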
1683 if (((int64_t)((uint64_t)BaseOffset + MinOffset) > BaseOffset) !=
1684 (MinOffset > 0))
1685 return false;
1686 MinOffset = (uint64_t)BaseOffset + MinOffset;
1687 if (((int64_t)((uint64_t)BaseOffset + MaxOffset) > BaseOffset) !=
1688 (MaxOffset > 0))
1689 return false;
1690 MaxOffset = (uint64_t)BaseOffset + MaxOffset;
1691
1692 return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MinOffset,
1693 HasBaseReg, Scale) &&
1694 isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MaxOffset,
1695 HasBaseReg, Scale);
1696 }
1697
1698 static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
1699 int64_t MinOffset, int64_t MaxOffset,
1700 LSRUse::KindType Kind, MemAccessTy AccessTy,
1701 const Formula &F, const Loop &L) {
1702 // For the purpose of isAMCompletelyFolded, either having a canonical formula
1703 // or a scale not equal to zero is correct.
1704 // Problems may arise from non-canonical formulae having a scale == 0.
1705 // Strictly speaking, it would be best to just rely on canonical formulae.
1706 // However, when we generate the scaled formulae, we first check that the
1707 // scaling factor is profitable before computing the actual ScaledReg, for
1708 // the sake of compile time.
1709 assert((F.isCanonical(L) || F.Scale != 0));
1710 return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy,
1711 F.BaseGV, F.BaseOffset, F.HasBaseReg, F.Scale);
1712 }
1713
1714 /// Test whether we know how to expand the current formula.
1715 static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
1716 int64_t MaxOffset, LSRUse::KindType Kind,
1717 MemAccessTy AccessTy, GlobalValue *BaseGV,
1718 int64_t BaseOffset, bool HasBaseReg, int64_t Scale) {
1719 // We know how to expand completely foldable formulae.
1720 return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV,
1721 BaseOffset, HasBaseReg, Scale) ||
1722 // Or formulae that use a base register produced by a sum of base
1723 // registers.
1724 (Scale == 1 &&
1725 isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy,
1726 BaseGV, BaseOffset, true, 0));
1727 }
1728
1729 static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset,
1730 int64_t MaxOffset, LSRUse::KindType Kind,
1731 MemAccessTy AccessTy, const Formula &F) {
1732 return isLegalUse(TTI, MinOffset, MaxOffset, Kind, AccessTy, F.BaseGV,
1733 F.BaseOffset, F.HasBaseReg, F.Scale);
1734 }
1735
1736 static bool isAMCompletelyFolded(const TargetTransformInfo &TTI,
1737 const LSRUse &LU, const Formula &F) {
1738 // Target may want to look at the user instructions.
1739 if (LU.Kind == LSRUse::Address && TTI.LSRWithInstrQueries()) {
1740 for (const LSRFixup &Fixup : LU.Fixups)
1741 if (!isAMCompletelyFolded(TTI, LSRUse::Address, LU.AccessTy, F.BaseGV,
1742 (F.BaseOffset + Fixup.Offset), F.HasBaseReg,
1743 F.Scale, Fixup.UserInst))
1744 return false;
1745 return true;
1746 }
1747
1748 return isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind,
1749 LU.AccessTy, F.BaseGV, F.BaseOffset, F.HasBaseReg,
1750 F.Scale);
1751 }
1752
1753 static unsigned getScalingFactorCost(const TargetTransformInfo &TTI,
1754 const LSRUse &LU, const Formula &F,
1755 const Loop &L) {
1756 if (!F.Scale)
1757 return 0;
1758
1759 // If the use is not completely folded in that instruction, we will have to
1760 // pay an extra cost only for scale != 1.
1761 if (!isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind,
1762 LU.AccessTy, F, L))
1763 return F.Scale != 1;
1764
1765 switch (LU.Kind) {
1766 case LSRUse::Address: {
1767 // Check the scaling factor cost with both the min and max offsets.
1768 int ScaleCostMinOffset = TTI.getScalingFactorCost(
1769 LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MinOffset, F.HasBaseReg,
1770 F.Scale, LU.AccessTy.AddrSpace);
1771 int ScaleCostMaxOffset = TTI.getScalingFactorCost(
1772 LU.AccessTy.MemTy, F.BaseGV, F.BaseOffset + LU.MaxOffset, F.HasBaseReg,
1773 F.Scale, LU.AccessTy.AddrSpace);
1774
1775 assert(ScaleCostMinOffset >= 0 && ScaleCostMaxOffset >= 0 &&
1776 "Legal addressing mode has an illegal cost!");
1777 return std::max(ScaleCostMinOffset, ScaleCostMaxOffset);
1778 }
1779 case LSRUse::ICmpZero:
1780 case LSRUse::Basic:
1781 case LSRUse::Special:
1782 // The use is completely folded, i.e., everything is folded into the
1783 // instruction.
1784 return 0;
1785 }
1786
1787 llvm_unreachable("Invalid LSRUse Kind!");
1788 }
1789
1790 static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
1791 LSRUse::KindType Kind, MemAccessTy AccessTy,
1792 GlobalValue *BaseGV, int64_t BaseOffset,
1793 bool HasBaseReg) {
1794 // Fast-path: zero is always foldable.
1795 if (BaseOffset == 0 && !BaseGV) return true;
1796
1797 // Conservatively, create an address with an immediate, a base
1798 // register, and a scale.
1799 int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1;
1800
1801 // Canonicalize a scale of 1 to a base register if the formula doesn't
1802 // already have a base register.
1803 if (!HasBaseReg && Scale == 1) {
1804 Scale = 0;
1805 HasBaseReg = true;
1806 }
1807
1808 return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, BaseOffset,
1809 HasBaseReg, Scale);
1810 }
1811
1812 static bool isAlwaysFoldable(const TargetTransformInfo &TTI,
1813 ScalarEvolution &SE, int64_t MinOffset,
1814 int64_t MaxOffset, LSRUse::KindType Kind,
1815 MemAccessTy AccessTy, const SCEV *S,
1816 bool HasBaseReg) {
1817 // Fast-path: zero is always foldable.
1818 if (S->isZero()) return true;
1819
1820 // Conservatively, create an address with an immediate, a base
1821 // register, and a scale.
1822 int64_t BaseOffset = ExtractImmediate(S, SE);
1823 GlobalValue *BaseGV = ExtractSymbol(S, SE);
1824
1825 // If there's anything else involved, it's not foldable.
1826 if (!S->isZero()) return false;
1827
1828 // Fast-path: zero is always foldable.
1829 if (BaseOffset == 0 && !BaseGV) return true;
1830
1831 // Conservatively, create an address with an immediate, a base
1832 // register, and a scale.
1833 int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1;
1834
1835 return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV,
1836 BaseOffset, HasBaseReg, Scale);
1837 }
1838
1839 namespace {
1840
1841 /// An individual increment in a Chain of IV increments. Relate an IV user to
1842 /// an expression that computes the IV it uses from the IV used by the previous
1843 /// link in the Chain.
1844 ///
1845 /// For the head of a chain, IncExpr holds the absolute SCEV expression for the
1846 /// original IVOperand. The head of the chain's IVOperand is only valid during
1847 /// chain collection, before LSR replaces IV users. During chain generation,
1848 /// IncExpr can be used to find the new IVOperand that computes the same
1849 /// expression.
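/// For example (illustrative), address computations for a[i], a[i+4], and
/// a[i+8] within one iteration can form a chain in which each link's IncExpr
/// is the constant +4 relative to the previous link.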
1850 struct IVInc {
1851 Instruction *UserInst;
1852 Value* IVOperand;
1853 const SCEV *IncExpr;
1854
1855 IVInc(Instruction *U, Value *O, const SCEV *E)
1856 : UserInst(U), IVOperand(O), IncExpr(E) {}
1857 };
1858
1859 // The list of IV increments in program order. We typically add the head of a
1860 // chain without finding subsequent links.
1861 struct IVChain {
1862 SmallVector<IVInc, 1> Incs;
1863 const SCEV *ExprBase = nullptr;
1864
1865 IVChain() = default;
1866 IVChain(const IVInc &Head, const SCEV *Base)
1867 : Incs(1, Head), ExprBase(Base) {}
1868
1869 using const_iterator = SmallVectorImpl<IVInc>::const_iterator;
1870
1871 // Return the first increment in the chain, skipping the head (which is not
// itself an increment).
1872 const_iterator begin() const {
1873 assert(!Incs.empty());
1874 return std::next(Incs.begin());
1875 }
1876 const_iterator end() const {
1877 return Incs.end();
1878 }
1879
1880 // Returns true if this chain contains any increments.
1881 bool hasIncs() const { return Incs.size() >= 2; }
1882
1883 // Add an IVInc to the end of this chain.
1884 void add(const IVInc &X) { Incs.push_back(X); }
1885
1886 // Returns the last UserInst in the chain.
1887 Instruction *tailUserInst() const { return Incs.back().UserInst; }
1888
1889 // Returns true if IncExpr can be profitably added to this chain.
1890 bool isProfitableIncrement(const SCEV *OperExpr,
1891 const SCEV *IncExpr,
1892 ScalarEvolution&);
1893 };
1894
1895 /// Helper for CollectChains to track multiple IV increment uses. Distinguish
1896 /// between FarUsers that definitely cross IV increments and NearUsers that may
1897 /// be used between IV increments.
1898 struct ChainUsers {
1899 SmallPtrSet<Instruction*, 4> FarUsers;
1900 SmallPtrSet<Instruction*, 4> NearUsers;
1901 };
1902
1903 /// This class holds state for the main loop strength reduction logic.
1904 class LSRInstance {
1905 IVUsers &IU;
1906 ScalarEvolution &SE;
1907 DominatorTree &DT;
1908 LoopInfo &LI;
1909 AssumptionCache &AC;
1910 TargetLibraryInfo &TLI;
1911 const TargetTransformInfo &TTI;
1912 Loop *const L;
1913 MemorySSAUpdater *MSSAU;
1914 bool FavorBackedgeIndex = false;
1915 bool Changed = false;
1916
1917 /// This is the insert position that the current loop's induction variable
1918 /// increment should be placed. In simple loops, this is the latch block's
1919 /// terminator. But in more complicated cases, this is a position which will
1920 /// dominate all the in-loop post-increment users.
1921 Instruction *IVIncInsertPos = nullptr;
1922
1923 /// Interesting factors between use strides.
1924 ///
1925 /// We explicitly use a SetVector which contains a SmallSet, instead of the
1926 /// default, a SmallDenseSet, because we need to use the full range of
1927 /// int64_ts, and there's currently no good way of doing that with
1928 /// SmallDenseSet.
1929 SetVector<int64_t, SmallVector<int64_t, 8>, SmallSet<int64_t, 8>> Factors;
1930
1931 /// Interesting use types, to facilitate truncation reuse.
1932 SmallSetVector<Type *, 4> Types;
1933
1934 /// The list of interesting uses.
1935 mutable SmallVector<LSRUse, 16> Uses;
1936
1937 /// Track which uses use which register candidates.
1938 RegUseTracker RegUses;
1939
1940 // Limit the number of chains to avoid quadratic behavior. We don't expect to
1941 // have more than a few IV increment chains in a loop. Missing a Chain falls
1942 // back to normal LSR behavior for those uses.
1943 static const unsigned MaxChains = 8;
1944
1945 /// IV users can form a chain of IV increments.
1946 SmallVector<IVChain, MaxChains> IVChainVec;
1947
1948 /// IV users that belong to profitable IVChains.
1949 SmallPtrSet<Use*, MaxChains> IVIncSet;
1950
1951 void OptimizeShadowIV();
1952 bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
1953 ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
1954 void OptimizeLoopTermCond();
1955
1956 void ChainInstruction(Instruction *UserInst, Instruction *IVOper,
1957 SmallVectorImpl<ChainUsers> &ChainUsersVec);
1958 void FinalizeChain(IVChain &Chain);
1959 void CollectChains();
1960 void GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
1961 SmallVectorImpl<WeakTrackingVH> &DeadInsts);
1962
1963 void CollectInterestingTypesAndFactors();
1964 void CollectFixupsAndInitialFormulae();
1965
1966 // Support for sharing of LSRUses between LSRFixups.
1967 using UseMapTy = DenseMap<LSRUse::SCEVUseKindPair, size_t>;
1968 UseMapTy UseMap;
1969
1970 bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
1971 LSRUse::KindType Kind, MemAccessTy AccessTy);
1972
1973 std::pair<size_t, int64_t> getUse(const SCEV *&Expr, LSRUse::KindType Kind,
1974 MemAccessTy AccessTy);
1975
1976 void DeleteUse(LSRUse &LU, size_t LUIdx);
1977
1978 LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU);
1979
1980 void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
1981 void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
1982 void CountRegisters(const Formula &F, size_t LUIdx);
1983 bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);
1984
1985 void CollectLoopInvariantFixupsAndFormulae();
1986
1987 void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
1988 unsigned Depth = 0);
1989
1990 void GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
1991 const Formula &Base, unsigned Depth,
1992 size_t Idx, bool IsScaledReg = false);
1993 void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
1994 void GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
1995 const Formula &Base, size_t Idx,
1996 bool IsScaledReg = false);
1997 void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
1998 void GenerateConstantOffsetsImpl(LSRUse &LU, unsigned LUIdx,
1999 const Formula &Base,
2000 const SmallVectorImpl<int64_t> &Worklist,
2001 size_t Idx, bool IsScaledReg = false);
2002 void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
2003 void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
2004 void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
2005 void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
2006 void GenerateCrossUseConstantOffsets();
2007 void GenerateAllReuseFormulae();
2008
2009 void FilterOutUndesirableDedicatedRegisters();
2010
2011 size_t EstimateSearchSpaceComplexity() const;
2012 void NarrowSearchSpaceByDetectingSupersets();
2013 void NarrowSearchSpaceByCollapsingUnrolledCode();
2014 void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
2015 void NarrowSearchSpaceByFilterFormulaWithSameScaledReg();
2016 void NarrowSearchSpaceByFilterPostInc();
2017 void NarrowSearchSpaceByDeletingCostlyFormulas();
2018 void NarrowSearchSpaceByPickingWinnerRegs();
2019 void NarrowSearchSpaceUsingHeuristics();
2020
2021 void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
2022 Cost &SolutionCost,
2023 SmallVectorImpl<const Formula *> &Workspace,
2024 const Cost &CurCost,
2025 const SmallPtrSet<const SCEV *, 16> &CurRegs,
2026 DenseSet<const SCEV *> &VisitedRegs) const;
2027 void Solve(SmallVectorImpl<const Formula *> &Solution) const;
2028
2029 BasicBlock::iterator
2030 HoistInsertPosition(BasicBlock::iterator IP,
2031 const SmallVectorImpl<Instruction *> &Inputs) const;
2032 BasicBlock::iterator
2033 AdjustInsertPositionForExpand(BasicBlock::iterator IP,
2034 const LSRFixup &LF,
2035 const LSRUse &LU,
2036 SCEVExpander &Rewriter) const;
2037
2038 Value *Expand(const LSRUse &LU, const LSRFixup &LF, const Formula &F,
2039 BasicBlock::iterator IP, SCEVExpander &Rewriter,
2040 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
2041 void RewriteForPHI(PHINode *PN, const LSRUse &LU, const LSRFixup &LF,
2042 const Formula &F, SCEVExpander &Rewriter,
2043 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
2044 void Rewrite(const LSRUse &LU, const LSRFixup &LF, const Formula &F,
2045 SCEVExpander &Rewriter,
2046 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const;
2047 void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution);
2048
2049 public:
2050 LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE, DominatorTree &DT,
2051 LoopInfo &LI, const TargetTransformInfo &TTI, AssumptionCache &AC,
2052 TargetLibraryInfo &TLI, MemorySSAUpdater *MSSAU);
2053
2054 bool getChanged() const { return Changed; }
2055
2056 void print_factors_and_types(raw_ostream &OS) const;
2057 void print_fixups(raw_ostream &OS) const;
2058 void print_uses(raw_ostream &OS) const;
2059 void print(raw_ostream &OS) const;
2060 void dump() const;
2061 };
2062
2063 } // end anonymous namespace
2064
2065 /// If the IV is used in an int-to-float cast inside the loop, then try to
2066 /// eliminate the cast operation.
2067 void LSRInstance::OptimizeShadowIV() {
2068 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
2069 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
2070 return;
2071
2072 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
2073 UI != E; /* empty */) {
2074 IVUsers::const_iterator CandidateUI = UI;
2075 ++UI;
2076 Instruction *ShadowUse = CandidateUI->getUser();
2077 Type *DestTy = nullptr;
2078 bool IsSigned = false;
2079
2080 /* If the shadow use is an int->float cast, then insert a second IV
2081 to eliminate this cast.
2082
2083 for (unsigned i = 0; i < n; ++i)
2084 foo((double)i);
2085
2086 is transformed into
2087
2088 double d = 0.0;
2089 for (unsigned i = 0; i < n; ++i, ++d)
2090 foo(d);
2091 */
2092 if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser())) {
2093 IsSigned = false;
2094 DestTy = UCast->getDestTy();
2095 }
2096 else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser())) {
2097 IsSigned = true;
2098 DestTy = SCast->getDestTy();
2099 }
2100 if (!DestTy) continue;
2101
2102 // If target does not support DestTy natively then do not apply
2103 // this transformation.
2104 if (!TTI.isTypeLegal(DestTy)) continue;
2105
2106 PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
2107 if (!PH) continue;
2108 if (PH->getNumIncomingValues() != 2) continue;
2109
2110 // If the integer calculation overflows, the result in the FP type will
2111 // differ. So we can only do this transformation if we are guaranteed not to
2112 // deal with overflowing values.
2113 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PH));
2114 if (!AR) continue;
2115 if (IsSigned && !AR->hasNoSignedWrap()) continue;
2116 if (!IsSigned && !AR->hasNoUnsignedWrap()) continue;
2117
2118 Type *SrcTy = PH->getType();
2119 int Mantissa = DestTy->getFPMantissaWidth();
2120 if (Mantissa == -1) continue;
2121 if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
2122 continue;
2123
2124 unsigned Entry, Latch;
2125 if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
2126 Entry = 0;
2127 Latch = 1;
2128 } else {
2129 Entry = 1;
2130 Latch = 0;
2131 }
2132
2133 ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
2134 if (!Init) continue;
2135 Constant *NewInit = ConstantFP::get(DestTy, IsSigned ?
2136 (double)Init->getSExtValue() :
2137 (double)Init->getZExtValue());
2138
2139 BinaryOperator *Incr =
2140 dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
2141 if (!Incr) continue;
2142 if (Incr->getOpcode() != Instruction::Add
2143 && Incr->getOpcode() != Instruction::Sub)
2144 continue;
2145
2146 /* Initialize new IV, double d = 0.0 in above example. */
2147 ConstantInt *C = nullptr;
2148 if (Incr->getOperand(0) == PH)
2149 C = dyn_cast<ConstantInt>(Incr->getOperand(1));
2150 else if (Incr->getOperand(1) == PH)
2151 C = dyn_cast<ConstantInt>(Incr->getOperand(0));
2152 else
2153 continue;
2154
2155 if (!C) continue;
2156
2157 // Ignore negative constants, as the code below doesn't handle them
2158 // correctly. TODO: Remove this restriction.
2159 if (!C->getValue().isStrictlyPositive()) continue;
2160
2161 /* Add new PHINode. */
2162 PHINode *NewPH = PHINode::Create(DestTy, 2, "IV.S.", PH);
2163
2164 /* create new increment. '++d' in above example. */
2165 Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
2166 BinaryOperator *NewIncr =
2167 BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
2168 Instruction::FAdd : Instruction::FSub,
2169 NewPH, CFP, "IV.S.next.", Incr);
2170
2171 NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
2172 NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));
2173
2174 /* Remove cast operation */
2175 ShadowUse->replaceAllUsesWith(NewPH);
2176 ShadowUse->eraseFromParent();
2177 Changed = true;
2178 break;
2179 }
2180 }
2181
2182 /// If Cond has an operand that is an expression of an IV, set the IV user and
2183 /// stride information and return true, otherwise return false.
2184 bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) {
2185 for (IVStrideUse &U : IU)
2186 if (U.getUser() == Cond) {
2187 // NOTE: we could handle setcc instructions with multiple uses here, but
2188 // InstCombine does that as well for simple uses; it's not clear that this
2189 // occurs often enough in real life to be worth handling.
2190 CondUse = &U;
2191 return true;
2192 }
2193 return false;
2194 }
2195
2196 /// Rewrite the loop's terminating condition if it uses a max computation.
2197 ///
2198 /// This is a narrow solution to a specific, but acute, problem. For loops
2199 /// like this:
2200 ///
2201 /// i = 0;
2202 /// do {
2203 /// p[i] = 0.0;
2204 /// } while (++i < n);
2205 ///
2206 /// the trip count isn't just 'n', because 'n' might not be positive. And
2207 /// unfortunately this can come up even for loops where the user didn't use
2208 /// a C do-while loop. For example, seemingly well-behaved top-test loops
2209 /// will commonly be lowered like this:
2210 ///
2211 /// if (n > 0) {
2212 /// i = 0;
2213 /// do {
2214 /// p[i] = 0.0;
2215 /// } while (++i < n);
2216 /// }
2217 ///
2218 /// and then it's possible for subsequent optimization to obscure the if
2219 /// test in such a way that indvars can't find it.
2220 ///
2221 /// When indvars can't find the if test in loops like this, it creates a
2222 /// max expression, which allows it to give the loop a canonical
2223 /// induction variable:
2224 ///
2225 /// i = 0;
2226 /// max = n < 1 ? 1 : n;
2227 /// do {
2228 /// p[i] = 0.0;
2229 /// } while (++i != max);
2230 ///
2231 /// Canonical induction variables are necessary because the loop passes
2232 /// are designed around them. The most obvious example of this is the
2233 /// LoopInfo analysis, which doesn't remember trip count values. It
2234 /// expects to be able to rediscover the trip count each time it is
2235 /// needed, and it does this using a simple analysis that only succeeds if
2236 /// the loop has a canonical induction variable.
2237 ///
2238 /// However, when it comes time to generate code, the maximum operation
2239 /// can be quite costly, especially if it's inside of an outer loop.
2240 ///
2241 /// This function solves this problem by detecting this type of loop and
2242 /// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
2243 /// the instructions for the maximum computation.
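/// For the example above (illustrative), the backedge condition
///   %c = icmp ne i32 %i.next, %max
/// is rewritten to
///   %c = icmp slt i32 %i.next, %n
/// and the now-dead max computation feeding %max is deleted.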
2244 ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
2245 // Check that the loop matches the pattern we're looking for.
2246 if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
2247 Cond->getPredicate() != CmpInst::ICMP_NE)
2248 return Cond;
2249
2250 SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
2251 if (!Sel || !Sel->hasOneUse()) return Cond;
2252
2253 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
2254 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
2255 return Cond;
2256 const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1);
2257
2258 // Add one to the backedge-taken count to get the trip count.
2259 const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount);
2260 if (IterationCount != SE.getSCEV(Sel)) return Cond;
2261
2262 // Check for a max calculation that matches the pattern. There's no check
2263 // for ICMP_ULE here because the comparison would be with zero, which
2264 // isn't interesting.
2265 CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
2266 const SCEVNAryExpr *Max = nullptr;
2267 if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) {
2268 Pred = ICmpInst::ICMP_SLE;
2269 Max = S;
2270 } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) {
2271 Pred = ICmpInst::ICMP_SLT;
2272 Max = S;
2273 } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) {
2274 Pred = ICmpInst::ICMP_ULT;
2275 Max = U;
2276 } else {
2277 // No match; bail.
2278 return Cond;
2279 }
2280
2281 // To handle a max with more than two operands, this optimization would
2282 // require additional checking and setup.
2283 if (Max->getNumOperands() != 2)
2284 return Cond;
2285
2286 const SCEV *MaxLHS = Max->getOperand(0);
2287 const SCEV *MaxRHS = Max->getOperand(1);
2288
2289 // ScalarEvolution canonicalizes constants to the left. For < and >, look
2290 // for a comparison with 1. For <= and >=, a comparison with zero.
2291 if (!MaxLHS ||
2292 (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One)))
2293 return Cond;
2294
2295 // Check the relevant induction variable for conformance to
2296 // the pattern.
2297 const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
2298 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
2299 if (!AR || !AR->isAffine() ||
2300 AR->getStart() != One ||
2301 AR->getStepRecurrence(SE) != One)
2302 return Cond;
2303
2304 assert(AR->getLoop() == L &&
2305 "Loop condition operand is an addrec in a different loop!");
2306
2307 // Check the right operand of the select, and remember it, as it will
2308 // be used in the new comparison instruction.
2309 Value *NewRHS = nullptr;
2310 if (ICmpInst::isTrueWhenEqual(Pred)) {
2311 // Look for n+1, and grab n.
2312 if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1)))
2313 if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1)))
2314 if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS)
2315 NewRHS = BO->getOperand(0);
2316 if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2)))
2317 if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1)))
2318 if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS)
2319 NewRHS = BO->getOperand(0);
2320 if (!NewRHS)
2321 return Cond;
2322 } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS)
2323 NewRHS = Sel->getOperand(1);
2324 else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS)
2325 NewRHS = Sel->getOperand(2);
2326 else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS))
2327 NewRHS = SU->getValue();
2328 else
2329 // Max doesn't match expected pattern.
2330 return Cond;
2331
2332 // Determine the new comparison opcode. It may be signed or unsigned,
2333 // and the original comparison may be either equality or inequality.
2334 if (Cond->getPredicate() == CmpInst::ICMP_EQ)
2335 Pred = CmpInst::getInversePredicate(Pred);
2336
2337 // Ok, everything looks ok to change the condition into an SLT or SGE and
2338 // delete the max calculation.
2339 ICmpInst *NewCond =
2340 new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");
2341
2342 // Delete the max calculation instructions.
2343 Cond->replaceAllUsesWith(NewCond);
2344 CondUse->setUser(NewCond);
2345 Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
2346 Cond->eraseFromParent();
2347 Sel->eraseFromParent();
2348 if (Cmp->use_empty())
2349 Cmp->eraseFromParent();
2350 return NewCond;
2351 }
2352
2353 /// Change loop terminating condition to use the postinc iv when possible.
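/// Roughly (illustrative), instead of testing the pre-increment value as in
///   %c = icmp slt i32 %i, %n
/// the exit test is rewritten in terms of the post-increment value %i.next,
/// which lets the IV's pre- and post-increment values share one live range.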
2354 void
2355 LSRInstance::OptimizeLoopTermCond() {
2356 SmallPtrSet<Instruction *, 4> PostIncs;
2357
2358 // We need a different set of heuristics for rotated and non-rotated loops.
2359 // If a loop is rotated then the latch is also the backedge, so inserting
2360 // post-inc expressions just before the latch is ideal. To reduce live ranges
2361 // it also makes sense to rewrite terminating conditions to use post-inc
2362 // expressions.
2363 //
2364 // If the loop is not rotated then the latch is not an exiting block; the exit
2365 // check is done in the loop header. Adding post-inc expressions before the
2366 // latch will cause overlapping live-ranges of pre-inc and post-inc expressions
2367 // in the loop body. In this case we do *not* want to use post-inc expressions
2368 // in the latch check, and we want to insert post-inc expressions before
2369 // the backedge.
2370 BasicBlock *LatchBlock = L->getLoopLatch();
2371 SmallVector<BasicBlock*, 8> ExitingBlocks;
2372 L->getExitingBlocks(ExitingBlocks);
2373 if (llvm::all_of(ExitingBlocks, [&LatchBlock](const BasicBlock *BB) {
2374 return LatchBlock != BB;
2375 })) {
2376 // The backedge doesn't exit the loop; treat this as a head-tested loop.
2377 IVIncInsertPos = LatchBlock->getTerminator();
2378 return;
2379 }
2380
2381 // Otherwise treat this as a rotated loop.
2382 for (BasicBlock *ExitingBlock : ExitingBlocks) {
2383 // Get the terminating condition for the loop if possible. If we
2384 // can, we want to change it to use a post-incremented version of its
2385 // induction variable, to allow coalescing the live ranges for the IV into
2386 // one register value.
2387
2388 BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
2389 if (!TermBr)
2390 continue;
2391 // FIXME: Overly conservative, termination condition could be an 'or' etc..
2392 if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
2393 continue;
2394
2395 // Search IVUsesByStride to find Cond's IVUse if there is one.
2396 IVStrideUse *CondUse = nullptr;
2397 ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
2398 if (!FindIVUserForCond(Cond, CondUse))
2399 continue;
2400
2401 // If the trip count is computed in terms of a max (due to ScalarEvolution
2402 // being unable to find a sufficient guard, for example), change the loop
2403 // comparison to use SLT or ULT instead of NE.
2404 // One consequence of doing this now is that it disrupts the count-down
2405 // optimization. That's not always a bad thing though, because in such
2406 // cases it may still be worthwhile to avoid a max.
2407 Cond = OptimizeMax(Cond, CondUse);
2408
2409 // If this exiting block dominates the latch block, it may also use
2410 // the post-inc value if it won't be shared with other uses.
2411 // Check for dominance.
2412 if (!DT.dominates(ExitingBlock, LatchBlock))
2413 continue;
2414
2415 // Conservatively avoid trying to use the post-inc value in non-latch
2416 // exits if there may be pre-inc users in intervening blocks.
2417 if (LatchBlock != ExitingBlock)
2418 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
2419 // Test if the use is reachable from the exiting block. This dominator
2420 // query is a conservative approximation of reachability.
2421 if (&*UI != CondUse &&
2422 !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) {
2423 // Conservatively assume there may be reuse if the quotient of their
2424 // strides could be a legal scale.
2425 const SCEV *A = IU.getStride(*CondUse, L);
2426 const SCEV *B = IU.getStride(*UI, L);
2427 if (!A || !B) continue;
2428 if (SE.getTypeSizeInBits(A->getType()) !=
2429 SE.getTypeSizeInBits(B->getType())) {
2430 if (SE.getTypeSizeInBits(A->getType()) >
2431 SE.getTypeSizeInBits(B->getType()))
2432 B = SE.getSignExtendExpr(B, A->getType());
2433 else
2434 A = SE.getSignExtendExpr(A, B->getType());
2435 }
2436 if (const SCEVConstant *D =
2437 dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) {
2438 const ConstantInt *C = D->getValue();
2439 // Stride of one or negative one can have reuse with non-addresses.
2440 if (C->isOne() || C->isMinusOne())
2441 goto decline_post_inc;
2442 // Avoid weird situations.
2443 if (C->getValue().getMinSignedBits() >= 64 ||
2444 C->getValue().isMinSignedValue())
2445 goto decline_post_inc;
2446 // Check for possible scaled-address reuse.
2447 if (isAddressUse(TTI, UI->getUser(), UI->getOperandValToReplace())) {
2448 MemAccessTy AccessTy = getAccessType(
2449 TTI, UI->getUser(), UI->getOperandValToReplace());
2450 int64_t Scale = C->getSExtValue();
2451 if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr,
2452 /*BaseOffset=*/0,
2453 /*HasBaseReg=*/false, Scale,
2454 AccessTy.AddrSpace))
2455 goto decline_post_inc;
2456 Scale = -Scale;
2457 if (TTI.isLegalAddressingMode(AccessTy.MemTy, /*BaseGV=*/nullptr,
2458 /*BaseOffset=*/0,
2459 /*HasBaseReg=*/false, Scale,
2460 AccessTy.AddrSpace))
2461 goto decline_post_inc;
2462 }
2463 }
2464 }
2465
2466 LLVM_DEBUG(dbgs() << " Change loop exiting icmp to use postinc iv: "
2467 << *Cond << '\n');
2468
2469 // It's possible for the setcc instruction to be anywhere in the loop, and
2470 // possible for it to have multiple users. If it is not immediately before
2471 // the exiting block branch, move it.
2472 if (&*++BasicBlock::iterator(Cond) != TermBr) {
2473 if (Cond->hasOneUse()) {
2474 Cond->moveBefore(TermBr);
2475 } else {
2476 // Clone the terminating condition and insert into the loopend.
2477 ICmpInst *OldCond = Cond;
2478 Cond = cast<ICmpInst>(Cond->clone());
2479 Cond->setName(L->getHeader()->getName() + ".termcond");
2480 ExitingBlock->getInstList().insert(TermBr->getIterator(), Cond);
2481
2482 // Clone the IVUse, as the old use still exists!
2483 CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace());
2484 TermBr->replaceUsesOfWith(OldCond, Cond);
2485 }
2486 }
2487
2488 // If we get to here, we know that we can transform the setcc instruction to
2489 // use the post-incremented version of the IV, allowing us to coalesce the
2490 // live ranges for the IV correctly.
2491 CondUse->transformToPostInc(L);
2492 Changed = true;
2493
2494 PostIncs.insert(Cond);
2495 decline_post_inc:;
2496 }
2497
2498 // Determine an insertion point for the loop induction variable increment. It
2499 // must dominate all the post-inc comparisons we just set up, and it must
2500 // dominate the loop latch edge.
2501 IVIncInsertPos = L->getLoopLatch()->getTerminator();
2502 for (Instruction *Inst : PostIncs) {
2503 BasicBlock *BB =
2504 DT.findNearestCommonDominator(IVIncInsertPos->getParent(),
2505 Inst->getParent());
2506 if (BB == Inst->getParent())
2507 IVIncInsertPos = Inst;
2508 else if (BB != IVIncInsertPos->getParent())
2509 IVIncInsertPos = BB->getTerminator();
2510 }
2511 }
2512
2513 /// Determine if the given use can accommodate a fixup at the given offset and
2514 /// other details. If so, update the use and return true.
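/// For example (illustrative), a use currently spanning offsets [0, 64] can be
/// widened to cover a new fixup at offset -8 only if an offset of 72
/// (MaxOffset - NewOffset) is still foldable for the access type.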
2515 bool LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset,
2516 bool HasBaseReg, LSRUse::KindType Kind,
2517 MemAccessTy AccessTy) {
2518 int64_t NewMinOffset = LU.MinOffset;
2519 int64_t NewMaxOffset = LU.MaxOffset;
2520 MemAccessTy NewAccessTy = AccessTy;
2521
2522 // Check for a mismatched kind. It's tempting to collapse mismatched kinds to
2523 // something conservative, however this can pessimize in the case that one of
2524 // the uses will have all its uses outside the loop, for example.
2525 if (LU.Kind != Kind)
2526 return false;
2527
2528 // Check for a mismatched access type, and fall back conservatively as needed.
2529 // TODO: Be less conservative when the type is similar and can use the same
2530 // addressing modes.
2531 if (Kind == LSRUse::Address) {
2532 if (AccessTy.MemTy != LU.AccessTy.MemTy) {
2533 NewAccessTy = MemAccessTy::getUnknown(AccessTy.MemTy->getContext(),
2534 AccessTy.AddrSpace);
2535 }
2536 }
2537
2538 // Conservatively assume HasBaseReg is true for now.
2539 if (NewOffset < LU.MinOffset) {
2540 if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr,
2541 LU.MaxOffset - NewOffset, HasBaseReg))
2542 return false;
2543 NewMinOffset = NewOffset;
2544 } else if (NewOffset > LU.MaxOffset) {
2545 if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr,
2546 NewOffset - LU.MinOffset, HasBaseReg))
2547 return false;
2548 NewMaxOffset = NewOffset;
2549 }
2550
2551 // Update the use.
2552 LU.MinOffset = NewMinOffset;
2553 LU.MaxOffset = NewMaxOffset;
2554 LU.AccessTy = NewAccessTy;
2555 return true;
2556 }
2557
2558 /// Return an LSRUse index and an offset value for a fixup which needs the given
2559 /// expression, with the given kind and optional access type. Either reuse an
2560 /// existing use or create a new one, as needed.
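/// For example (illustrative), a fixup needing {%base,+,1}<%L> + 4 may reuse an
/// existing use whose expression is {%base,+,1}<%L>, returning that use's index
/// together with the offset 4.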
2561 std::pair<size_t, int64_t> LSRInstance::getUse(const SCEV *&Expr,
2562 LSRUse::KindType Kind,
2563 MemAccessTy AccessTy) {
2564 const SCEV *Copy = Expr;
2565 int64_t Offset = ExtractImmediate(Expr, SE);
2566
2567 // Basic uses can't accept any offset, for example.
2568 if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr,
2569 Offset, /*HasBaseReg=*/ true)) {
2570 Expr = Copy;
2571 Offset = 0;
2572 }
2573
2574 std::pair<UseMapTy::iterator, bool> P =
2575 UseMap.insert(std::make_pair(LSRUse::SCEVUseKindPair(Expr, Kind), 0));
2576 if (!P.second) {
2577 // A use already existed with this base.
2578 size_t LUIdx = P.first->second;
2579 LSRUse &LU = Uses[LUIdx];
2580 if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy))
2581 // Reuse this use.
2582 return std::make_pair(LUIdx, Offset);
2583 }
2584
2585 // Create a new use.
2586 size_t LUIdx = Uses.size();
2587 P.first->second = LUIdx;
2588 Uses.push_back(LSRUse(Kind, AccessTy));
2589 LSRUse &LU = Uses[LUIdx];
2590
2591 LU.MinOffset = Offset;
2592 LU.MaxOffset = Offset;
2593 return std::make_pair(LUIdx, Offset);
2594 }
2595
2596 /// Delete the given use from the Uses list.
2597 void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) {
2598 if (&LU != &Uses.back())
2599 std::swap(LU, Uses.back());
2600 Uses.pop_back();
2601
2602 // Update RegUses.
2603 RegUses.swapAndDropUse(LUIdx, Uses.size());
2604 }
2605
2606 /// Look for a use distinct from OrigLU which has a formula with the same
2607 /// registers as the given formula.
2608 LSRUse *
2609 LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
2610 const LSRUse &OrigLU) {
2611 // Search all uses for the formula. This could be more clever.
2612 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
2613 LSRUse &LU = Uses[LUIdx];
2614 // Check whether this use is close enough to OrigLU, to see whether it's
2615 // worthwhile looking through its formulae.
2616 // Ignore ICmpZero uses because they may contain formulae generated by
2617 // GenerateICmpZeroScales, in which case adding fixup offsets may
2618 // be invalid.
2619 if (&LU != &OrigLU &&
2620 LU.Kind != LSRUse::ICmpZero &&
2621 LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy &&
2622 LU.WidestFixupType == OrigLU.WidestFixupType &&
2623 LU.HasFormulaWithSameRegs(OrigF)) {
2624 // Scan through this use's formulae.
2625 for (const Formula &F : LU.Formulae) {
2626 // Check to see if this formula has the same registers and symbols
2627 // as OrigF.
2628 if (F.BaseRegs == OrigF.BaseRegs &&
2629 F.ScaledReg == OrigF.ScaledReg &&
2630 F.BaseGV == OrigF.BaseGV &&
2631 F.Scale == OrigF.Scale &&
2632 F.UnfoldedOffset == OrigF.UnfoldedOffset) {
2633 if (F.BaseOffset == 0)
2634 return &LU;
2635 // This is the formula where all the registers and symbols matched;
2636 // there aren't going to be any others. Since we declined it, we
2637 // can skip the rest of the formulae and proceed to the next LSRUse.
2638 break;
2639 }
2640 }
2641 }
2642 }
2643
2644 // Nothing looked good.
2645 return nullptr;
2646 }
2647
2648 void LSRInstance::CollectInterestingTypesAndFactors() {
2649 SmallSetVector<const SCEV *, 4> Strides;
2650
2651 // Collect interesting types and strides.
2652 SmallVector<const SCEV *, 4> Worklist;
2653 for (const IVStrideUse &U : IU) {
2654 const SCEV *Expr = IU.getExpr(U);
2655
2656 // Collect interesting types.
2657 Types.insert(SE.getEffectiveSCEVType(Expr->getType()));
2658
2659 // Add strides for mentioned loops.
2660 Worklist.push_back(Expr);
2661 do {
2662 const SCEV *S = Worklist.pop_back_val();
2663 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
2664 if (AR->getLoop() == L)
2665 Strides.insert(AR->getStepRecurrence(SE));
2666 Worklist.push_back(AR->getStart());
2667 } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2668 Worklist.append(Add->op_begin(), Add->op_end());
2669 }
2670 } while (!Worklist.empty());
2671 }
2672
2673 // Compute interesting factors from the set of interesting strides.
2674 for (SmallSetVector<const SCEV *, 4>::const_iterator
2675 I = Strides.begin(), E = Strides.end(); I != E; ++I)
2676 for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
2677 std::next(I); NewStrideIter != E; ++NewStrideIter) {
2678 const SCEV *OldStride = *I;
2679 const SCEV *NewStride = *NewStrideIter;
2680
2681 if (SE.getTypeSizeInBits(OldStride->getType()) !=
2682 SE.getTypeSizeInBits(NewStride->getType())) {
2683 if (SE.getTypeSizeInBits(OldStride->getType()) >
2684 SE.getTypeSizeInBits(NewStride->getType()))
2685 NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType());
2686 else
2687 OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType());
2688 }
2689 if (const SCEVConstant *Factor =
2690 dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride,
2691 SE, true))) {
2692 if (Factor->getAPInt().getMinSignedBits() <= 64)
2693 Factors.insert(Factor->getAPInt().getSExtValue());
2694 } else if (const SCEVConstant *Factor =
2695 dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride,
2696 NewStride,
2697 SE, true))) {
2698 if (Factor->getAPInt().getMinSignedBits() <= 64)
2699 Factors.insert(Factor->getAPInt().getSExtValue());
2700 }
2701 }
2702
2703 // If all uses use the same type, don't bother looking for truncation-based
2704 // reuse.
2705 if (Types.size() == 1)
2706 Types.clear();
2707
2708 LLVM_DEBUG(print_factors_and_types(dbgs()));
2709 }
2710
2711 /// Helper for CollectChains that finds an IV operand (computed by an AddRec in
2712 /// this loop) within [OI,OE) or returns OE. If IVUsers mapped Instructions to
2713 /// IVStrideUses, we could partially skip this.
2714 static User::op_iterator
2715 findIVOperand(User::op_iterator OI, User::op_iterator OE,
2716 Loop *L, ScalarEvolution &SE) {
2717 for(; OI != OE; ++OI) {
2718 if (Instruction *Oper = dyn_cast<Instruction>(*OI)) {
2719 if (!SE.isSCEVable(Oper->getType()))
2720 continue;
2721
2722 if (const SCEVAddRecExpr *AR =
2723 dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Oper))) {
2724 if (AR->getLoop() == L)
2725 break;
2726 }
2727 }
2728 }
2729 return OI;
2730 }
2731
2732 /// IVChain logic must consistently peek base TruncInst operands, so wrap it in
2733 /// a convenient helper.
2734 static Value *getWideOperand(Value *Oper) {
2735 if (TruncInst *Trunc = dyn_cast<TruncInst>(Oper))
2736 return Trunc->getOperand(0);
2737 return Oper;
2738 }
2739
2740 /// Return true if we allow an IV chain to include both types.
2741 static bool isCompatibleIVType(Value *LVal, Value *RVal) {
2742 Type *LType = LVal->getType();
2743 Type *RType = RVal->getType();
2744 return (LType == RType) || (LType->isPointerTy() && RType->isPointerTy() &&
2745 // Different address spaces means (possibly)
2746 // different types of the pointer implementation,
2747 // e.g. i16 vs i32 so disallow that.
2748 (LType->getPointerAddressSpace() ==
2749 RType->getPointerAddressSpace()));
2750 }
2751
2752 /// Return an approximation of this SCEV expression's "base", or NULL for any
2753 /// constant. Returning the expression itself is conservative. Returning a
2754 /// deeper subexpression is more precise and valid as long as it isn't less
2755 /// complex than another subexpression. For expressions involving multiple
2756 /// unscaled values, we need to return the pointer-type SCEVUnknown. This avoids
2757 /// forming chains across objects, such as: PrevOper==a[i], IVOper==b[i],
2758 /// IVInc==b-a.
2759 ///
2760 /// Since SCEVUnknown is the rightmost type, and pointers are the rightmost
2761 /// SCEVUnknown, we simply return the rightmost SCEV operand.
2762 static const SCEV *getExprBase(const SCEV *S) {
2763 switch (S->getSCEVType()) {
2764 default: // including scUnknown.
2765 return S;
2766 case scConstant:
2767 return nullptr;
2768 case scTruncate:
2769 return getExprBase(cast<SCEVTruncateExpr>(S)->getOperand());
2770 case scZeroExtend:
2771 return getExprBase(cast<SCEVZeroExtendExpr>(S)->getOperand());
2772 case scSignExtend:
2773 return getExprBase(cast<SCEVSignExtendExpr>(S)->getOperand());
2774 case scAddExpr: {
2775 // Skip over scaled operands (scMulExpr) to follow add operands as long as
2776 // there's nothing more complex.
2777 // FIXME: not sure if we want to recognize negation.
2778 const SCEVAddExpr *Add = cast<SCEVAddExpr>(S);
2779 for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(Add->op_end()),
2780 E(Add->op_begin()); I != E; ++I) {
2781 const SCEV *SubExpr = *I;
2782 if (SubExpr->getSCEVType() == scAddExpr)
2783 return getExprBase(SubExpr);
2784
2785 if (SubExpr->getSCEVType() != scMulExpr)
2786 return SubExpr;
2787 }
2788 return S; // all operands are scaled, be conservative.
2789 }
2790 case scAddRecExpr:
2791 return getExprBase(cast<SCEVAddRecExpr>(S)->getStart());
2792 }
2793 llvm_unreachable("Unknown SCEV kind!");
2794 }
2795
2796 /// Return true if the chain increment is profitable to expand into a loop
2797 /// invariant value, which may require its own register. A profitable chain
2798 /// increment will be an offset relative to the same base. We allow such offsets
2799 /// to be used as the chain increment as long as they are not obviously
2800 /// expensive to expand using real instructions.
2801 bool IVChain::isProfitableIncrement(const SCEV *OperExpr,
2802 const SCEV *IncExpr,
2803 ScalarEvolution &SE) {
2804 // Aggressively form chains when -stress-ivchain.
2805 if (StressIVChain)
2806 return true;
2807
2808 // Do not replace a constant offset from IV head with a nonconstant IV
2809 // increment.
2810 if (!isa<SCEVConstant>(IncExpr)) {
2811 const SCEV *HeadExpr = SE.getSCEV(getWideOperand(Incs[0].IVOperand));
2812 if (isa<SCEVConstant>(SE.getMinusSCEV(OperExpr, HeadExpr)))
2813 return false;
2814 }
2815
2816 SmallPtrSet<const SCEV*, 8> Processed;
2817 return !isHighCostExpansion(IncExpr, Processed, SE);
2818 }
2819
2820 /// Return true if the number of registers needed for the chain is estimated to
2821 /// be less than the number required for the individual IV users. First prohibit
2822 /// any IV users that keep the IV live across increments (the Users set should
2823 /// be empty). Next count the number and type of increments in the chain.
2824 ///
2825 /// Chaining IVs can lead to considerable code bloat if ISEL doesn't
2826 /// effectively use postinc addressing modes. Only consider it profitable if the
2827 /// increments can be computed in fewer registers when chained.
2828 ///
2829 /// TODO: Consider an IVInc free if it's already used in another chain.
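/// For example (illustrative), a chain whose three increments all use the same
/// loop-invariant increment expression scores 1 (the chain) + 1 (the first
/// variable increment) - 2 (its reuses) = 0, which is not yet profitable; if
/// the chain also completes an existing header phi, one more is subtracted,
/// giving -1, which is profitable.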
2830 static bool isProfitableChain(IVChain &Chain,
2831 SmallPtrSetImpl<Instruction *> &Users,
2832 ScalarEvolution &SE,
2833 const TargetTransformInfo &TTI) {
2834 if (StressIVChain)
2835 return true;
2836
2837 if (!Chain.hasIncs())
2838 return false;
2839
2840 if (!Users.empty()) {
2841 LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n";
2842 for (Instruction *Inst
2843 : Users) { dbgs() << " " << *Inst << "\n"; });
2844 return false;
2845 }
2846 assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
2847
2848 // The chain itself may require a register, so initialize cost to 1.
2849 int cost = 1;
2850
2851 // A complete chain likely eliminates the need for keeping the original IV in
2852 // a register. LSR does not currently know how to form a complete chain unless
2853 // the header phi already exists.
2854 if (isa<PHINode>(Chain.tailUserInst())
2855 && SE.getSCEV(Chain.tailUserInst()) == Chain.Incs[0].IncExpr) {
2856 --cost;
2857 }
2858 const SCEV *LastIncExpr = nullptr;
2859 unsigned NumConstIncrements = 0;
2860 unsigned NumVarIncrements = 0;
2861 unsigned NumReusedIncrements = 0;
2862
2863 // If any LSRUse in the chain is marked as profitable by target, mark this
2864 // chain as profitable.
2865 for (const IVInc &Inc : Chain.Incs)
2866 if (TTI.isProfitableLSRChainElement(Inc.UserInst))
2867 return true;
2868
2869 // If number of registers is not the major cost, we cannot benefit from this
2870 // profitable chain which is based on number of registers.
2871 // FIXME: add profitable chain optimization for other kinds major cost, for
2872 // example number of instructions.
2873 if (!TTI.isNumRegsMajorCostOfLSR())
2874 return false;
2875
2876 for (const IVInc &Inc : Chain) {
2877 if (Inc.IncExpr->isZero())
2878 continue;
2879
2880 // Incrementing by zero or some constant is neutral. We assume constants can
2881 // be folded into an addressing mode or an add's immediate operand.
2882 if (isa<SCEVConstant>(Inc.IncExpr)) {
2883 ++NumConstIncrements;
2884 continue;
2885 }
2886
2887 if (Inc.IncExpr == LastIncExpr)
2888 ++NumReusedIncrements;
2889 else
2890 ++NumVarIncrements;
2891
2892 LastIncExpr = Inc.IncExpr;
2893 }
2894 // An IV chain with a single increment is handled by LSR's postinc
2895 // uses. However, a chain with multiple increments requires keeping the IV's
2896 // value live longer than it needs to be if chained.
2897 if (NumConstIncrements > 1)
2898 --cost;
2899
2900 // Materializing increment expressions in the preheader that didn't exist in
2901 // the original code may cost a register. For example, sign-extended array
2902 // indices can produce ridiculous increments like this:
2903 // IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64)))
2904 cost += NumVarIncrements;
2905
2906 // Reusing variable increments likely saves a register to hold the multiple of
2907 // the stride.
2908 cost -= NumReusedIncrements;
2909
2910 LLVM_DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost
2911 << "\n");
2912
2913 return cost < 0;
2914 }
2915
2916 /// Add this IV user to an existing chain or make it the head of a new chain.
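///
/// For instance (hypothetical IR), loads from %p, %p + 16 and %p + 32 in one
/// iteration can be linked into a single chain whose head operand is %p and
/// whose subsequent increments are the constant 16.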
2917 void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
2918 SmallVectorImpl<ChainUsers> &ChainUsersVec) {
2919 // When IVs are used as types of varying widths, they are generally converted
2920 // to a wider type with some uses remaining narrow under a (free) trunc.
2921 Value *const NextIV = getWideOperand(IVOper);
2922 const SCEV *const OperExpr = SE.getSCEV(NextIV);
2923 const SCEV *const OperExprBase = getExprBase(OperExpr);
2924
2925   // Visit all existing chains. Check whether this user's IVOper can be computed
2926   // as a profitable loop-invariant increment from the last link in the chain.
2927 unsigned ChainIdx = 0, NChains = IVChainVec.size();
2928 const SCEV *LastIncExpr = nullptr;
2929 for (; ChainIdx < NChains; ++ChainIdx) {
2930 IVChain &Chain = IVChainVec[ChainIdx];
2931
2932 // Prune the solution space aggressively by checking that both IV operands
2933 // are expressions that operate on the same unscaled SCEVUnknown. This
2934 // "base" will be canceled by the subsequent getMinusSCEV call. Checking
2935 // first avoids creating extra SCEV expressions.
2936 if (!StressIVChain && Chain.ExprBase != OperExprBase)
2937 continue;
2938
2939 Value *PrevIV = getWideOperand(Chain.Incs.back().IVOperand);
2940 if (!isCompatibleIVType(PrevIV, NextIV))
2941 continue;
2942
2943 // A phi node terminates a chain.
2944 if (isa<PHINode>(UserInst) && isa<PHINode>(Chain.tailUserInst()))
2945 continue;
2946
2947 // The increment must be loop-invariant so it can be kept in a register.
2948 const SCEV *PrevExpr = SE.getSCEV(PrevIV);
2949 const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr);
2950 if (!SE.isLoopInvariant(IncExpr, L))
2951 continue;
2952
2953 if (Chain.isProfitableIncrement(OperExpr, IncExpr, SE)) {
2954 LastIncExpr = IncExpr;
2955 break;
2956 }
2957 }
2958 // If we haven't found a chain, create a new one, unless we hit the max. Don't
2959 // bother for phi nodes, because they must be last in the chain.
2960 if (ChainIdx == NChains) {
2961 if (isa<PHINode>(UserInst))
2962 return;
2963 if (NChains >= MaxChains && !StressIVChain) {
2964 LLVM_DEBUG(dbgs() << "IV Chain Limit\n");
2965 return;
2966 }
2967 LastIncExpr = OperExpr;
2968 // IVUsers may have skipped over sign/zero extensions. We don't currently
2969 // attempt to form chains involving extensions unless they can be hoisted
2970 // into this loop's AddRec.
2971 if (!isa<SCEVAddRecExpr>(LastIncExpr))
2972 return;
2973 ++NChains;
2974 IVChainVec.push_back(IVChain(IVInc(UserInst, IVOper, LastIncExpr),
2975 OperExprBase));
2976 ChainUsersVec.resize(NChains);
2977 LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst
2978 << ") IV=" << *LastIncExpr << "\n");
2979 } else {
2980 LLVM_DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Inc: (" << *UserInst
2981 << ") IV+" << *LastIncExpr << "\n");
2982 // Add this IV user to the end of the chain.
2983 IVChainVec[ChainIdx].add(IVInc(UserInst, IVOper, LastIncExpr));
2984 }
2985 IVChain &Chain = IVChainVec[ChainIdx];
2986
2987 SmallPtrSet<Instruction*,4> &NearUsers = ChainUsersVec[ChainIdx].NearUsers;
2988 // This chain's NearUsers become FarUsers.
2989 if (!LastIncExpr->isZero()) {
2990 ChainUsersVec[ChainIdx].FarUsers.insert(NearUsers.begin(),
2991 NearUsers.end());
2992 NearUsers.clear();
2993 }
2994
2995 // All other uses of IVOperand become near uses of the chain.
2996 // We currently ignore intermediate values within SCEV expressions, assuming
2997   // they will eventually be used by the current chain, or can be computed
2998 // from one of the chain increments. To be more precise we could
2999 // transitively follow its user and only add leaf IV users to the set.
3000 for (User *U : IVOper->users()) {
3001 Instruction *OtherUse = dyn_cast<Instruction>(U);
3002 if (!OtherUse)
3003 continue;
3004 // Uses in the chain will no longer be uses if the chain is formed.
3005 // Include the head of the chain in this iteration (not Chain.begin()).
3006 IVChain::const_iterator IncIter = Chain.Incs.begin();
3007 IVChain::const_iterator IncEnd = Chain.Incs.end();
3008 for( ; IncIter != IncEnd; ++IncIter) {
3009 if (IncIter->UserInst == OtherUse)
3010 break;
3011 }
3012 if (IncIter != IncEnd)
3013 continue;
3014
3015 if (SE.isSCEVable(OtherUse->getType())
3016 && !isa<SCEVUnknown>(SE.getSCEV(OtherUse))
3017 && IU.isIVUserOrOperand(OtherUse)) {
3018 continue;
3019 }
3020 NearUsers.insert(OtherUse);
3021 }
3022
3023 // Since this user is part of the chain, it's no longer considered a use
3024 // of the chain.
3025 ChainUsersVec[ChainIdx].FarUsers.erase(UserInst);
3026 }
3027
3028 /// Populate the vector of Chains.
3029 ///
3030 /// This decreases ILP at the architecture level. Targets with ample registers,
3031 /// multiple memory ports, and no register renaming probably don't want
3032 /// this. However, such targets should probably disable LSR altogether.
3033 ///
3034 /// The job of LSR is to make a reasonable choice of induction variables across
3035 /// the loop. Subsequent passes can easily "unchain" computation exposing more
3036 /// ILP *within the loop* if the target wants it.
3037 ///
3038 /// Finding the best IV chain is potentially a scheduling problem. Since LSR
3039 /// will not reorder memory operations, it will recognize this as a chain, but
3040 /// will generate redundant IV increments. Ideally this would be corrected later
3041 /// by a smart scheduler:
3042 /// = A[i]
3043 /// = A[i+x]
3044 /// A[i] =
3045 /// A[i+x] =
3046 ///
3047 /// TODO: Walk the entire domtree within this loop, not just the path to the
3048 /// loop latch. This will discover chains on side paths, but requires
3049 /// maintaining multiple copies of the Chains state.
3050 void LSRInstance::CollectChains() {
3051 LLVM_DEBUG(dbgs() << "Collecting IV Chains.\n");
3052 SmallVector<ChainUsers, 8> ChainUsersVec;
3053
3054 SmallVector<BasicBlock *,8> LatchPath;
3055 BasicBlock *LoopHeader = L->getHeader();
3056 for (DomTreeNode *Rung = DT.getNode(L->getLoopLatch());
3057 Rung->getBlock() != LoopHeader; Rung = Rung->getIDom()) {
3058 LatchPath.push_back(Rung->getBlock());
3059 }
3060 LatchPath.push_back(LoopHeader);
3061
3062 // Walk the instruction stream from the loop header to the loop latch.
3063 for (BasicBlock *BB : reverse(LatchPath)) {
3064 for (Instruction &I : *BB) {
3065 // Skip instructions that weren't seen by IVUsers analysis.
3066 if (isa<PHINode>(I) || !IU.isIVUserOrOperand(&I))
3067 continue;
3068
3069 // Ignore users that are part of a SCEV expression. This way we only
3070 // consider leaf IV Users. This effectively rediscovers a portion of
3071 // IVUsers analysis but in program order this time.
3072 if (SE.isSCEVable(I.getType()) && !isa<SCEVUnknown>(SE.getSCEV(&I)))
3073 continue;
3074
3075 // Remove this instruction from any NearUsers set it may be in.
3076 for (unsigned ChainIdx = 0, NChains = IVChainVec.size();
3077 ChainIdx < NChains; ++ChainIdx) {
3078 ChainUsersVec[ChainIdx].NearUsers.erase(&I);
3079 }
3080 // Search for operands that can be chained.
3081 SmallPtrSet<Instruction*, 4> UniqueOperands;
3082 User::op_iterator IVOpEnd = I.op_end();
3083 User::op_iterator IVOpIter = findIVOperand(I.op_begin(), IVOpEnd, L, SE);
3084 while (IVOpIter != IVOpEnd) {
3085 Instruction *IVOpInst = cast<Instruction>(*IVOpIter);
3086 if (UniqueOperands.insert(IVOpInst).second)
3087 ChainInstruction(&I, IVOpInst, ChainUsersVec);
3088 IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
3089 }
3090 } // Continue walking down the instructions.
3091 } // Continue walking down the domtree.
3092 // Visit phi backedges to determine if the chain can generate the IV postinc.
3093 for (PHINode &PN : L->getHeader()->phis()) {
3094 if (!SE.isSCEVable(PN.getType()))
3095 continue;
3096
3097 Instruction *IncV =
3098 dyn_cast<Instruction>(PN.getIncomingValueForBlock(L->getLoopLatch()));
3099 if (IncV)
3100 ChainInstruction(&PN, IncV, ChainUsersVec);
3101 }
3102 // Remove any unprofitable chains.
3103 unsigned ChainIdx = 0;
3104 for (unsigned UsersIdx = 0, NChains = IVChainVec.size();
3105 UsersIdx < NChains; ++UsersIdx) {
3106 if (!isProfitableChain(IVChainVec[UsersIdx],
3107 ChainUsersVec[UsersIdx].FarUsers, SE, TTI))
3108 continue;
3109     // Preserve the chain at UsersIdx.
3110 if (ChainIdx != UsersIdx)
3111 IVChainVec[ChainIdx] = IVChainVec[UsersIdx];
3112 FinalizeChain(IVChainVec[ChainIdx]);
3113 ++ChainIdx;
3114 }
3115 IVChainVec.resize(ChainIdx);
3116 }
3117
3118 void LSRInstance::FinalizeChain(IVChain &Chain) {
3119 assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
3120 LLVM_DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n");
3121
3122 for (const IVInc &Inc : Chain) {
3123 LLVM_DEBUG(dbgs() << " Inc: " << *Inc.UserInst << "\n");
3124 auto UseI = find(Inc.UserInst->operands(), Inc.IVOperand);
3125 assert(UseI != Inc.UserInst->op_end() && "cannot find IV operand");
3126 IVIncSet.insert(UseI);
3127 }
3128 }
3129
3130 /// Return true if the IVInc can be folded into an addressing mode.
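/// For example (target-dependent), a constant increment of 8 feeding a load
/// can often be folded as the immediate of a [reg + 8] addressing mode, so no
/// separate add instruction is needed for that link of the chain.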
3131 static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
3132 Value *Operand, const TargetTransformInfo &TTI) {
3133 const SCEVConstant *IncConst = dyn_cast<SCEVConstant>(IncExpr);
3134 if (!IncConst || !isAddressUse(TTI, UserInst, Operand))
3135 return false;
3136
3137 if (IncConst->getAPInt().getMinSignedBits() > 64)
3138 return false;
3139
3140 MemAccessTy AccessTy = getAccessType(TTI, UserInst, Operand);
3141 int64_t IncOffset = IncConst->getValue()->getSExtValue();
3142 if (!isAlwaysFoldable(TTI, LSRUse::Address, AccessTy, /*BaseGV=*/nullptr,
3143 IncOffset, /*HasBaseReg=*/false))
3144 return false;
3145
3146 return true;
3147 }
3148
3149 /// Generate an add or subtract for each IVInc in a chain to materialize the IV
3150 /// user's operand from the previous IV user's operand.
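/// Roughly (illustrative), for a chain of loads from %p and %p + 16, the
/// second access is rewritten to use an add of 16 to the chain's current
/// value, emitted just before its user, instead of recomputing the address
/// from the original induction variable.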
3151 void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
3152 SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
3153 // Find the new IVOperand for the head of the chain. It may have been replaced
3154 // by LSR.
3155 const IVInc &Head = Chain.Incs[0];
3156 User::op_iterator IVOpEnd = Head.UserInst->op_end();
3157 // findIVOperand returns IVOpEnd if it can no longer find a valid IV user.
3158 User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(),
3159 IVOpEnd, L, SE);
3160 Value *IVSrc = nullptr;
3161 while (IVOpIter != IVOpEnd) {
3162 IVSrc = getWideOperand(*IVOpIter);
3163
3164 // If this operand computes the expression that the chain needs, we may use
3165 // it. (Check this after setting IVSrc which is used below.)
3166 //
3167 // Note that if Head.IncExpr is wider than IVSrc, then this phi is too
3168 // narrow for the chain, so we can no longer use it. We do allow using a
3169 // wider phi, assuming the LSR checked for free truncation. In that case we
3170 // should already have a truncate on this operand such that
3171 // getSCEV(IVSrc) == IncExpr.
3172 if (SE.getSCEV(*IVOpIter) == Head.IncExpr
3173 || SE.getSCEV(IVSrc) == Head.IncExpr) {
3174 break;
3175 }
3176 IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
3177 }
3178 if (IVOpIter == IVOpEnd) {
3179 // Gracefully give up on this chain.
3180 LLVM_DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n");
3181 return;
3182 }
3183 assert(IVSrc && "Failed to find IV chain source");
3184
3185 LLVM_DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n");
3186 Type *IVTy = IVSrc->getType();
3187 Type *IntTy = SE.getEffectiveSCEVType(IVTy);
3188 const SCEV *LeftOverExpr = nullptr;
3189 for (const IVInc &Inc : Chain) {
3190 Instruction *InsertPt = Inc.UserInst;
3191 if (isa<PHINode>(InsertPt))
3192 InsertPt = L->getLoopLatch()->getTerminator();
3193
3194 // IVOper will replace the current IV User's operand. IVSrc is the IV
3195 // value currently held in a register.
3196 Value *IVOper = IVSrc;
3197 if (!Inc.IncExpr->isZero()) {
3198 // IncExpr was the result of subtraction of two narrow values, so must
3199 // be signed.
3200 const SCEV *IncExpr = SE.getNoopOrSignExtend(Inc.IncExpr, IntTy);
3201 LeftOverExpr = LeftOverExpr ?
3202 SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr;
3203 }
3204 if (LeftOverExpr && !LeftOverExpr->isZero()) {
3205 // Expand the IV increment.
3206 Rewriter.clearPostInc();
3207 Value *IncV = Rewriter.expandCodeFor(LeftOverExpr, IntTy, InsertPt);
3208 const SCEV *IVOperExpr = SE.getAddExpr(SE.getUnknown(IVSrc),
3209 SE.getUnknown(IncV));
3210 IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt);
3211
3212 // If an IV increment can't be folded, use it as the next IV value.
3213 if (!canFoldIVIncExpr(LeftOverExpr, Inc.UserInst, Inc.IVOperand, TTI)) {
3214 assert(IVTy == IVOper->getType() && "inconsistent IV increment type");
3215 IVSrc = IVOper;
3216 LeftOverExpr = nullptr;
3217 }
3218 }
3219 Type *OperTy = Inc.IVOperand->getType();
3220 if (IVTy != OperTy) {
3221 assert(SE.getTypeSizeInBits(IVTy) >= SE.getTypeSizeInBits(OperTy) &&
3222 "cannot extend a chained IV");
3223 IRBuilder<> Builder(InsertPt);
3224 IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain");
3225 }
3226 Inc.UserInst->replaceUsesOfWith(Inc.IVOperand, IVOper);
3227 if (auto *OperandIsInstr = dyn_cast<Instruction>(Inc.IVOperand))
3228 DeadInsts.emplace_back(OperandIsInstr);
3229 }
3230 // If LSR created a new, wider phi, we may also replace its postinc. We only
3231 // do this if we also found a wide value for the head of the chain.
3232 if (isa<PHINode>(Chain.tailUserInst())) {
3233 for (PHINode &Phi : L->getHeader()->phis()) {
3234 if (!isCompatibleIVType(&Phi, IVSrc))
3235 continue;
3236 Instruction *PostIncV = dyn_cast<Instruction>(
3237 Phi.getIncomingValueForBlock(L->getLoopLatch()));
3238 if (!PostIncV || (SE.getSCEV(PostIncV) != SE.getSCEV(IVSrc)))
3239 continue;
3240 Value *IVOper = IVSrc;
3241 Type *PostIncTy = PostIncV->getType();
3242 if (IVTy != PostIncTy) {
3243 assert(PostIncTy->isPointerTy() && "mixing int/ptr IV types");
3244 IRBuilder<> Builder(L->getLoopLatch()->getTerminator());
3245 Builder.SetCurrentDebugLocation(PostIncV->getDebugLoc());
3246 IVOper = Builder.CreatePointerCast(IVSrc, PostIncTy, "lsr.chain");
3247 }
3248 Phi.replaceUsesOfWith(PostIncV, IVOper);
3249 DeadInsts.emplace_back(PostIncV);
3250 }
3251 }
3252 }
3253
3254 void LSRInstance::CollectFixupsAndInitialFormulae() {
3255 BranchInst *ExitBranch = nullptr;
3256 bool SaveCmp = TTI.canSaveCmp(L, &ExitBranch, &SE, &LI, &DT, &AC, &TLI);
3257
3258 for (const IVStrideUse &U : IU) {
3259 Instruction *UserInst = U.getUser();
3260 // Skip IV users that are part of profitable IV Chains.
3261 User::op_iterator UseI =
3262 find(UserInst->operands(), U.getOperandValToReplace());
3263 assert(UseI != UserInst->op_end() && "cannot find IV operand");
3264 if (IVIncSet.count(UseI)) {
3265 LLVM_DEBUG(dbgs() << "Use is in profitable chain: " << **UseI << '\n');
3266 continue;
3267 }
3268
3269 LSRUse::KindType Kind = LSRUse::Basic;
3270 MemAccessTy AccessTy;
3271 if (isAddressUse(TTI, UserInst, U.getOperandValToReplace())) {
3272 Kind = LSRUse::Address;
3273 AccessTy = getAccessType(TTI, UserInst, U.getOperandValToReplace());
3274 }
3275
3276 const SCEV *S = IU.getExpr(U);
3277 PostIncLoopSet TmpPostIncLoops = U.getPostIncLoops();
3278
3279 // Equality (== and !=) ICmps are special. We can rewrite (i == N) as
3280 // (N - i == 0), and this allows (N - i) to be the expression that we work
3281 // with rather than just N or i, so we can consider the register
3282 // requirements for both N and i at the same time. Limiting this code to
3283 // equality icmps is not a problem because all interesting loops use
3284 // equality icmps, thanks to IndVarSimplify.
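    // Illustrative example: "icmp eq i64 %i.next, %n", where %i.next has SCEV
    // {1,+,1}<%L>, is modeled as an ICmpZero use of (%n - {1,+,1}<%L>), so the
    // solver weighs register requirements for the combined expression.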
3285 if (ICmpInst *CI = dyn_cast<ICmpInst>(UserInst)) {
3286       // If CI can be saved on some targets, e.g. replaced inside a hardware
3287       // loop on PowerPC, there is no need to generate initial formulae for it.
3288 if (SaveCmp && CI == dyn_cast<ICmpInst>(ExitBranch->getCondition()))
3289 continue;
3290 if (CI->isEquality()) {
3291 // Swap the operands if needed to put the OperandValToReplace on the
3292 // left, for consistency.
3293 Value *NV = CI->getOperand(1);
3294 if (NV == U.getOperandValToReplace()) {
3295 CI->setOperand(1, CI->getOperand(0));
3296 CI->setOperand(0, NV);
3297 NV = CI->getOperand(1);
3298 Changed = true;
3299 }
3300
3301 // x == y --> x - y == 0
3302 const SCEV *N = SE.getSCEV(NV);
3303 if (SE.isLoopInvariant(N, L) && isSafeToExpand(N, SE)) {
3304 // S is normalized, so normalize N before folding it into S
3305 // to keep the result normalized.
3306 N = normalizeForPostIncUse(N, TmpPostIncLoops, SE);
3307 Kind = LSRUse::ICmpZero;
3308 S = SE.getMinusSCEV(N, S);
3309 }
3310
3311 // -1 and the negations of all interesting strides (except the negation
3312 // of -1) are now also interesting.
3313 for (size_t i = 0, e = Factors.size(); i != e; ++i)
3314 if (Factors[i] != -1)
3315 Factors.insert(-(uint64_t)Factors[i]);
3316 Factors.insert(-1);
3317 }
3318 }
3319
3320 // Get or create an LSRUse.
3321 std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy);
3322 size_t LUIdx = P.first;
3323 int64_t Offset = P.second;
3324 LSRUse &LU = Uses[LUIdx];
3325
3326 // Record the fixup.
3327 LSRFixup &LF = LU.getNewFixup();
3328 LF.UserInst = UserInst;
3329 LF.OperandValToReplace = U.getOperandValToReplace();
3330 LF.PostIncLoops = TmpPostIncLoops;
3331 LF.Offset = Offset;
3332 LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
3333
3334 if (!LU.WidestFixupType ||
3335 SE.getTypeSizeInBits(LU.WidestFixupType) <
3336 SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
3337 LU.WidestFixupType = LF.OperandValToReplace->getType();
3338
3339 // If this is the first use of this LSRUse, give it a formula.
3340 if (LU.Formulae.empty()) {
3341 InsertInitialFormula(S, LU, LUIdx);
3342 CountRegisters(LU.Formulae.back(), LUIdx);
3343 }
3344 }
3345
3346 LLVM_DEBUG(print_fixups(dbgs()));
3347 }
3348
3349 /// Insert a formula for the given expression into the given use, separating out
3350 /// loop-variant portions from loop-invariant and loop-computable portions.
3351 void
3352 LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) {
3353 // Mark uses whose expressions cannot be expanded.
3354 if (!isSafeToExpand(S, SE))
3355 LU.RigidFormula = true;
3356
3357 Formula F;
3358 F.initialMatch(S, L, SE);
3359 bool Inserted = InsertFormula(LU, LUIdx, F);
3360 assert(Inserted && "Initial formula already exists!"); (void)Inserted;
3361 }
3362
3363 /// Insert a simple single-register formula for the given expression into the
3364 /// given use.
3365 void
3366 LSRInstance::InsertSupplementalFormula(const SCEV *S,
3367 LSRUse &LU, size_t LUIdx) {
3368 Formula F;
3369 F.BaseRegs.push_back(S);
3370 F.HasBaseReg = true;
3371 bool Inserted = InsertFormula(LU, LUIdx, F);
3372 assert(Inserted && "Supplemental formula already exists!"); (void)Inserted;
3373 }
3374
3375 /// Note which registers are used by the given formula, updating RegUses.
3376 void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) {
3377 if (F.ScaledReg)
3378 RegUses.countRegister(F.ScaledReg, LUIdx);
3379 for (const SCEV *BaseReg : F.BaseRegs)
3380 RegUses.countRegister(BaseReg, LUIdx);
3381 }
3382
3383 /// If the given formula has not yet been inserted, add it to the list, and
3384 /// return true. Return false otherwise.
3385 bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) {
3386 // Do not insert formula that we will not be able to expand.
3387 assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F) &&
3388 "Formula is illegal");
3389
3390 if (!LU.InsertFormula(F, *L))
3391 return false;
3392
3393 CountRegisters(F, LUIdx);
3394 return true;
3395 }
3396
3397 /// Check for other uses of loop-invariant values which we're tracking. These
3398 /// other uses will pin these values in registers, making them less profitable
3399 /// for elimination.
3400 /// TODO: This currently misses non-constant addrec step registers.
3401 /// TODO: Should this give more weight to users inside the loop?
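/// For example (illustrative), if a loop-invariant value %n used inside the
/// loop is also compared against after the loop, that outside use keeps %n
/// live regardless of what LSR does, which makes eliminating %n from the
/// loop-body formulae less attractive.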
3402 void
3403 LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
3404 SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end());
3405 SmallPtrSet<const SCEV *, 32> Visited;
3406
3407 while (!Worklist.empty()) {
3408 const SCEV *S = Worklist.pop_back_val();
3409
3410 // Don't process the same SCEV twice
3411 if (!Visited.insert(S).second)
3412 continue;
3413
3414 if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S))
3415 Worklist.append(N->op_begin(), N->op_end());
3416 else if (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(S))
3417 Worklist.push_back(C->getOperand());
3418 else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
3419 Worklist.push_back(D->getLHS());
3420 Worklist.push_back(D->getRHS());
3421 } else if (const SCEVUnknown *US = dyn_cast<SCEVUnknown>(S)) {
3422 const Value *V = US->getValue();
3423 if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
3424 // Look for instructions defined outside the loop.
3425 if (L->contains(Inst)) continue;
3426 } else if (isa<UndefValue>(V))
3427 // Undef doesn't have a live range, so it doesn't matter.
3428 continue;
3429 for (const Use &U : V->uses()) {
3430 const Instruction *UserInst = dyn_cast<Instruction>(U.getUser());
3431 // Ignore non-instructions.
3432 if (!UserInst)
3433 continue;
3434 // Ignore instructions in other functions (as can happen with
3435 // Constants).
3436 if (UserInst->getParent()->getParent() != L->getHeader()->getParent())
3437 continue;
3438 // Ignore instructions not dominated by the loop.
3439 const BasicBlock *UseBB = !isa<PHINode>(UserInst) ?
3440 UserInst->getParent() :
3441 cast<PHINode>(UserInst)->getIncomingBlock(
3442 PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
3443 if (!DT.dominates(L->getHeader(), UseBB))
3444 continue;
3445 // Don't bother if the instruction is in a BB which ends in an EHPad.
3446 if (UseBB->getTerminator()->isEHPad())
3447 continue;
3448 // Don't bother rewriting PHIs in catchswitch blocks.
3449 if (isa<CatchSwitchInst>(UserInst->getParent()->getTerminator()))
3450 continue;
3451 // Ignore uses which are part of other SCEV expressions, to avoid
3452 // analyzing them multiple times.
3453 if (SE.isSCEVable(UserInst->getType())) {
3454 const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst));
3455 // If the user is a no-op, look through to its uses.
3456 if (!isa<SCEVUnknown>(UserS))
3457 continue;
3458 if (UserS == US) {
3459 Worklist.push_back(
3460 SE.getUnknown(const_cast<Instruction *>(UserInst)));
3461 continue;
3462 }
3463 }
3464 // Ignore icmp instructions which are already being analyzed.
3465 if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
3466 unsigned OtherIdx = !U.getOperandNo();
3467 Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx));
3468 if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L))
3469 continue;
3470 }
3471
3472 std::pair<size_t, int64_t> P = getUse(
3473 S, LSRUse::Basic, MemAccessTy());
3474 size_t LUIdx = P.first;
3475 int64_t Offset = P.second;
3476 LSRUse &LU = Uses[LUIdx];
3477 LSRFixup &LF = LU.getNewFixup();
3478 LF.UserInst = const_cast<Instruction *>(UserInst);
3479 LF.OperandValToReplace = U;
3480 LF.Offset = Offset;
3481 LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
3482 if (!LU.WidestFixupType ||
3483 SE.getTypeSizeInBits(LU.WidestFixupType) <
3484 SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
3485 LU.WidestFixupType = LF.OperandValToReplace->getType();
3486 InsertSupplementalFormula(US, LU, LUIdx);
3487 CountRegisters(LU.Formulae.back(), Uses.size() - 1);
3488 break;
3489 }
3490 }
3491 }
3492 }
3493
3494 /// Split S into subexpressions which can be pulled out into separate
3495 /// registers. If C is non-null, multiply each subexpression by C.
3496 ///
3497 /// Return remainder expression after factoring the subexpressions captured by
3498 /// Ops. If Ops is complete, return NULL.
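/// For example (hypothetical SCEVs), {(%a + %b),+,8}<%L> is split into the
/// subexpressions %a and %b, and {0,+,8}<%L> is returned as the remainder, so
/// each piece can be considered as a separate base register.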
3499 static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
3500 SmallVectorImpl<const SCEV *> &Ops,
3501 const Loop *L,
3502 ScalarEvolution &SE,
3503 unsigned Depth = 0) {
3504 // Arbitrarily cap recursion to protect compile time.
3505 if (Depth >= 3)
3506 return S;
3507
3508 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
3509 // Break out add operands.
3510 for (const SCEV *S : Add->operands()) {
3511 const SCEV *Remainder = CollectSubexprs(S, C, Ops, L, SE, Depth+1);
3512 if (Remainder)
3513 Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
3514 }
3515 return nullptr;
3516 } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
3517 // Split a non-zero base out of an addrec.
3518 if (AR->getStart()->isZero() || !AR->isAffine())
3519 return S;
3520
3521 const SCEV *Remainder = CollectSubexprs(AR->getStart(),
3522 C, Ops, L, SE, Depth+1);
3523 // Split the non-zero AddRec unless it is part of a nested recurrence that
3524 // does not pertain to this loop.
3525 if (Remainder && (AR->getLoop() == L || !isa<SCEVAddRecExpr>(Remainder))) {
3526 Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
3527 Remainder = nullptr;
3528 }
3529 if (Remainder != AR->getStart()) {
3530 if (!Remainder)
3531 Remainder = SE.getConstant(AR->getType(), 0);
3532 return SE.getAddRecExpr(Remainder,
3533 AR->getStepRecurrence(SE),
3534 AR->getLoop(),
3535 //FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
3536 SCEV::FlagAnyWrap);
3537 }
3538 } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
3539 // Break (C * (a + b + c)) into C*a + C*b + C*c.
3540 if (Mul->getNumOperands() != 2)
3541 return S;
3542 if (const SCEVConstant *Op0 =
3543 dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
3544 C = C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0;
3545 const SCEV *Remainder =
3546 CollectSubexprs(Mul->getOperand(1), C, Ops, L, SE, Depth+1);
3547 if (Remainder)
3548 Ops.push_back(SE.getMulExpr(C, Remainder));
3549 return nullptr;
3550 }
3551 }
3552 return S;
3553 }
3554
3555 /// Return true if the SCEV represents a value that may end up as a
3556 /// post-increment operation.
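/// e.g. (illustrative) an address whose SCEV is {%base,+,8}<%L>, with %base
/// loop-invariant but not constant, may be lowered as a post-indexed access on
/// targets where TTI reports post-increment loads or stores as legal.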
3557 static bool mayUsePostIncMode(const TargetTransformInfo &TTI,
3558 LSRUse &LU, const SCEV *S, const Loop *L,
3559 ScalarEvolution &SE) {
3560 if (LU.Kind != LSRUse::Address ||
3561 !LU.AccessTy.getType()->isIntOrIntVectorTy())
3562 return false;
3563 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S);
3564 if (!AR)
3565 return false;
3566 const SCEV *LoopStep = AR->getStepRecurrence(SE);
3567 if (!isa<SCEVConstant>(LoopStep))
3568 return false;
3569 // Check if a post-indexed load/store can be used.
3570 if (TTI.isIndexedLoadLegal(TTI.MIM_PostInc, AR->getType()) ||
3571 TTI.isIndexedStoreLegal(TTI.MIM_PostInc, AR->getType())) {
3572 const SCEV *LoopStart = AR->getStart();
3573 if (!isa<SCEVConstant>(LoopStart) && SE.isLoopInvariant(LoopStart, L))
3574 return true;
3575 }
3576 return false;
3577 }
3578
3579 /// Helper function for LSRInstance::GenerateReassociations.
3580 void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
3581 const Formula &Base,
3582 unsigned Depth, size_t Idx,
3583 bool IsScaledReg) {
3584 const SCEV *BaseReg = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
3585 // Don't generate reassociations for the base register of a value that
3586 // may generate a post-increment operator. The reason is that the
3587 // reassociations cause extra base+register formula to be created,
3588 // and possibly chosen, but the post-increment is more efficient.
3589 if (TTI.shouldFavorPostInc() && mayUsePostIncMode(TTI, LU, BaseReg, L, SE))
3590 return;
3591 SmallVector<const SCEV *, 8> AddOps;
3592 const SCEV *Remainder = CollectSubexprs(BaseReg, nullptr, AddOps, L, SE);
3593 if (Remainder)
3594 AddOps.push_back(Remainder);
3595
3596 if (AddOps.size() == 1)
3597 return;
3598
3599 for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
3600 JE = AddOps.end();
3601 J != JE; ++J) {
3602 // Loop-variant "unknown" values are uninteresting; we won't be able to
3603 // do anything meaningful with them.
3604 if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L))
3605 continue;
3606
3607 // Don't pull a constant into a register if the constant could be folded
3608 // into an immediate field.
3609 if (isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
3610 LU.AccessTy, *J, Base.getNumRegs() > 1))
3611 continue;
3612
3613 // Collect all operands except *J.
3614 SmallVector<const SCEV *, 8> InnerAddOps(
3615 ((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J);
3616 InnerAddOps.append(std::next(J),
3617 ((const SmallVector<const SCEV *, 8> &)AddOps).end());
3618
3619 // Don't leave just a constant behind in a register if the constant could
3620 // be folded into an immediate field.
3621 if (InnerAddOps.size() == 1 &&
3622 isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind,
3623 LU.AccessTy, InnerAddOps[0], Base.getNumRegs() > 1))
3624 continue;
3625
3626 const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
3627 if (InnerSum->isZero())
3628 continue;
3629 Formula F = Base;
3630
3631 // Add the remaining pieces of the add back into the new formula.
3632 const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum);
3633 if (InnerSumSC && SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 &&
3634 TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
3635 InnerSumSC->getValue()->getZExtValue())) {
3636 F.UnfoldedOffset =
3637 (uint64_t)F.UnfoldedOffset + InnerSumSC->getValue()->getZExtValue();
3638 if (IsScaledReg)
3639 F.ScaledReg = nullptr;
3640 else
3641 F.BaseRegs.erase(F.BaseRegs.begin() + Idx);
3642 } else if (IsScaledReg)
3643 F.ScaledReg = InnerSum;
3644 else
3645 F.BaseRegs[Idx] = InnerSum;
3646
3647 // Add J as its own register, or an unfolded immediate.
3648 const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J);
3649 if (SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
3650 TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
3651 SC->getValue()->getZExtValue()))
3652 F.UnfoldedOffset =
3653 (uint64_t)F.UnfoldedOffset + SC->getValue()->getZExtValue();
3654 else
3655 F.BaseRegs.push_back(*J);
3656 // We may have changed the number of register in base regs, adjust the
3657 // formula accordingly.
3658 F.canonicalize(*L);
3659
3660 if (InsertFormula(LU, LUIdx, F))
3661 // If that formula hadn't been seen before, recurse to find more like
3662 // it.
3663       // Add a check on Log16(AddOps.size()) - same as Log2_32(AddOps.size()) >> 2 -
3664       // because Depth alone is not enough to bound compile time.
3665       // This means that every time AddOps.size() is greater than 16^x we will
3666       // add x to Depth.
3667 GenerateReassociations(LU, LUIdx, LU.Formulae.back(),
3668 Depth + 1 + (Log2_32(AddOps.size()) >> 2));
3669 }
3670 }
3671
3672 /// Split out subexpressions from adds and the bases of addrecs.
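/// For instance (hypothetical SCEVs), a base register {(%a + %b),+,4}<%L> can
/// be reassociated into a formula such as reg({%a,+,4}<%L>) + reg(%b), giving
/// the solver more register combinations to weigh against each other.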
3673 void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
3674 Formula Base, unsigned Depth) {
3675 assert(Base.isCanonical(*L) && "Input must be in the canonical form");
3676 // Arbitrarily cap recursion to protect compile time.
3677 if (Depth >= 3)
3678 return;
3679
3680 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
3681 GenerateReassociationsImpl(LU, LUIdx, Base, Depth, i);
3682
3683 if (Base.Scale == 1)
3684 GenerateReassociationsImpl(LU, LUIdx, Base, Depth,
3685 /* Idx */ -1, /* IsScaledReg */ true);
3686 }
3687
3688 /// Generate a formula consisting of all of the loop-dominating registers added
3689 /// into a single register.
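/// e.g. (illustrative) a formula with base registers %a and %b, both defined
/// outside the loop, can be combined into the single base register (%a + %b),
/// trading an addition in the preheader for lower register pressure.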
3690 void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
3691 Formula Base) {
3692 // This method is only interesting on a plurality of registers.
3693 if (Base.BaseRegs.size() + (Base.Scale == 1) +
3694 (Base.UnfoldedOffset != 0) <= 1)
3695 return;
3696
3697 // Flatten the representation, i.e., reg1 + 1*reg2 => reg1 + reg2, before
3698 // processing the formula.
3699 Base.unscale();
3700 SmallVector<const SCEV *, 4> Ops;
3701 Formula NewBase = Base;
3702 NewBase.BaseRegs.clear();
3703 Type *CombinedIntegerType = nullptr;
3704 for (const SCEV *BaseReg : Base.BaseRegs) {
3705 if (SE.properlyDominates(BaseReg, L->getHeader()) &&
3706 !SE.hasComputableLoopEvolution(BaseReg, L)) {
3707 if (!CombinedIntegerType)
3708 CombinedIntegerType = SE.getEffectiveSCEVType(BaseReg->getType());
3709 Ops.push_back(BaseReg);
3710 }
3711 else
3712 NewBase.BaseRegs.push_back(BaseReg);
3713 }
3714
3715 // If no register is relevant, we're done.
3716 if (Ops.size() == 0)
3717 return;
3718
3719 // Utility function for generating the required variants of the combined
3720 // registers.
3721 auto GenerateFormula = [&](const SCEV *Sum) {
3722 Formula F = NewBase;
3723
3724 // TODO: If Sum is zero, it probably means ScalarEvolution missed an
3725 // opportunity to fold something. For now, just ignore such cases
3726 // rather than proceed with zero in a register.
3727 if (Sum->isZero())
3728 return;
3729
3730 F.BaseRegs.push_back(Sum);
3731 F.canonicalize(*L);
3732 (void)InsertFormula(LU, LUIdx, F);
3733 };
3734
3735 // If we collected at least two registers, generate a formula combining them.
3736 if (Ops.size() > 1) {
3737 SmallVector<const SCEV *, 4> OpsCopy(Ops); // Don't let SE modify Ops.
3738 GenerateFormula(SE.getAddExpr(OpsCopy));
3739 }
3740
3741 // If we have an unfolded offset, generate a formula combining it with the
3742 // registers collected.
3743 if (NewBase.UnfoldedOffset) {
3744 assert(CombinedIntegerType && "Missing a type for the unfolded offset");
3745 Ops.push_back(SE.getConstant(CombinedIntegerType, NewBase.UnfoldedOffset,
3746 true));
3747 NewBase.UnfoldedOffset = 0;
3748 GenerateFormula(SE.getAddExpr(Ops));
3749 }
3750 }
3751
3752 /// Helper function for LSRInstance::GenerateSymbolicOffsets.
3753 void LSRInstance::GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx,
3754 const Formula &Base, size_t Idx,
3755 bool IsScaledReg) {
3756 const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
3757 GlobalValue *GV = ExtractSymbol(G, SE);
3758 if (G->isZero() || !GV)
3759 return;
3760 Formula F = Base;
3761 F.BaseGV = GV;
3762 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
3763 return;
3764 if (IsScaledReg)
3765 F.ScaledReg = G;
3766 else
3767 F.BaseRegs[Idx] = G;
3768 (void)InsertFormula(LU, LUIdx, F);
3769 }
3770
3771 /// Generate reuse formulae using symbolic offsets.
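/// e.g. (illustrative) for a register (@global + {0,+,4}<%L>), @global can be
/// moved into the formula's BaseGV while {0,+,4}<%L> stays in the register, on
/// targets whose addressing modes accept a global symbol as the base.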
3772 void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
3773 Formula Base) {
3774 // We can't add a symbolic offset if the address already contains one.
3775 if (Base.BaseGV) return;
3776
3777 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
3778 GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, i);
3779 if (Base.Scale == 1)
3780 GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, /* Idx */ -1,
3781 /* IsScaledReg */ true);
3782 }
3783
3784 /// Helper function for LSRInstance::GenerateConstantOffsets.
3785 void LSRInstance::GenerateConstantOffsetsImpl(
3786 LSRUse &LU, unsigned LUIdx, const Formula &Base,
3787 const SmallVectorImpl<int64_t> &Worklist, size_t Idx, bool IsScaledReg) {
3788
3789 auto GenerateOffset = [&](const SCEV *G, int64_t Offset) {
3790 Formula F = Base;
3791 F.BaseOffset = (uint64_t)Base.BaseOffset - Offset;
3792
3793 if (isLegalUse(TTI, LU.MinOffset - Offset, LU.MaxOffset - Offset, LU.Kind,
3794 LU.AccessTy, F)) {
3795 // Add the offset to the base register.
3796 const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), Offset), G);
3797 // If it cancelled out, drop the base register, otherwise update it.
3798 if (NewG->isZero()) {
3799 if (IsScaledReg) {
3800 F.Scale = 0;
3801 F.ScaledReg = nullptr;
3802 } else
3803 F.deleteBaseReg(F.BaseRegs[Idx]);
3804 F.canonicalize(*L);
3805 } else if (IsScaledReg)
3806 F.ScaledReg = NewG;
3807 else
3808 F.BaseRegs[Idx] = NewG;
3809
3810 (void)InsertFormula(LU, LUIdx, F);
3811 }
3812 };
3813
3814 const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx];
3815
3816 // With constant offsets and constant steps, we can generate pre-inc
3817 // accesses by having the offset equal the step. So, for access #0 with a
3818 // step of 8, we generate a G - 8 base which would require the first access
3819 // to be ((G - 8) + 8),+,8. The pre-indexed access then updates the pointer
3820   // for itself and hopefully becomes the base for other accesses. This means
3821   // that a single pre-indexed access can be generated to become the new
3822 // base pointer for each iteration of the loop, resulting in no extra add/sub
3823 // instructions for pointer updating.
3824 if (FavorBackedgeIndex && LU.Kind == LSRUse::Address) {
3825 if (auto *GAR = dyn_cast<SCEVAddRecExpr>(G)) {
3826 if (auto *StepRec =
3827 dyn_cast<SCEVConstant>(GAR->getStepRecurrence(SE))) {
3828 const APInt &StepInt = StepRec->getAPInt();
3829 int64_t Step = StepInt.isNegative() ?
3830 StepInt.getSExtValue() : StepInt.getZExtValue();
3831
3832 for (int64_t Offset : Worklist) {
3833 Offset -= Step;
3834 GenerateOffset(G, Offset);
3835 }
3836 }
3837 }
3838 }
3839 for (int64_t Offset : Worklist)
3840 GenerateOffset(G, Offset);
3841
3842 int64_t Imm = ExtractImmediate(G, SE);
3843 if (G->isZero() || Imm == 0)
3844 return;
3845 Formula F = Base;
3846 F.BaseOffset = (uint64_t)F.BaseOffset + Imm;
3847 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F))
3848 return;
3849 if (IsScaledReg) {
3850 F.ScaledReg = G;
3851 } else {
3852 F.BaseRegs[Idx] = G;
3853     // We may generate a non-canonical Formula if G is a recurrent expression
3854     // register related to the current loop while F.ScaledReg is not.
3855 F.canonicalize(*L);
3856 }
3857 (void)InsertFormula(LU, LUIdx, F);
3858 }
3859
3860 /// GenerateConstantOffsets - Generate reuse formulae using constant offsets.
3861 void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
3862 Formula Base) {
3863 // TODO: For now, just add the min and max offset, because it usually isn't
3864   // worthwhile looking at everything in between.
3865 SmallVector<int64_t, 2> Worklist;
3866 Worklist.push_back(LU.MinOffset);
3867 if (LU.MaxOffset != LU.MinOffset)
3868 Worklist.push_back(LU.MaxOffset);
3869
3870 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
3871 GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, i);
3872 if (Base.Scale == 1)
3873 GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, /* Idx */ -1,
3874 /* IsScaledReg */ true);
3875 }
3876
3877 /// For ICmpZero, check to see if we can scale up the comparison. For example, x
3878 /// == y -> x*c == y*c.
3879 void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
3880 Formula Base) {
3881 if (LU.Kind != LSRUse::ICmpZero) return;
3882
3883 // Determine the integer type for the base formula.
3884 Type *IntTy = Base.getType();
3885 if (!IntTy) return;
3886 if (SE.getTypeSizeInBits(IntTy) > 64) return;
3887
3888 // Don't do this if there is more than one offset.
3889 if (LU.MinOffset != LU.MaxOffset) return;
3890
3891   // Check if the transformation is valid. It is illegal to multiply a pointer.
3892 if (Base.ScaledReg && Base.ScaledReg->getType()->isPointerTy())
3893 return;
3894 for (const SCEV *BaseReg : Base.BaseRegs)
3895 if (BaseReg->getType()->isPointerTy())
3896 return;
3897 assert(!Base.BaseGV && "ICmpZero use is not legal!");
3898
3899 // Check each interesting stride.
3900 for (int64_t Factor : Factors) {
3901 // Check that the multiplication doesn't overflow.
3902 if (Base.BaseOffset == std::numeric_limits<int64_t>::min() && Factor == -1)
3903 continue;
3904 int64_t NewBaseOffset = (uint64_t)Base.BaseOffset * Factor;
3905 if (NewBaseOffset / Factor != Base.BaseOffset)
3906 continue;
3907 // If the offset will be truncated at this use, check that it is in bounds.
3908 if (!IntTy->isPointerTy() &&
3909 !ConstantInt::isValueValidForType(IntTy, NewBaseOffset))
3910 continue;
3911
3912 // Check that multiplying with the use offset doesn't overflow.
3913 int64_t Offset = LU.MinOffset;
3914 if (Offset == std::numeric_limits<int64_t>::min() && Factor == -1)
3915 continue;
3916 Offset = (uint64_t)Offset * Factor;
3917 if (Offset / Factor != LU.MinOffset)
3918 continue;
3919 // If the offset will be truncated at this use, check that it is in bounds.
3920 if (!IntTy->isPointerTy() &&
3921 !ConstantInt::isValueValidForType(IntTy, Offset))
3922 continue;
3923
3924 Formula F = Base;
3925 F.BaseOffset = NewBaseOffset;
3926
3927 // Check that this scale is legal.
3928 if (!isLegalUse(TTI, Offset, Offset, LU.Kind, LU.AccessTy, F))
3929 continue;
3930
3931 // Compensate for the use having MinOffset built into it.
3932 F.BaseOffset = (uint64_t)F.BaseOffset + Offset - LU.MinOffset;
3933
3934 const SCEV *FactorS = SE.getConstant(IntTy, Factor);
3935
3936 // Check that multiplying with each base register doesn't overflow.
3937 for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
3938 F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS);
3939 if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
3940 goto next;
3941 }
3942
3943 // Check that multiplying with the scaled register doesn't overflow.
3944 if (F.ScaledReg) {
3945 F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS);
3946 if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
3947 continue;
3948 }
3949
3950 // Check that multiplying with the unfolded offset doesn't overflow.
3951 if (F.UnfoldedOffset != 0) {
3952 if (F.UnfoldedOffset == std::numeric_limits<int64_t>::min() &&
3953 Factor == -1)
3954 continue;
3955 F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset * Factor;
3956 if (F.UnfoldedOffset / Factor != Base.UnfoldedOffset)
3957 continue;
3958 // If the offset will be truncated, check that it is in bounds.
3959 if (!IntTy->isPointerTy() &&
3960 !ConstantInt::isValueValidForType(IntTy, F.UnfoldedOffset))
3961 continue;
3962 }
3963
3964 // If we make it here and it's legal, add it.
3965 (void)InsertFormula(LU, LUIdx, F);
3966 next:;
3967 }
3968 }
3969
3970 /// Generate stride factor reuse formulae by making use of scaled-offset address
3971 /// modes, for example.
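/// e.g. (illustrative) with an interesting factor of 4, a base register
/// {0,+,4}<%L> can be divided through by 4 and used as the scaled register of
/// a reg + 4*reg addressing mode, when the target supports that scale.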
3972 void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
3973 // Determine the integer type for the base formula.
3974 Type *IntTy = Base.getType();
3975 if (!IntTy) return;
3976
3977 // If this Formula already has a scaled register, we can't add another one.
3978 // Try to unscale the formula to generate a better scale.
3979 if (Base.Scale != 0 && !Base.unscale())
3980 return;
3981
3982   assert(Base.Scale == 0 && "unscale did not do its job!");
3983
3984 // Check each interesting stride.
3985 for (int64_t Factor : Factors) {
3986 Base.Scale = Factor;
3987 Base.HasBaseReg = Base.BaseRegs.size() > 1;
3988 // Check whether this scale is going to be legal.
3989 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
3990 Base)) {
3991       // As a special case, handle out-of-loop Basic users.
3992 // TODO: Reconsider this special case.
3993 if (LU.Kind == LSRUse::Basic &&
3994 isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LSRUse::Special,
3995 LU.AccessTy, Base) &&
3996 LU.AllFixupsOutsideLoop)
3997 LU.Kind = LSRUse::Special;
3998 else
3999 continue;
4000 }
4001 // For an ICmpZero, negating a solitary base register won't lead to
4002 // new solutions.
4003 if (LU.Kind == LSRUse::ICmpZero &&
4004 !Base.HasBaseReg && Base.BaseOffset == 0 && !Base.BaseGV)
4005 continue;
4006 // For each addrec base reg, if its loop is current loop, apply the scale.
4007 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
4008 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i]);
4009 if (AR && (AR->getLoop() == L || LU.AllFixupsOutsideLoop)) {
4010 const SCEV *FactorS = SE.getConstant(IntTy, Factor);
4011 if (FactorS->isZero())
4012 continue;
4013 // Divide out the factor, ignoring high bits, since we'll be
4014 // scaling the value back up in the end.
4015 if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) {
4016 // TODO: This could be optimized to avoid all the copying.
4017 Formula F = Base;
4018 F.ScaledReg = Quotient;
4019 F.deleteBaseReg(F.BaseRegs[i]);
4020 // The canonical representation of 1*reg is reg, which is already in
4021 // Base. In that case, do not try to insert the formula, it will be
4022 // rejected anyway.
4023 if (F.Scale == 1 && (F.BaseRegs.empty() ||
4024 (AR->getLoop() != L && LU.AllFixupsOutsideLoop)))
4025 continue;
4026           // If AllFixupsOutsideLoop is true and F.Scale is 1, we may generate a
4027           // non-canonical Formula whose ScaledReg's loop is not L.
4028 if (F.Scale == 1 && LU.AllFixupsOutsideLoop)
4029 F.canonicalize(*L);
4030 (void)InsertFormula(LU, LUIdx, F);
4031 }
4032 }
4033 }
4034 }
4035 }
4036
4037 /// Generate reuse formulae from different IV types.
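/// e.g. (illustrative) when truncating i64 to i32 is free on the target, an
/// i32 use's formula can have its registers any-extended to i64 so that they
/// match registers already needed elsewhere, with the final value truncated
/// back down at no cost.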
4038 void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
4039 // Don't bother truncating symbolic values.
4040 if (Base.BaseGV) return;
4041
4042 // Determine the integer type for the base formula.
4043 Type *DstTy = Base.getType();
4044 if (!DstTy) return;
4045 DstTy = SE.getEffectiveSCEVType(DstTy);
4046
4047 for (Type *SrcTy : Types) {
4048 if (SrcTy != DstTy && TTI.isTruncateFree(SrcTy, DstTy)) {
4049 Formula F = Base;
4050
4051 // Sometimes SCEV is able to prove zero during ext transform. It may
4052 // happen if SCEV did not do all possible transforms while creating the
4053 // initial node (maybe due to depth limitations), but it can do them while
4054 // taking ext.
4055 if (F.ScaledReg) {
4056 const SCEV *NewScaledReg = SE.getAnyExtendExpr(F.ScaledReg, SrcTy);
4057 if (NewScaledReg->isZero())
4058 continue;
4059 F.ScaledReg = NewScaledReg;
4060 }
4061 bool HasZeroBaseReg = false;
4062 for (const SCEV *&BaseReg : F.BaseRegs) {
4063 const SCEV *NewBaseReg = SE.getAnyExtendExpr(BaseReg, SrcTy);
4064 if (NewBaseReg->isZero()) {
4065 HasZeroBaseReg = true;
4066 break;
4067 }
4068 BaseReg = NewBaseReg;
4069 }
4070 if (HasZeroBaseReg)
4071 continue;
4072
4073 // TODO: This assumes we've done basic processing on all uses and
4074 // have an idea what the register usage is.
4075 if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses))
4076 continue;
4077
4078 F.canonicalize(*L);
4079 (void)InsertFormula(LU, LUIdx, F);
4080 }
4081 }
4082 }
4083
4084 namespace {
4085
4086 /// Helper class for GenerateCrossUseConstantOffsets. It's used to defer
4087 /// modifications so that the search phase doesn't have to worry about the data
4088 /// structures moving underneath it.
4089 struct WorkItem {
4090 size_t LUIdx;
4091 int64_t Imm;
4092 const SCEV *OrigReg;
4093
4094   WorkItem(size_t LI, int64_t I, const SCEV *R)
4095 : LUIdx(LI), Imm(I), OrigReg(R) {}
4096
4097 void print(raw_ostream &OS) const;
4098 void dump() const;
4099 };
4100
4101 } // end anonymous namespace
4102
4103 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4104 void WorkItem::print(raw_ostream &OS) const {
4105 OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
4106 << " , add offset " << Imm;
4107 }
4108
4109 LLVM_DUMP_METHOD void WorkItem::dump() const {
4110 print(errs()); errs() << '\n';
4111 }
4112 #endif
4113
4114 /// Look for registers which are a constant distance apart and try to form reuse
4115 /// opportunities between them.
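/// e.g. (illustrative) if one use needs the register (%x + 16) and another
/// needs %x, both can be expressed in terms of %x, with the 16 folded into an
/// immediate offset so that only one register stays live.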
4116 void LSRInstance::GenerateCrossUseConstantOffsets() {
4117 // Group the registers by their value without any added constant offset.
4118 using ImmMapTy = std::map<int64_t, const SCEV *>;
4119
4120 DenseMap<const SCEV *, ImmMapTy> Map;
4121 DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
4122 SmallVector<const SCEV *, 8> Sequence;
4123 for (const SCEV *Use : RegUses) {
4124 const SCEV *Reg = Use; // Make a copy for ExtractImmediate to modify.
4125 int64_t Imm = ExtractImmediate(Reg, SE);
4126 auto Pair = Map.insert(std::make_pair(Reg, ImmMapTy()));
4127 if (Pair.second)
4128 Sequence.push_back(Reg);
4129 Pair.first->second.insert(std::make_pair(Imm, Use));
4130 UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(Use);
4131 }
4132
4133 // Now examine each set of registers with the same base value. Build up
4134 // a list of work to do and do the work in a separate step so that we're
4135 // not adding formulae and register counts while we're searching.
4136 SmallVector<WorkItem, 32> WorkItems;
4137 SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
4138 for (const SCEV *Reg : Sequence) {
4139 const ImmMapTy &Imms = Map.find(Reg)->second;
4140
4141 // It's not worthwhile looking for reuse if there's only one offset.
4142 if (Imms.size() == 1)
4143 continue;
4144
4145 LLVM_DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
4146 for (const auto &Entry
4147 : Imms) dbgs()
4148 << ' ' << Entry.first;
4149 dbgs() << '\n');
4150
4151 // Examine each offset.
4152 for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
4153 J != JE; ++J) {
4154 const SCEV *OrigReg = J->second;
4155
4156 int64_t JImm = J->first;
4157 const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);
4158
4159 if (!isa<SCEVConstant>(OrigReg) &&
4160 UsedByIndicesMap[Reg].count() == 1) {
4161 LLVM_DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg
4162 << '\n');
4163 continue;
4164 }
4165
4166       // Conservatively examine offsets between this orig reg and a few selected
4167 // other orig regs.
4168 int64_t First = Imms.begin()->first;
4169 int64_t Last = std::prev(Imms.end())->first;
4170 // Compute (First + Last) / 2 without overflow using the fact that
4171       // First + Last = 2 * (First & Last) + (First ^ Last).
4172 int64_t Avg = (First & Last) + ((First ^ Last) >> 1);
4173 // If the result is negative and First is odd and Last even (or vice versa),
4174 // we rounded towards -inf. Add 1 in that case, to round towards 0.
4175 Avg = Avg + ((First ^ Last) & ((uint64_t)Avg >> 63));
4176 ImmMapTy::const_iterator OtherImms[] = {
4177 Imms.begin(), std::prev(Imms.end()),
4178 Imms.lower_bound(Avg)};
4179 for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) {
4180 ImmMapTy::const_iterator M = OtherImms[i];
4181 if (M == J || M == JE) continue;
4182
4183 // Compute the difference between the two.
4184 int64_t Imm = (uint64_t)JImm - M->first;
4185 for (unsigned LUIdx : UsedByIndices.set_bits())
4186 // Make a memo of this use, offset, and register tuple.
4187 if (UniqueItems.insert(std::make_pair(LUIdx, Imm)).second)
4188 WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
4189 }
4190 }
4191 }
4192
4193 Map.clear();
4194 Sequence.clear();
4195 UsedByIndicesMap.clear();
4196 UniqueItems.clear();
4197
4198 // Now iterate through the worklist and add new formulae.
4199 for (const WorkItem &WI : WorkItems) {
4200 size_t LUIdx = WI.LUIdx;
4201 LSRUse &LU = Uses[LUIdx];
4202 int64_t Imm = WI.Imm;
4203 const SCEV *OrigReg = WI.OrigReg;
4204
4205 Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
4206 const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
4207 unsigned BitWidth = SE.getTypeSizeInBits(IntTy);
4208
4209 // TODO: Use a more targeted data structure.
4210 for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
4211 Formula F = LU.Formulae[L];
4212 // FIXME: The code for the scaled and unscaled registers looks
4213 // very similar but slightly different. Investigate if they
4214 // could be merged. That way, we would not have to unscale the
4215 // Formula.
4216 F.unscale();
4217 // Use the immediate in the scaled register.
4218 if (F.ScaledReg == OrigReg) {
4219 int64_t Offset = (uint64_t)F.BaseOffset + Imm * (uint64_t)F.Scale;
4220 // Don't create 50 + reg(-50).
4221 if (F.referencesReg(SE.getSCEV(
4222 ConstantInt::get(IntTy, -(uint64_t)Offset))))
4223 continue;
4224 Formula NewF = F;
4225 NewF.BaseOffset = Offset;
4226 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
4227 NewF))
4228 continue;
4229 NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);
4230
4231 // If the new scale is a constant in a register, and adding the constant
4232 // value to the immediate would produce a value closer to zero than the
4233 // immediate itself, then the formula isn't worthwhile.
4234 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg))
4235 if (C->getValue()->isNegative() != (NewF.BaseOffset < 0) &&
4236 (C->getAPInt().abs() * APInt(BitWidth, F.Scale))
4237 .ule(std::abs(NewF.BaseOffset)))
4238 continue;
4239
4240 // OK, looks good.
4241 NewF.canonicalize(*this->L);
4242 (void)InsertFormula(LU, LUIdx, NewF);
4243 } else {
4244 // Use the immediate in a base register.
4245 for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) {
4246 const SCEV *BaseReg = F.BaseRegs[N];
4247 if (BaseReg != OrigReg)
4248 continue;
4249 Formula NewF = F;
4250 NewF.BaseOffset = (uint64_t)NewF.BaseOffset + Imm;
4251 if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset,
4252 LU.Kind, LU.AccessTy, NewF)) {
4253 if (TTI.shouldFavorPostInc() &&
4254 mayUsePostIncMode(TTI, LU, OrigReg, this->L, SE))
4255 continue;
4256 if (!TTI.isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm))
4257 continue;
4258 NewF = F;
4259 NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm;
4260 }
4261 NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg);
4262
4263 // If the new formula has a constant in a register, and adding the
4264 // constant value to the immediate would produce a value closer to
4265 // zero than the immediate itself, then the formula isn't worthwhile.
4266 for (const SCEV *NewReg : NewF.BaseRegs)
4267 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewReg))
4268 if ((C->getAPInt() + NewF.BaseOffset)
4269 .abs()
4270 .slt(std::abs(NewF.BaseOffset)) &&
4271 (C->getAPInt() + NewF.BaseOffset).countTrailingZeros() >=
4272 countTrailingZeros<uint64_t>(NewF.BaseOffset))
4273 goto skip_formula;
4274
4275 // Ok, looks good.
4276 NewF.canonicalize(*this->L);
4277 (void)InsertFormula(LU, LUIdx, NewF);
4278 break;
4279 skip_formula:;
4280 }
4281 }
4282 }
4283 }
4284 }
4285
4286 /// Generate formulae for each use.
4287 void
4288 LSRInstance::GenerateAllReuseFormulae() {
4289 // This is split into multiple loops so that hasRegsUsedByUsesOtherThan
4290 // queries are more precise.
4291 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4292 LSRUse &LU = Uses[LUIdx];
4293 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4294 GenerateReassociations(LU, LUIdx, LU.Formulae[i]);
4295 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4296 GenerateCombinations(LU, LUIdx, LU.Formulae[i]);
4297 }
4298 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4299 LSRUse &LU = Uses[LUIdx];
4300 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4301 GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]);
4302 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4303 GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]);
4304 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4305 GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
4306 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4307 GenerateScales(LU, LUIdx, LU.Formulae[i]);
4308 }
4309 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4310 LSRUse &LU = Uses[LUIdx];
4311 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
4312 GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
4313 }
4314
4315 GenerateCrossUseConstantOffsets();
4316
4317 LLVM_DEBUG(dbgs() << "\n"
4318 "After generating reuse formulae:\n";
4319 print_uses(dbgs()));
4320 }
4321
4322 /// If there are multiple formulae with the same set of registers used
4323 /// by other uses, pick the best one and delete the others.
4324 void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
4325 DenseSet<const SCEV *> VisitedRegs;
4326 SmallPtrSet<const SCEV *, 16> Regs;
4327 SmallPtrSet<const SCEV *, 16> LoserRegs;
4328 #ifndef NDEBUG
4329 bool ChangedFormulae = false;
4330 #endif
4331
4332 // Collect the best formula for each unique set of shared registers. This
4333 // is reset for each use.
4334 using BestFormulaeTy =
4335 DenseMap<SmallVector<const SCEV *, 4>, size_t, UniquifierDenseMapInfo>;
4336
4337 BestFormulaeTy BestFormulae;
4338
4339 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4340 LSRUse &LU = Uses[LUIdx];
4341 LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs());
4342 dbgs() << '\n');
4343
4344 bool Any = false;
4345 for (size_t FIdx = 0, NumForms = LU.Formulae.size();
4346 FIdx != NumForms; ++FIdx) {
4347 Formula &F = LU.Formulae[FIdx];
4348
4349 // Some formulas are instant losers. For example, they may depend on
4350 // nonexistent AddRecs from other loops. These need to be filtered
4351 // immediately, otherwise heuristics could choose them over others leading
4352 // to an unsatisfactory solution. Passing LoserRegs into RateFormula here
4353 // avoids the need to recompute this information across formulae using the
4354 // same bad AddRec. Passing LoserRegs is also essential unless we remove
4355 // the corresponding bad register from the Regs set.
4356 Cost CostF(L, SE, TTI);
4357 Regs.clear();
4358 CostF.RateFormula(F, Regs, VisitedRegs, LU, &LoserRegs);
4359 if (CostF.isLoser()) {
4360 // During initial formula generation, undesirable formulae are generated
4361 // by uses within other loops that have some non-trivial address mode or
4362 // use the postinc form of the IV. LSR needs to provide these formulae
4363 // as the basis of rediscovering the desired formula that uses an AddRec
4364 // corresponding to the existing phi. Once all formulae have been
4365 // generated, these initial losers may be pruned.
4366 LLVM_DEBUG(dbgs() << " Filtering loser "; F.print(dbgs());
4367 dbgs() << "\n");
4368 }
4369 else {
4370 SmallVector<const SCEV *, 4> Key;
4371 for (const SCEV *Reg : F.BaseRegs) {
4372 if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
4373 Key.push_back(Reg);
4374 }
4375 if (F.ScaledReg &&
4376 RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
4377 Key.push_back(F.ScaledReg);
4378 // Unstable sort by host order ok, because this is only used for
4379 // uniquifying.
4380 llvm::sort(Key);
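        // Illustration (hypothetical registers): if two formulae of this use
        // both expose only reg(A) to other uses (their remaining registers are
        // private to this use), they map to the same Key {A}, and the insert
        // below keeps whichever of the two rates cheaper.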
4381
4382 std::pair<BestFormulaeTy::const_iterator, bool> P =
4383 BestFormulae.insert(std::make_pair(Key, FIdx));
4384 if (P.second)
4385 continue;
4386
4387 Formula &Best = LU.Formulae[P.first->second];
4388
4389 Cost CostBest(L, SE, TTI);
4390 Regs.clear();
4391 CostBest.RateFormula(Best, Regs, VisitedRegs, LU);
4392 if (CostF.isLess(CostBest))
4393 std::swap(F, Best);
4394 LLVM_DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs());
4395 dbgs() << "\n"
4396 " in favor of formula ";
4397 Best.print(dbgs()); dbgs() << '\n');
4398 }
4399 #ifndef NDEBUG
4400 ChangedFormulae = true;
4401 #endif
4402 LU.DeleteFormula(F);
4403 --FIdx;
4404 --NumForms;
4405 Any = true;
4406 }
4407
4408 // Now that we've filtered out some formulae, recompute the Regs set.
4409 if (Any)
4410 LU.RecomputeRegs(LUIdx, RegUses);
4411
4412 // Reset this to prepare for the next use.
4413 BestFormulae.clear();
4414 }
4415
4416 LLVM_DEBUG(if (ChangedFormulae) {
4417 dbgs() << "\n"
4418 "After filtering out undesirable candidates:\n";
4419 print_uses(dbgs());
4420 });
4421 }
4422
4423 /// Estimate the worst-case number of solutions the solver might have to
4424 /// consider. It almost never considers this many solutions because it prunes the
4425 /// search space, but the pruning isn't always sufficient.
4426 size_t LSRInstance::EstimateSearchSpaceComplexity() const {
4427 size_t Power = 1;
4428 for (const LSRUse &LU : Uses) {
4429 size_t FSize = LU.Formulae.size();
4430 if (FSize >= ComplexityLimit) {
4431 Power = ComplexityLimit;
4432 break;
4433 }
4434 Power *= FSize;
4435 if (Power >= ComplexityLimit)
4436 break;
4437 }
4438 return Power;
4439 }
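// Illustrative sketch only (guarded out of the build): a standalone model of
// the saturating product computed above, with a hypothetical limit standing in
// for ComplexityLimit. It is a sketch of the estimate, not part of the pass.
#if 0
#include <cstddef>
#include <vector>

// Multiply the per-use formula counts, saturating once the running product
// (or any single count) reaches the limit so the estimate cannot overflow.
static size_t estimateComplexity(const std::vector<size_t> &FormulaCounts,
                                 size_t Limit = 1u << 16) {
  size_t Power = 1;
  for (size_t FSize : FormulaCounts) {
    if (FSize >= Limit)
      return Limit;
    Power *= FSize;
    if (Power >= Limit)
      return Limit; // The pass itself just breaks here; callers only compare
                    // the result against the limit, so clamping is equivalent.
  }
  return Power;
}
// estimateComplexity({4, 6, 5}) == 120; estimateComplexity({300, 300}) hits
// the limit because 300 * 300 exceeds it.
#endif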
4440
4441 /// When one formula uses a superset of the registers of another formula, it
4442 /// won't help reduce register pressure (though it may not necessarily hurt
4443 /// register pressure); remove it to simplify the system.
4444 void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
4445 if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
4446 LLVM_DEBUG(dbgs() << "The search space is too complex.\n");
4447
4448 LLVM_DEBUG(dbgs() << "Narrowing the search space by eliminating formulae "
4449 "which use a superset of registers used by other "
4450 "formulae.\n");
4451
4452 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4453 LSRUse &LU = Uses[LUIdx];
4454 bool Any = false;
4455 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
4456 Formula &F = LU.Formulae[i];
4457 // Look for a formula with a constant or GV in a register. If the use
4458 // also has a formula with that same value in an immediate field,
4459 // delete the one that uses a register.
4460 for (SmallVectorImpl<const SCEV *>::const_iterator
4461 I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) {
4462 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) {
4463 Formula NewF = F;
4464 //FIXME: Formulas should store bitwidth to do wrapping properly.
4465 // See PR41034.
4466 NewF.BaseOffset += (uint64_t)C->getValue()->getSExtValue();
4467 NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
4468 (I - F.BaseRegs.begin()));
4469 if (LU.HasFormulaWithSameRegs(NewF)) {
4470 LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs());
4471 dbgs() << '\n');
4472 LU.DeleteFormula(F);
4473 --i;
4474 --e;
4475 Any = true;
4476 break;
4477 }
4478 } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) {
4479 if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue()))
4480 if (!F.BaseGV) {
4481 Formula NewF = F;
4482 NewF.BaseGV = GV;
4483 NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
4484 (I - F.BaseRegs.begin()));
4485 if (LU.HasFormulaWithSameRegs(NewF)) {
4486 LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs());
4487 dbgs() << '\n');
4488 LU.DeleteFormula(F);
4489 --i;
4490 --e;
4491 Any = true;
4492 break;
4493 }
4494 }
4495 }
4496 }
4497 }
4498 if (Any)
4499 LU.RecomputeRegs(LUIdx, RegUses);
4500 }
4501
4502 LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4503 }
4504 }
4505
4506 /// When there are many registers for expressions like A, A+1, A+2, etc.,
4507 /// allocate a single register for them.
4508 void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
4509 if (EstimateSearchSpaceComplexity() < ComplexityLimit)
4510 return;
4511
4512 LLVM_DEBUG(
4513 dbgs() << "The search space is too complex.\n"
4514 "Narrowing the search space by assuming that uses separated "
4515 "by a constant offset will use the same registers.\n");
4516
4517 // This is especially useful for unrolled loops.
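  //
  // Illustration (hypothetical addresses): after unrolling, uses at A, A + 4
  // and A + 8 may each carry their own registers. If the A + 4 use has a
  // formula reg(A) + 4 and some other use already has a formula over just
  // reg(A), the loop below folds the A + 4 use into that use, bumping its
  // fixup offsets by 4 rather than keeping a dedicated register.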
4518
4519 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4520 LSRUse &LU = Uses[LUIdx];
4521 for (const Formula &F : LU.Formulae) {
4522 if (F.BaseOffset == 0 || (F.Scale != 0 && F.Scale != 1))
4523 continue;
4524
4525 LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU);
4526 if (!LUThatHas)
4527 continue;
4528
4529 if (!reconcileNewOffset(*LUThatHas, F.BaseOffset, /*HasBaseReg=*/ false,
4530 LU.Kind, LU.AccessTy))
4531 continue;
4532
4533 LLVM_DEBUG(dbgs() << " Deleting use "; LU.print(dbgs()); dbgs() << '\n');
4534
4535 LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;
4536
4537 // Transfer the fixups of LU to LUThatHas.
4538 for (LSRFixup &Fixup : LU.Fixups) {
4539 Fixup.Offset += F.BaseOffset;
4540 LUThatHas->pushFixup(Fixup);
4541 LLVM_DEBUG(dbgs() << "New fixup has offset " << Fixup.Offset << '\n');
4542 }
4543
4544 // Delete formulae from the new use which are no longer legal.
4545 bool Any = false;
4546 for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) {
4547 Formula &F = LUThatHas->Formulae[i];
4548 if (!isLegalUse(TTI, LUThatHas->MinOffset, LUThatHas->MaxOffset,
4549 LUThatHas->Kind, LUThatHas->AccessTy, F)) {
4550 LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n');
4551 LUThatHas->DeleteFormula(F);
4552 --i;
4553 --e;
4554 Any = true;
4555 }
4556 }
4557
4558 if (Any)
4559 LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses);
4560
4561 // Delete the old use.
4562 DeleteUse(LU, LUIdx);
4563 --LUIdx;
4564 --NumUses;
4565 break;
4566 }
4567 }
4568
4569 LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4570 }
4571
4572 /// Call FilterOutUndesirableDedicatedRegisters again, if necessary, now that
4573 /// we've done more filtering, as it may be able to find more formulae to
4574 /// eliminate.
4575 void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
4576 if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
4577 LLVM_DEBUG(dbgs() << "The search space is too complex.\n");
4578
4579 LLVM_DEBUG(dbgs() << "Narrowing the search space by re-filtering out "
4580 "undesirable dedicated registers.\n");
4581
4582 FilterOutUndesirableDedicatedRegisters();
4583
4584 LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4585 }
4586 }
4587
4588 /// If an LSRUse has multiple formulae with the same ScaledReg and Scale,
4589 /// pick the best one and delete the others.
4590 /// This narrowing heuristic keeps as many formulae with distinct Scale and
4591 /// ScaledReg pairs as possible while narrowing the search space.
4592 /// The benefit is that it is more likely to find a good solution from a
4593 /// formulae set with more Scale and ScaledReg variations than from a set
4594 /// that repeats the same Scale and ScaledReg. The winner-picking register
4595 /// heuristic tends to keep formulae with the same Scale and ScaledReg and
4596 /// filter out the others, and we want to avoid that if possible.
4597 void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() {
4598 if (EstimateSearchSpaceComplexity() < ComplexityLimit)
4599 return;
4600
4601 LLVM_DEBUG(
4602 dbgs() << "The search space is too complex.\n"
4603 "Narrowing the search space by choosing the best Formula "
4604 "from the Formulae with the same Scale and ScaledReg.\n");
4605
4606 // Map the "Scale * ScaledReg" pair to the best formula of current LSRUse.
4607 using BestFormulaeTy = DenseMap<std::pair<const SCEV *, int64_t>, size_t>;
4608
4609 BestFormulaeTy BestFormulae;
4610 #ifndef NDEBUG
4611 bool ChangedFormulae = false;
4612 #endif
4613 DenseSet<const SCEV *> VisitedRegs;
4614 SmallPtrSet<const SCEV *, 16> Regs;
4615
4616 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4617 LSRUse &LU = Uses[LUIdx];
4618 LLVM_DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs());
4619 dbgs() << '\n');
4620
4621 // Return true if Formula FA is better than Formula FB.
4622 auto IsBetterThan = [&](Formula &FA, Formula &FB) {
4623 // First we will try to choose the Formula with fewer new registers.
4624 // For a register used by current Formula, the more the register is
4625 // shared among LSRUses, the less we increase the register number
4626 // counter of the formula.
4627 size_t FARegNum = 0;
4628 for (const SCEV *Reg : FA.BaseRegs) {
4629 const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg);
4630 FARegNum += (NumUses - UsedByIndices.count() + 1);
4631 }
4632 size_t FBRegNum = 0;
4633 for (const SCEV *Reg : FB.BaseRegs) {
4634 const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(Reg);
4635 FBRegNum += (NumUses - UsedByIndices.count() + 1);
4636 }
4637 if (FARegNum != FBRegNum)
4638 return FARegNum < FBRegNum;
4639
4640 // If the new register numbers are the same, choose the Formula with
4641 // less Cost.
4642 Cost CostFA(L, SE, TTI);
4643 Cost CostFB(L, SE, TTI);
4644 Regs.clear();
4645 CostFA.RateFormula(FA, Regs, VisitedRegs, LU);
4646 Regs.clear();
4647 CostFB.RateFormula(FB, Regs, VisitedRegs, LU);
4648 return CostFA.isLess(CostFB);
4649 };
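    // For illustration (hypothetical counts): with NumUses == 10, a base
    // register already shared by 9 uses contributes 10 - 9 + 1 == 2 to the
    // sums above, while a register private to this use contributes
    // 10 - 1 + 1 == 10, so formulae built from widely shared registers win
    // the first comparison.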
4650
4651 bool Any = false;
4652 for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms;
4653 ++FIdx) {
4654 Formula &F = LU.Formulae[FIdx];
4655 if (!F.ScaledReg)
4656 continue;
4657 auto P = BestFormulae.insert({{F.ScaledReg, F.Scale}, FIdx});
4658 if (P.second)
4659 continue;
4660
4661 Formula &Best = LU.Formulae[P.first->second];
4662 if (IsBetterThan(F, Best))
4663 std::swap(F, Best);
4664 LLVM_DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs());
4665 dbgs() << "\n"
4666 " in favor of formula ";
4667 Best.print(dbgs()); dbgs() << '\n');
4668 #ifndef NDEBUG
4669 ChangedFormulae = true;
4670 #endif
4671 LU.DeleteFormula(F);
4672 --FIdx;
4673 --NumForms;
4674 Any = true;
4675 }
4676 if (Any)
4677 LU.RecomputeRegs(LUIdx, RegUses);
4678
4679 // Reset this to prepare for the next use.
4680 BestFormulae.clear();
4681 }
4682
4683 LLVM_DEBUG(if (ChangedFormulae) {
4684 dbgs() << "\n"
4685 "After filtering out undesirable candidates:\n";
4686 print_uses(dbgs());
4687 });
4688 }
4689
4690 /// If we are over the complexity limit, filter out any post-inc preferring
4691 /// variables to only post-inc values.
4692 void LSRInstance::NarrowSearchSpaceByFilterPostInc() {
4693 if (!TTI.shouldFavorPostInc())
4694 return;
4695 if (EstimateSearchSpaceComplexity() < ComplexityLimit)
4696 return;
4697
4698 LLVM_DEBUG(dbgs() << "The search space is too complex.\n"
4699 "Narrowing the search space by choosing the lowest "
4700 "register Formula for PostInc Uses.\n");
4701
4702 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4703 LSRUse &LU = Uses[LUIdx];
4704
4705 if (LU.Kind != LSRUse::Address)
4706 continue;
4707 if (!TTI.isIndexedLoadLegal(TTI.MIM_PostInc, LU.AccessTy.getType()) &&
4708 !TTI.isIndexedStoreLegal(TTI.MIM_PostInc, LU.AccessTy.getType()))
4709 continue;
4710
4711 size_t MinRegs = std::numeric_limits<size_t>::max();
4712 for (const Formula &F : LU.Formulae)
4713 MinRegs = std::min(F.getNumRegs(), MinRegs);
4714
4715 bool Any = false;
4716 for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms;
4717 ++FIdx) {
4718 Formula &F = LU.Formulae[FIdx];
4719 if (F.getNumRegs() > MinRegs) {
4720 LLVM_DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs());
4721 dbgs() << "\n");
4722 LU.DeleteFormula(F);
4723 --FIdx;
4724 --NumForms;
4725 Any = true;
4726 }
4727 }
4728 if (Any)
4729 LU.RecomputeRegs(LUIdx, RegUses);
4730
4731 if (EstimateSearchSpaceComplexity() < ComplexityLimit)
4732 break;
4733 }
4734
4735 LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4736 }
4737
4738 /// This function deletes formulas with a high expected register count.
4739 /// Assuming we don't know the value of each formula (we have already deleted
4740 /// all the inefficient ones), compute the probability of not selecting for
4741 /// each register.
4742 /// For example,
4743 /// Use1:
4744 /// reg(a) + reg({0,+,1})
4745 /// reg(a) + reg({-1,+,1}) + 1
4746 /// reg({a,+,1})
4747 /// Use2:
4748 /// reg(b) + reg({0,+,1})
4749 /// reg(b) + reg({-1,+,1}) + 1
4750 /// reg({b,+,1})
4751 /// Use3:
4752 /// reg(c) + reg(b) + reg({0,+,1})
4753 /// reg(c) + reg({b,+,1})
4754 ///
4755 /// Probability of not selecting
4756 /// Use1 Use2 Use3
4757 /// reg(a) (1/3) * 1 * 1
4758 /// reg(b) 1 * (1/3) * (1/2)
4759 /// reg({0,+,1}) (2/3) * (2/3) * (1/2)
4760 /// reg({-1,+,1}) (2/3) * (2/3) * 1
4761 /// reg({a,+,1}) (2/3) * 1 * 1
4762 /// reg({b,+,1}) 1 * (2/3) * (2/3)
4763 /// reg(c) 1 * 1 * 0
4764 ///
4765 /// Now compute the mathematical expectation of the register count for each
4766 /// formula. Note that for each use we exclude the probability of not
4767 /// selecting for that use. For example, for Use1 the probability for reg(a)
4768 /// would be just 1 * 1 (excluding the 1/3 probability of not selecting for Use1).
4769 /// Use1:
4770 /// reg(a) + reg({0,+,1}) 1 + 1/3 -- to be deleted
4771 /// reg(a) + reg({-1,+,1}) + 1 1 + 4/9 -- to be deleted
4772 /// reg({a,+,1}) 1
4773 /// Use2:
4774 /// reg(b) + reg({0,+,1}) 1/2 + 1/3 -- to be deleted
4775 /// reg(b) + reg({-1,+,1}) + 1 1/2 + 2/3 -- to be deleted
4776 /// reg({b,+,1}) 2/3
4777 /// Use3:
4778 /// reg(c) + reg(b) + reg({0,+,1}) 1 + 1/3 + 4/9 -- to be deleted
4779 /// reg(c) + reg({b,+,1}) 1 + 2/3
4780 void LSRInstance::NarrowSearchSpaceByDeletingCostlyFormulas() {
4781 if (EstimateSearchSpaceComplexity() < ComplexityLimit)
4782 return;
4783   // Ok, we have too many formulae on our hands to conveniently handle.
4784 // Use a rough heuristic to thin out the list.
4785
4786   // Set of Regs which will be 100% used in the final solution.
4787 // Used in each formula of a solution (in example above this is reg(c)).
4788 // We can skip them in calculations.
4789 SmallPtrSet<const SCEV *, 4> UniqRegs;
4790 LLVM_DEBUG(dbgs() << "The search space is too complex.\n");
4791
4792   // Map each register to its probability of not being selected.
4793 DenseMap <const SCEV *, float> RegNumMap;
4794 for (const SCEV *Reg : RegUses) {
4795 if (UniqRegs.count(Reg))
4796 continue;
4797 float PNotSel = 1;
4798 for (const LSRUse &LU : Uses) {
4799 if (!LU.Regs.count(Reg))
4800 continue;
4801 float P = LU.getNotSelectedProbability(Reg);
4802 if (P != 0.0)
4803 PNotSel *= P;
4804 else
4805 UniqRegs.insert(Reg);
4806 }
4807 RegNumMap.insert(std::make_pair(Reg, PNotSel));
4808 }
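  // Illustrative check against the example in the function comment: there
  // RegNumMap[reg(a)] would be 1/3 (the product of its not-selected
  // probabilities over all uses), so for Use1's formula reg(a) + reg({0,+,1})
  // the loop below adds (1/3) / (1/3) == 1 for reg(a) and
  // ((2/3) * (2/3) * (1/2)) / (2/3) == 1/3 for reg({0,+,1}), reproducing the
  // 1 + 1/3 expectation shown above.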
4809
4810 LLVM_DEBUG(
4811 dbgs() << "Narrowing the search space by deleting costly formulas\n");
4812
4813   // Delete formulas whose expected register count is high.
4814 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4815 LSRUse &LU = Uses[LUIdx];
4816     // If there is nothing to delete, continue.
4817 if (LU.Formulae.size() < 2)
4818 continue;
4819     // This is a temporary solution to test performance. Float should be
4820     // replaced with a rounding-independent type (based on integers) to avoid
4821 // different results for different target builds.
4822 float FMinRegNum = LU.Formulae[0].getNumRegs();
4823 float FMinARegNum = LU.Formulae[0].getNumRegs();
4824 size_t MinIdx = 0;
4825 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
4826 Formula &F = LU.Formulae[i];
4827 float FRegNum = 0;
4828 float FARegNum = 0;
4829 for (const SCEV *BaseReg : F.BaseRegs) {
4830 if (UniqRegs.count(BaseReg))
4831 continue;
4832 FRegNum += RegNumMap[BaseReg] / LU.getNotSelectedProbability(BaseReg);
4833 if (isa<SCEVAddRecExpr>(BaseReg))
4834 FARegNum +=
4835 RegNumMap[BaseReg] / LU.getNotSelectedProbability(BaseReg);
4836 }
4837 if (const SCEV *ScaledReg = F.ScaledReg) {
4838 if (!UniqRegs.count(ScaledReg)) {
4839 FRegNum +=
4840 RegNumMap[ScaledReg] / LU.getNotSelectedProbability(ScaledReg);
4841 if (isa<SCEVAddRecExpr>(ScaledReg))
4842 FARegNum +=
4843 RegNumMap[ScaledReg] / LU.getNotSelectedProbability(ScaledReg);
4844 }
4845 }
4846 if (FMinRegNum > FRegNum ||
4847 (FMinRegNum == FRegNum && FMinARegNum > FARegNum)) {
4848 FMinRegNum = FRegNum;
4849 FMinARegNum = FARegNum;
4850 MinIdx = i;
4851 }
4852 }
4853 LLVM_DEBUG(dbgs() << " The formula "; LU.Formulae[MinIdx].print(dbgs());
4854 dbgs() << " with min reg num " << FMinRegNum << '\n');
4855 if (MinIdx != 0)
4856 std::swap(LU.Formulae[MinIdx], LU.Formulae[0]);
4857 while (LU.Formulae.size() != 1) {
4858 LLVM_DEBUG(dbgs() << " Deleting "; LU.Formulae.back().print(dbgs());
4859 dbgs() << '\n');
4860 LU.Formulae.pop_back();
4861 }
4862 LU.RecomputeRegs(LUIdx, RegUses);
4863 assert(LU.Formulae.size() == 1 && "Should be exactly 1 min regs formula");
4864 Formula &F = LU.Formulae[0];
4865 LLVM_DEBUG(dbgs() << " Leaving only "; F.print(dbgs()); dbgs() << '\n');
4866 // When we choose the formula, the regs become unique.
4867 UniqRegs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
4868 if (F.ScaledReg)
4869 UniqRegs.insert(F.ScaledReg);
4870 }
4871 LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4872 }
4873
4874 /// Pick a register which seems likely to be profitable, and then in any use
4875 /// which has any reference to that register, delete all formulae which do not
4876 /// reference that register.
4877 void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
4878 // With all other options exhausted, loop until the system is simple
4879 // enough to handle.
4880 SmallPtrSet<const SCEV *, 4> Taken;
4881 while (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
4882     // Ok, we have too many formulae on our hands to conveniently handle.
4883 // Use a rough heuristic to thin out the list.
4884 LLVM_DEBUG(dbgs() << "The search space is too complex.\n");
4885
4886 // Pick the register which is used by the most LSRUses, which is likely
4887 // to be a good reuse register candidate.
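    //
    // For illustration (hypothetical counts): if reg({0,+,1}) is referenced by
    // 12 of 15 uses while every other register is referenced by fewer, it is
    // chosen as Best, and each of those 12 uses then drops any formula that
    // does not mention it.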
4888 const SCEV *Best = nullptr;
4889 unsigned BestNum = 0;
4890 for (const SCEV *Reg : RegUses) {
4891 if (Taken.count(Reg))
4892 continue;
4893 if (!Best) {
4894 Best = Reg;
4895 BestNum = RegUses.getUsedByIndices(Reg).count();
4896 } else {
4897 unsigned Count = RegUses.getUsedByIndices(Reg).count();
4898 if (Count > BestNum) {
4899 Best = Reg;
4900 BestNum = Count;
4901 }
4902 }
4903 }
4904 assert(Best && "Failed to find best LSRUse candidate");
4905
4906 LLVM_DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
4907 << " will yield profitable reuse.\n");
4908 Taken.insert(Best);
4909
4910 // In any use with formulae which references this register, delete formulae
4911 // which don't reference it.
4912 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
4913 LSRUse &LU = Uses[LUIdx];
4914 if (!LU.Regs.count(Best)) continue;
4915
4916 bool Any = false;
4917 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
4918 Formula &F = LU.Formulae[i];
4919 if (!F.referencesReg(Best)) {
4920 LLVM_DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n');
4921 LU.DeleteFormula(F);
4922 --e;
4923 --i;
4924 Any = true;
4925 assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?");
4926 continue;
4927 }
4928 }
4929
4930 if (Any)
4931 LU.RecomputeRegs(LUIdx, RegUses);
4932 }
4933
4934 LLVM_DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs()));
4935 }
4936 }
4937
4938 /// If there are an extraordinary number of formulae to choose from, use some
4939 /// rough heuristics to prune down the number of formulae. This keeps the main
4940 /// solver from taking an extraordinary amount of time in some worst-case
4941 /// scenarios.
4942 void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
4943 NarrowSearchSpaceByDetectingSupersets();
4944 NarrowSearchSpaceByCollapsingUnrolledCode();
4945 NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
4946 if (FilterSameScaledReg)
4947 NarrowSearchSpaceByFilterFormulaWithSameScaledReg();
4948 NarrowSearchSpaceByFilterPostInc();
4949 if (LSRExpNarrow)
4950 NarrowSearchSpaceByDeletingCostlyFormulas();
4951 else
4952 NarrowSearchSpaceByPickingWinnerRegs();
4953 }
4954
4955 /// This is the recursive solver.
4956 void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
4957 Cost &SolutionCost,
4958 SmallVectorImpl<const Formula *> &Workspace,
4959 const Cost &CurCost,
4960 const SmallPtrSet<const SCEV *, 16> &CurRegs,
4961 DenseSet<const SCEV *> &VisitedRegs) const {
4962 // Some ideas:
4963 // - prune more:
4964 // - use more aggressive filtering
4965 // - sort the formula so that the most profitable solutions are found first
4966 // - sort the uses too
4967 // - search faster:
4968 // - don't compute a cost, and then compare. compare while computing a cost
4969 // and bail early.
4970 // - track register sets with SmallBitVector
4971
4972 const LSRUse &LU = Uses[Workspace.size()];
4973
4974 // If this use references any register that's already a part of the
4975 // in-progress solution, consider it a requirement that a formula must
4976 // reference that register in order to be considered. This prunes out
4977 // unprofitable searching.
4978 SmallSetVector<const SCEV *, 4> ReqRegs;
4979 for (const SCEV *S : CurRegs)
4980 if (LU.Regs.count(S))
4981 ReqRegs.insert(S);
4982
4983 SmallPtrSet<const SCEV *, 16> NewRegs;
4984 Cost NewCost(L, SE, TTI);
4985 for (const Formula &F : LU.Formulae) {
4986 // Ignore formulae which may not be ideal in terms of register reuse of
4987 // ReqRegs. The formula should use all required registers before
4988 // introducing new ones.
4989 // This can sometimes (notably when trying to favour postinc) lead to
4990     // sub-optimal decisions. In those cases it is best left to the cost
4991     // modelling to get right.
4992 if (!TTI.shouldFavorPostInc() || LU.Kind != LSRUse::Address) {
4993 int NumReqRegsToFind = std::min(F.getNumRegs(), ReqRegs.size());
4994 for (const SCEV *Reg : ReqRegs) {
4995 if ((F.ScaledReg && F.ScaledReg == Reg) ||
4996 is_contained(F.BaseRegs, Reg)) {
4997 --NumReqRegsToFind;
4998 if (NumReqRegsToFind == 0)
4999 break;
5000 }
5001 }
5002 if (NumReqRegsToFind != 0) {
5003 // If none of the formulae satisfied the required registers, then we could
5004 // clear ReqRegs and try again. Currently, we simply give up in this case.
5005 continue;
5006 }
5007 }
5008
5009 // Evaluate the cost of the current formula. If it's already worse than
5010 // the current best, prune the search at that point.
5011 NewCost = CurCost;
5012 NewRegs = CurRegs;
5013 NewCost.RateFormula(F, NewRegs, VisitedRegs, LU);
5014 if (NewCost.isLess(SolutionCost)) {
5015 Workspace.push_back(&F);
5016 if (Workspace.size() != Uses.size()) {
5017 SolveRecurse(Solution, SolutionCost, Workspace, NewCost,
5018 NewRegs, VisitedRegs);
5019 if (F.getNumRegs() == 1 && Workspace.size() == 1)
5020 VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
5021 } else {
5022 LLVM_DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
5023 dbgs() << ".\nRegs:\n";
5024 for (const SCEV *S : NewRegs) dbgs()
5025 << "- " << *S << "\n";
5026 dbgs() << '\n');
5027
5028 SolutionCost = NewCost;
5029 Solution = Workspace;
5030 }
5031 Workspace.pop_back();
5032 }
5033 }
5034 }
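// Illustrative sketch only (guarded out of the build): a minimal model of the
// branch-and-bound recursion above, assuming each use is reduced to a list of
// per-formula costs and the solution cost is their sum. The real solver also
// threads register sets, required-register pruning and the LSR cost model
// through the recursion.
#if 0
#include <cstddef>
#include <vector>

static void solveRecurseSketch(const std::vector<std::vector<unsigned>> &Uses,
                               size_t Depth, unsigned CurCost,
                               std::vector<size_t> &Workspace,
                               unsigned &BestCost,
                               std::vector<size_t> &Solution) {
  if (Depth == Uses.size()) {
    // The caller only recurses while CurCost < BestCost, so this is a new best.
    BestCost = CurCost;
    Solution = Workspace;
    return;
  }
  for (size_t I = 0, E = Uses[Depth].size(); I != E; ++I) {
    unsigned NewCost = CurCost + Uses[Depth][I];
    if (NewCost >= BestCost)
      continue; // Prune: this branch cannot beat the best solution so far.
    Workspace.push_back(I);
    solveRecurseSketch(Uses, Depth + 1, NewCost, Workspace, BestCost, Solution);
    Workspace.pop_back();
  }
}
#endif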
5035
5036 /// Choose one formula from each use. Return the results in the given Solution
5037 /// vector.
5038 void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
5039 SmallVector<const Formula *, 8> Workspace;
5040 Cost SolutionCost(L, SE, TTI);
5041 SolutionCost.Lose();
5042 Cost CurCost(L, SE, TTI);
5043 SmallPtrSet<const SCEV *, 16> CurRegs;
5044 DenseSet<const SCEV *> VisitedRegs;
5045 Workspace.reserve(Uses.size());
5046
5047 // SolveRecurse does all the work.
5048 SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
5049 CurRegs, VisitedRegs);
5050 if (Solution.empty()) {
5051 LLVM_DEBUG(dbgs() << "\nNo Satisfactory Solution\n");
5052 return;
5053 }
5054
5055 // Ok, we've now made all our decisions.
5056 LLVM_DEBUG(dbgs() << "\n"
5057 "The chosen solution requires ";
5058 SolutionCost.print(dbgs()); dbgs() << ":\n";
5059 for (size_t i = 0, e = Uses.size(); i != e; ++i) {
5060 dbgs() << " ";
5061 Uses[i].print(dbgs());
5062 dbgs() << "\n"
5063 " ";
5064 Solution[i]->print(dbgs());
5065 dbgs() << '\n';
5066 });
5067
5068 assert(Solution.size() == Uses.size() && "Malformed solution!");
5069 }
5070
5071 /// Helper for AdjustInsertPositionForExpand. Climb up the dominator tree as far as
5072 /// we can go while still being dominated by the input positions. This helps
5073 /// canonicalize the insert position, which encourages sharing.
5074 BasicBlock::iterator
5075 LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
5076 const SmallVectorImpl<Instruction *> &Inputs)
5077 const {
5078 Instruction *Tentative = &*IP;
5079 while (true) {
5080 bool AllDominate = true;
5081 Instruction *BetterPos = nullptr;
5082     // Don't bother attempting to insert before a catchswitch; its basic block
5083     // cannot have any other non-PHI instructions.
5084 if (isa<CatchSwitchInst>(Tentative))
5085 return IP;
5086
5087 for (Instruction *Inst : Inputs) {
5088 if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
5089 AllDominate = false;
5090 break;
5091 }
5092 // Attempt to find an insert position in the middle of the block,
5093 // instead of at the end, so that it can be used for other expansions.
5094 if (Tentative->getParent() == Inst->getParent() &&
5095 (!BetterPos || !DT.dominates(Inst, BetterPos)))
5096 BetterPos = &*std::next(BasicBlock::iterator(Inst));
5097 }
5098 if (!AllDominate)
5099 break;
5100 if (BetterPos)
5101 IP = BetterPos->getIterator();
5102 else
5103 IP = Tentative->getIterator();
5104
5105 const Loop *IPLoop = LI.getLoopFor(IP->getParent());
5106 unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0;
5107
5108 BasicBlock *IDom;
5109 for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) {
5110 if (!Rung) return IP;
5111 Rung = Rung->getIDom();
5112 if (!Rung) return IP;
5113 IDom = Rung->getBlock();
5114
5115 // Don't climb into a loop though.
5116 const Loop *IDomLoop = LI.getLoopFor(IDom);
5117 unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0;
5118 if (IDomDepth <= IPLoopDepth &&
5119 (IDomDepth != IPLoopDepth || IDomLoop == IPLoop))
5120 break;
5121 }
5122
5123 Tentative = IDom->getTerminator();
5124 }
5125
5126 return IP;
5127 }
5128
5129 /// Determine an input position which will be dominated by the operands and
5130 /// which will dominate the result.
5131 BasicBlock::iterator
5132 LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,
5133 const LSRFixup &LF,
5134 const LSRUse &LU,
5135 SCEVExpander &Rewriter) const {
5136 // Collect some instructions which must be dominated by the
5137 // expanding replacement. These must be dominated by any operands that
5138 // will be required in the expansion.
5139 SmallVector<Instruction *, 4> Inputs;
5140 if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
5141 Inputs.push_back(I);
5142 if (LU.Kind == LSRUse::ICmpZero)
5143 if (Instruction *I =
5144 dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
5145 Inputs.push_back(I);
5146 if (LF.PostIncLoops.count(L)) {
5147 if (LF.isUseFullyOutsideLoop(L))
5148 Inputs.push_back(L->getLoopLatch()->getTerminator());
5149 else
5150 Inputs.push_back(IVIncInsertPos);
5151 }
5152 // The expansion must also be dominated by the increment positions of any
5153   // loops for which it is using post-inc mode.
5154 for (const Loop *PIL : LF.PostIncLoops) {
5155 if (PIL == L) continue;
5156
5157 // Be dominated by the loop exit.
5158 SmallVector<BasicBlock *, 4> ExitingBlocks;
5159 PIL->getExitingBlocks(ExitingBlocks);
5160 if (!ExitingBlocks.empty()) {
5161 BasicBlock *BB = ExitingBlocks[0];
5162 for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i)
5163 BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]);
5164 Inputs.push_back(BB->getTerminator());
5165 }
5166 }
5167
5168 assert(!isa<PHINode>(LowestIP) && !LowestIP->isEHPad()
5169 && !isa<DbgInfoIntrinsic>(LowestIP) &&
5170 "Insertion point must be a normal instruction");
5171
5172 // Then, climb up the immediate dominator tree as far as we can go while
5173 // still being dominated by the input positions.
5174 BasicBlock::iterator IP = HoistInsertPosition(LowestIP, Inputs);
5175
5176 // Don't insert instructions before PHI nodes.
5177 while (isa<PHINode>(IP)) ++IP;
5178
5179 // Ignore landingpad instructions.
5180 while (IP->isEHPad()) ++IP;
5181
5182 // Ignore debug intrinsics.
5183 while (isa<DbgInfoIntrinsic>(IP)) ++IP;
5184
5185 // Set IP below instructions recently inserted by SCEVExpander. This keeps the
5186 // IP consistent across expansions and allows the previously inserted
5187 // instructions to be reused by subsequent expansion.
5188 while (Rewriter.isInsertedInstruction(&*IP) && IP != LowestIP)
5189 ++IP;
5190
5191 return IP;
5192 }
5193
5194 /// Emit instructions for the leading candidate expression for this LSRUse (this
5195 /// is called "expanding").
5196 Value *LSRInstance::Expand(const LSRUse &LU, const LSRFixup &LF,
5197 const Formula &F, BasicBlock::iterator IP,
5198 SCEVExpander &Rewriter,
5199 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
5200 if (LU.RigidFormula)
5201 return LF.OperandValToReplace;
5202
5203 // Determine an input position which will be dominated by the operands and
5204 // which will dominate the result.
5205 IP = AdjustInsertPositionForExpand(IP, LF, LU, Rewriter);
5206 Rewriter.setInsertPoint(&*IP);
5207
5208 // Inform the Rewriter if we have a post-increment use, so that it can
5209 // perform an advantageous expansion.
5210 Rewriter.setPostInc(LF.PostIncLoops);
5211
5212 // This is the type that the user actually needs.
5213 Type *OpTy = LF.OperandValToReplace->getType();
5214 // This will be the type that we'll initially expand to.
5215 Type *Ty = F.getType();
5216 if (!Ty)
5217 // No type known; just expand directly to the ultimate type.
5218 Ty = OpTy;
5219 else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy))
5220 // Expand directly to the ultimate type if it's the right size.
5221 Ty = OpTy;
5222 // This is the type to do integer arithmetic in.
5223 Type *IntTy = SE.getEffectiveSCEVType(Ty);
5224
5225 // Build up a list of operands to add together to form the full base.
5226 SmallVector<const SCEV *, 8> Ops;
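  //
  // Illustration (hypothetical formula): for BaseRegs = {A}, ScaledReg = B with
  // Scale = 4, BaseGV = @g and BaseOffset = 16, the steps below roughly build
  // up A, then 4 * B, then @g, then 16, flushing Ops at the boundaries so that
  // SCEVExpander keeps the pieces next to the use instead of hoisting a
  // partially folded address.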
5227
5228 // Expand the BaseRegs portion.
5229 for (const SCEV *Reg : F.BaseRegs) {
5230 assert(!Reg->isZero() && "Zero allocated in a base register!");
5231
5232 // If we're expanding for a post-inc user, make the post-inc adjustment.
5233 Reg = denormalizeForPostIncUse(Reg, LF.PostIncLoops, SE);
5234 Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, nullptr)));
5235 }
5236
5237 // Expand the ScaledReg portion.
5238 Value *ICmpScaledV = nullptr;
5239 if (F.Scale != 0) {
5240 const SCEV *ScaledS = F.ScaledReg;
5241
5242 // If we're expanding for a post-inc user, make the post-inc adjustment.
5243 PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
5244 ScaledS = denormalizeForPostIncUse(ScaledS, Loops, SE);
5245
5246 if (LU.Kind == LSRUse::ICmpZero) {
5247 // Expand ScaleReg as if it was part of the base regs.
5248 if (F.Scale == 1)
5249 Ops.push_back(
5250 SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr)));
5251 else {
5252 // An interesting way of "folding" with an icmp is to use a negated
5253 // scale, which we'll implement by inserting it into the other operand
5254 // of the icmp.
5255 assert(F.Scale == -1 &&
5256 "The only scale supported by ICmpZero uses is -1!");
5257 ICmpScaledV = Rewriter.expandCodeFor(ScaledS, nullptr);
5258 }
5259 } else {
5260 // Otherwise just expand the scaled register and an explicit scale,
5261 // which is expected to be matched as part of the address.
5262
5263       // Flush the operand list to suppress SCEVExpander hoisting address modes,
5264       // unless the addressing mode will not be folded.
5265 if (!Ops.empty() && LU.Kind == LSRUse::Address &&
5266 isAMCompletelyFolded(TTI, LU, F)) {
5267 Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), nullptr);
5268 Ops.clear();
5269 Ops.push_back(SE.getUnknown(FullV));
5270 }
5271 ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr));
5272 if (F.Scale != 1)
5273 ScaledS =
5274 SE.getMulExpr(ScaledS, SE.getConstant(ScaledS->getType(), F.Scale));
5275 Ops.push_back(ScaledS);
5276 }
5277 }
5278
5279 // Expand the GV portion.
5280 if (F.BaseGV) {
5281 // Flush the operand list to suppress SCEVExpander hoisting.
5282 if (!Ops.empty()) {
5283 Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty);
5284 Ops.clear();
5285 Ops.push_back(SE.getUnknown(FullV));
5286 }
5287 Ops.push_back(SE.getUnknown(F.BaseGV));
5288 }
5289
5290 // Flush the operand list to suppress SCEVExpander hoisting of both folded and
5291 // unfolded offsets. LSR assumes they both live next to their uses.
5292 if (!Ops.empty()) {
5293 Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty);
5294 Ops.clear();
5295 Ops.push_back(SE.getUnknown(FullV));
5296 }
5297
5298 // Expand the immediate portion.
5299 int64_t Offset = (uint64_t)F.BaseOffset + LF.Offset;
5300 if (Offset != 0) {
5301 if (LU.Kind == LSRUse::ICmpZero) {
5302 // The other interesting way of "folding" with an ICmpZero is to use a
5303 // negated immediate.
5304 if (!ICmpScaledV)
5305 ICmpScaledV = ConstantInt::get(IntTy, -(uint64_t)Offset);
5306 else {
5307 Ops.push_back(SE.getUnknown(ICmpScaledV));
5308 ICmpScaledV = ConstantInt::get(IntTy, Offset);
5309 }
5310 } else {
5311 // Just add the immediate values. These again are expected to be matched
5312 // as part of the address.
5313 Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset)));
5314 }
5315 }
5316
5317 // Expand the unfolded offset portion.
5318 int64_t UnfoldedOffset = F.UnfoldedOffset;
5319 if (UnfoldedOffset != 0) {
5320 // Just add the immediate values.
5321 Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy,
5322 UnfoldedOffset)));
5323 }
5324
5325 // Emit instructions summing all the operands.
5326 const SCEV *FullS = Ops.empty() ?
5327 SE.getConstant(IntTy, 0) :
5328 SE.getAddExpr(Ops);
5329 Value *FullV = Rewriter.expandCodeFor(FullS, Ty);
5330
5331 // We're done expanding now, so reset the rewriter.
5332 Rewriter.clearPostInc();
5333
5334 // An ICmpZero Formula represents an ICmp which we're handling as a
5335 // comparison against zero. Now that we've expanded an expression for that
5336 // form, update the ICmp's other operand.
5337 if (LU.Kind == LSRUse::ICmpZero) {
5338 ICmpInst *CI = cast<ICmpInst>(LF.UserInst);
5339 if (auto *OperandIsInstr = dyn_cast<Instruction>(CI->getOperand(1)))
5340 DeadInsts.emplace_back(OperandIsInstr);
5341 assert(!F.BaseGV && "ICmp does not support folding a global value and "
5342 "a scale at the same time!");
5343 if (F.Scale == -1) {
5344 if (ICmpScaledV->getType() != OpTy) {
5345 Instruction *Cast =
5346 CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false,
5347 OpTy, false),
5348 ICmpScaledV, OpTy, "tmp", CI);
5349 ICmpScaledV = Cast;
5350 }
5351 CI->setOperand(1, ICmpScaledV);
5352 } else {
5353 // A scale of 1 means that the scale has been expanded as part of the
5354 // base regs.
5355 assert((F.Scale == 0 || F.Scale == 1) &&
5356 "ICmp does not support folding a global value and "
5357 "a scale at the same time!");
5358 Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
5359 -(uint64_t)Offset);
5360 if (C->getType() != OpTy)
5361 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
5362 OpTy, false),
5363 C, OpTy);
5364
5365 CI->setOperand(1, C);
5366 }
5367 }
5368
5369 return FullV;
5370 }
5371
5372 /// Helper for Rewrite. PHI nodes are special because the use of their operands
5373 /// effectively happens in their predecessor blocks, so the expression may need
5374 /// to be expanded in multiple places.
5375 void LSRInstance::RewriteForPHI(
5376 PHINode *PN, const LSRUse &LU, const LSRFixup &LF, const Formula &F,
5377 SCEVExpander &Rewriter, SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
5378 DenseMap<BasicBlock *, Value *> Inserted;
5379 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
5380 if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
5381 bool needUpdateFixups = false;
5382 BasicBlock *BB = PN->getIncomingBlock(i);
5383
5384 // If this is a critical edge, split the edge so that we do not insert
5385 // the code on all predecessor/successor paths. We do this unless this
5386 // is the canonical backedge for this loop, which complicates post-inc
5387 // users.
5388 if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 &&
5389 !isa<IndirectBrInst>(BB->getTerminator()) &&
5390 !isa<CatchSwitchInst>(BB->getTerminator())) {
5391 BasicBlock *Parent = PN->getParent();
5392 Loop *PNLoop = LI.getLoopFor(Parent);
5393 if (!PNLoop || Parent != PNLoop->getHeader()) {
5394 // Split the critical edge.
5395 BasicBlock *NewBB = nullptr;
5396 if (!Parent->isLandingPad()) {
5397 NewBB =
5398 SplitCriticalEdge(BB, Parent,
5399 CriticalEdgeSplittingOptions(&DT, &LI, MSSAU)
5400 .setMergeIdenticalEdges()
5401 .setKeepOneInputPHIs());
5402 } else {
5403 SmallVector<BasicBlock*, 2> NewBBs;
5404 SplitLandingPadPredecessors(Parent, BB, "", "", NewBBs, &DT, &LI);
5405 NewBB = NewBBs[0];
5406 }
5407 // If NewBB==NULL, then SplitCriticalEdge refused to split because all
5408 // phi predecessors are identical. The simple thing to do is skip
5409 // splitting in this case rather than complicate the API.
5410 if (NewBB) {
5411 // If PN is outside of the loop and BB is in the loop, we want to
5412 // move the block to be immediately before the PHI block, not
5413 // immediately after BB.
5414 if (L->contains(BB) && !L->contains(PN))
5415 NewBB->moveBefore(PN->getParent());
5416
5417 // Splitting the edge can reduce the number of PHI entries we have.
5418 e = PN->getNumIncomingValues();
5419 BB = NewBB;
5420 i = PN->getBasicBlockIndex(BB);
5421
5422 needUpdateFixups = true;
5423 }
5424 }
5425 }
5426
5427 std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
5428 Inserted.insert(std::make_pair(BB, static_cast<Value *>(nullptr)));
5429 if (!Pair.second)
5430 PN->setIncomingValue(i, Pair.first->second);
5431 else {
5432 Value *FullV = Expand(LU, LF, F, BB->getTerminator()->getIterator(),
5433 Rewriter, DeadInsts);
5434
5435 // If this is reuse-by-noop-cast, insert the noop cast.
5436 Type *OpTy = LF.OperandValToReplace->getType();
5437 if (FullV->getType() != OpTy)
5438 FullV =
5439 CastInst::Create(CastInst::getCastOpcode(FullV, false,
5440 OpTy, false),
5441 FullV, LF.OperandValToReplace->getType(),
5442 "tmp", BB->getTerminator());
5443
5444 PN->setIncomingValue(i, FullV);
5445 Pair.first->second = FullV;
5446 }
5447
5448       // If LSR splits a critical edge and the phi node has other pending
5449 // fixup operands, we need to update those pending fixups. Otherwise
5450 // formulae will not be implemented completely and some instructions
5451 // will not be eliminated.
5452 if (needUpdateFixups) {
5453 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx)
5454 for (LSRFixup &Fixup : Uses[LUIdx].Fixups)
5455           // If the fixup is supposed to rewrite some operand in the phi
5456           // that was just updated, that operand may already have been
5457           // moved to another phi node. Such a fixup requires an update.
5458 if (Fixup.UserInst == PN) {
5459 // Check if the operand we try to replace still exists in the
5460 // original phi.
5461 bool foundInOriginalPHI = false;
5462 for (const auto &val : PN->incoming_values())
5463 if (val == Fixup.OperandValToReplace) {
5464 foundInOriginalPHI = true;
5465 break;
5466 }
5467
5468             // If the fixup operand was found in the original PHI, nothing to do.
5469 if (foundInOriginalPHI)
5470 continue;
5471
5472             // Otherwise it might have been moved to another PHI and requires an update.
5473             // If the fixup operand is not found in any of the incoming blocks, that
5474             // means we have already rewritten it, so there is nothing to do.
5475 for (const auto &Block : PN->blocks())
5476 for (BasicBlock::iterator I = Block->begin(); isa<PHINode>(I);
5477 ++I) {
5478 PHINode *NewPN = cast<PHINode>(I);
5479 for (const auto &val : NewPN->incoming_values())
5480 if (val == Fixup.OperandValToReplace)
5481 Fixup.UserInst = NewPN;
5482 }
5483 }
5484 }
5485 }
5486 }
5487
5488 /// Emit instructions for the leading candidate expression for this LSRUse (this
5489 /// is called "expanding"), and update the UserInst to reference the newly
5490 /// expanded value.
5491 void LSRInstance::Rewrite(const LSRUse &LU, const LSRFixup &LF,
5492 const Formula &F, SCEVExpander &Rewriter,
5493 SmallVectorImpl<WeakTrackingVH> &DeadInsts) const {
5494 // First, find an insertion point that dominates UserInst. For PHI nodes,
5495 // find the nearest block which dominates all the relevant uses.
5496 if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
5497 RewriteForPHI(PN, LU, LF, F, Rewriter, DeadInsts);
5498 } else {
5499 Value *FullV =
5500 Expand(LU, LF, F, LF.UserInst->getIterator(), Rewriter, DeadInsts);
5501
5502 // If this is reuse-by-noop-cast, insert the noop cast.
5503 Type *OpTy = LF.OperandValToReplace->getType();
5504 if (FullV->getType() != OpTy) {
5505 Instruction *Cast =
5506 CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
5507 FullV, OpTy, "tmp", LF.UserInst);
5508 FullV = Cast;
5509 }
5510
5511 // Update the user. ICmpZero is handled specially here (for now) because
5512 // Expand may have updated one of the operands of the icmp already, and
5513 // its new value may happen to be equal to LF.OperandValToReplace, in
5514 // which case doing replaceUsesOfWith leads to replacing both operands
5515 // with the same value. TODO: Reorganize this.
5516 if (LU.Kind == LSRUse::ICmpZero)
5517 LF.UserInst->setOperand(0, FullV);
5518 else
5519 LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV);
5520 }
5521
5522 if (auto *OperandIsInstr = dyn_cast<Instruction>(LF.OperandValToReplace))
5523 DeadInsts.emplace_back(OperandIsInstr);
5524 }
5525
5526 /// Rewrite all the fixup locations with new values, following the chosen
5527 /// solution.
5528 void LSRInstance::ImplementSolution(
5529 const SmallVectorImpl<const Formula *> &Solution) {
5530 // Keep track of instructions we may have made dead, so that
5531 // we can remove them after we are done working.
5532 SmallVector<WeakTrackingVH, 16> DeadInsts;
5533
5534 SCEVExpander Rewriter(SE, L->getHeader()->getModule()->getDataLayout(), "lsr",
5535 false);
5536 #ifndef NDEBUG
5537 Rewriter.setDebugType(DEBUG_TYPE);
5538 #endif
5539 Rewriter.disableCanonicalMode();
5540 Rewriter.enableLSRMode();
5541 Rewriter.setIVIncInsertPos(L, IVIncInsertPos);
5542
5543 // Mark phi nodes that terminate chains so the expander tries to reuse them.
5544 for (const IVChain &Chain : IVChainVec) {
5545 if (PHINode *PN = dyn_cast<PHINode>(Chain.tailUserInst()))
5546 Rewriter.setChainedPhi(PN);
5547 }
5548
5549 // Expand the new value definitions and update the users.
5550 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx)
5551 for (const LSRFixup &Fixup : Uses[LUIdx].Fixups) {
5552 Rewrite(Uses[LUIdx], Fixup, *Solution[LUIdx], Rewriter, DeadInsts);
5553 Changed = true;
5554 }
5555
5556 for (const IVChain &Chain : IVChainVec) {
5557 GenerateIVChain(Chain, Rewriter, DeadInsts);
5558 Changed = true;
5559 }
5560 // Clean up after ourselves. This must be done before deleting any
5561 // instructions.
5562 Rewriter.clear();
5563
5564 Changed |= RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts,
5565 &TLI, MSSAU);
5566 }
5567
LSRInstance(Loop * L,IVUsers & IU,ScalarEvolution & SE,DominatorTree & DT,LoopInfo & LI,const TargetTransformInfo & TTI,AssumptionCache & AC,TargetLibraryInfo & TLI,MemorySSAUpdater * MSSAU)5568 LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE,
5569 DominatorTree &DT, LoopInfo &LI,
5570 const TargetTransformInfo &TTI, AssumptionCache &AC,
5571 TargetLibraryInfo &TLI, MemorySSAUpdater *MSSAU)
5572 : IU(IU), SE(SE), DT(DT), LI(LI), AC(AC), TLI(TLI), TTI(TTI), L(L),
5573 MSSAU(MSSAU), FavorBackedgeIndex(EnableBackedgeIndexing &&
5574 TTI.shouldFavorBackedgeIndex(L)) {
5575 // If LoopSimplify form is not available, stay out of trouble.
5576 if (!L->isLoopSimplifyForm())
5577 return;
5578
5579 // If there's no interesting work to be done, bail early.
5580 if (IU.empty()) return;
5581
5582 // If there's too much analysis to be done, bail early. We won't be able to
5583 // model the problem anyway.
5584 unsigned NumUsers = 0;
5585 for (const IVStrideUse &U : IU) {
5586 if (++NumUsers > MaxIVUsers) {
5587 (void)U;
5588 LLVM_DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << U
5589 << "\n");
5590 return;
5591 }
5592 // Bail out if we have a PHI on an EHPad that gets a value from a
5593 // CatchSwitchInst. Because the CatchSwitchInst cannot be split, there is
5594 // no good place to stick any instructions.
5595 if (auto *PN = dyn_cast<PHINode>(U.getUser())) {
5596 auto *FirstNonPHI = PN->getParent()->getFirstNonPHI();
5597 if (isa<FuncletPadInst>(FirstNonPHI) ||
5598 isa<CatchSwitchInst>(FirstNonPHI))
5599 for (BasicBlock *PredBB : PN->blocks())
5600 if (isa<CatchSwitchInst>(PredBB->getFirstNonPHI()))
5601 return;
5602 }
5603 }
5604
5605 #ifndef NDEBUG
5606 // All dominating loops must have preheaders, or SCEVExpander may not be able
5607 // to materialize an AddRecExpr whose Start is an outer AddRecExpr.
5608 //
5609 // IVUsers analysis should only create users that are dominated by simple loop
5610 // headers. Since this loop should dominate all of its users, its user list
5611 // should be empty if this loop itself is not within a simple loop nest.
5612 for (DomTreeNode *Rung = DT.getNode(L->getLoopPreheader());
5613 Rung; Rung = Rung->getIDom()) {
5614 BasicBlock *BB = Rung->getBlock();
5615 const Loop *DomLoop = LI.getLoopFor(BB);
5616 if (DomLoop && DomLoop->getHeader() == BB) {
5617 assert(DomLoop->getLoopPreheader() && "LSR needs a simplified loop nest");
5618 }
5619 }
5620 #endif // NDEBUG
5621
5622 LLVM_DEBUG(dbgs() << "\nLSR on loop ";
5623 L->getHeader()->printAsOperand(dbgs(), /*PrintType=*/false);
5624 dbgs() << ":\n");
5625
5626 // First, perform some low-level loop optimizations.
5627 OptimizeShadowIV();
5628 OptimizeLoopTermCond();
5629
5630 // If loop preparation eliminates all interesting IV users, bail.
5631 if (IU.empty()) return;
5632
5633 // Skip nested loops until we can model them better with formulae.
5634 if (!L->isInnermost()) {
5635 LLVM_DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n");
5636 return;
5637 }
5638
5639 // Start collecting data and preparing for the solver.
5640 CollectChains();
5641 CollectInterestingTypesAndFactors();
5642 CollectFixupsAndInitialFormulae();
5643 CollectLoopInvariantFixupsAndFormulae();
5644
5645 if (Uses.empty())
5646 return;
5647
5648 LLVM_DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
5649 print_uses(dbgs()));
5650
5651 // Now use the reuse data to generate a bunch of interesting ways
5652 // to formulate the values needed for the uses.
5653 GenerateAllReuseFormulae();
5654
5655 FilterOutUndesirableDedicatedRegisters();
5656 NarrowSearchSpaceUsingHeuristics();
5657
5658 SmallVector<const Formula *, 8> Solution;
5659 Solve(Solution);
5660
5661 // Release memory that is no longer needed.
5662 Factors.clear();
5663 Types.clear();
5664 RegUses.clear();
5665
5666 if (Solution.empty())
5667 return;
5668
5669 #ifndef NDEBUG
5670 // Formulae should be legal.
5671 for (const LSRUse &LU : Uses) {
5672 for (const Formula &F : LU.Formulae)
5673 assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy,
5674 F) && "Illegal formula generated!");
5675 };
5676 #endif
5677
5678 // Now that we've decided what we want, make it so.
5679 ImplementSolution(Solution);
5680 }
5681
5682 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
5683 void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
5684 if (Factors.empty() && Types.empty()) return;
5685
5686 OS << "LSR has identified the following interesting factors and types: ";
5687 bool First = true;
5688
5689 for (int64_t Factor : Factors) {
5690 if (!First) OS << ", ";
5691 First = false;
5692 OS << '*' << Factor;
5693 }
5694
5695 for (Type *Ty : Types) {
5696 if (!First) OS << ", ";
5697 First = false;
5698 OS << '(' << *Ty << ')';
5699 }
5700 OS << '\n';
5701 }
5702
5703 void LSRInstance::print_fixups(raw_ostream &OS) const {
5704 OS << "LSR is examining the following fixup sites:\n";
5705 for (const LSRUse &LU : Uses)
5706 for (const LSRFixup &LF : LU.Fixups) {
5707 dbgs() << " ";
5708 LF.print(OS);
5709 OS << '\n';
5710 }
5711 }
5712
5713 void LSRInstance::print_uses(raw_ostream &OS) const {
5714 OS << "LSR is examining the following uses:\n";
5715 for (const LSRUse &LU : Uses) {
5716 dbgs() << " ";
5717 LU.print(OS);
5718 OS << '\n';
5719 for (const Formula &F : LU.Formulae) {
5720 OS << " ";
5721 F.print(OS);
5722 OS << '\n';
5723 }
5724 }
5725 }
5726
5727 void LSRInstance::print(raw_ostream &OS) const {
5728 print_factors_and_types(OS);
5729 print_fixups(OS);
5730 print_uses(OS);
5731 }
5732
5733 LLVM_DUMP_METHOD void LSRInstance::dump() const {
5734 print(errs()); errs() << '\n';
5735 }
5736 #endif
5737
5738 namespace {
5739
5740 class LoopStrengthReduce : public LoopPass {
5741 public:
5742 static char ID; // Pass ID, replacement for typeid
5743
5744 LoopStrengthReduce();
5745
5746 private:
5747 bool runOnLoop(Loop *L, LPPassManager &LPM) override;
5748 void getAnalysisUsage(AnalysisUsage &AU) const override;
5749 };
5750
5751 } // end anonymous namespace
5752
5753 LoopStrengthReduce::LoopStrengthReduce() : LoopPass(ID) {
5754 initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry());
5755 }
5756
5757 void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
5758 // We split critical edges, so we change the CFG. However, we do update
5759 // many analyses if they are around.
5760 AU.addPreservedID(LoopSimplifyID);
5761
5762 AU.addRequired<LoopInfoWrapperPass>();
5763 AU.addPreserved<LoopInfoWrapperPass>();
5764 AU.addRequiredID(LoopSimplifyID);
5765 AU.addRequired<DominatorTreeWrapperPass>();
5766 AU.addPreserved<DominatorTreeWrapperPass>();
5767 AU.addRequired<ScalarEvolutionWrapperPass>();
5768 AU.addPreserved<ScalarEvolutionWrapperPass>();
5769 AU.addRequired<AssumptionCacheTracker>();
5770 AU.addRequired<TargetLibraryInfoWrapperPass>();
5771 // Requiring LoopSimplify a second time here prevents IVUsers from running
5772 // twice, since LoopSimplify was invalidated by running ScalarEvolution.
5773 AU.addRequiredID(LoopSimplifyID);
5774 AU.addRequired<IVUsersWrapperPass>();
5775 AU.addPreserved<IVUsersWrapperPass>();
5776 AU.addRequired<TargetTransformInfoWrapperPass>();
5777 AU.addPreserved<MemorySSAWrapperPass>();
5778 }
5779
static bool ReduceLoopStrength(Loop *L, IVUsers &IU, ScalarEvolution &SE,
                               DominatorTree &DT, LoopInfo &LI,
                               const TargetTransformInfo &TTI,
                               AssumptionCache &AC, TargetLibraryInfo &TLI,
                               MemorySSA *MSSA) {

  bool Changed = false;
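  // If MemorySSA is available, wrap it in an updater so the transformations
  // below keep it in sync.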
  std::unique_ptr<MemorySSAUpdater> MSSAU;
  if (MSSA)
    MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);

  // Debug preservation - record all llvm.dbg.value from the loop as well as
  // the SCEV of their variable location. Since salvageDebugInfo may change the
  // DIExpression, we need to store the original here as well (i.e. it needs to
  // be in sync with the SCEV).
  SmallVector<
      std::tuple<DbgValueInst *, const Type *, const SCEV *, DIExpression *>,
      32>
      DbgValues;
  for (auto &B : L->getBlocks()) {
    for (auto &I : *B) {
      if (DbgValueInst *D = dyn_cast<DbgValueInst>(&I)) {
        auto V = D->getVariableLocation();
        if (!V || !SE.isSCEVable(V->getType()))
          continue;
        auto DS = SE.getSCEV(V);
        DbgValues.push_back(
            std::make_tuple(D, V->getType(), DS, D->getExpression()));
      }
    }
  }

  // Run the main LSR transformation.
  Changed |=
      LSRInstance(L, IU, SE, DT, LI, TTI, AC, TLI, MSSAU.get()).getChanged();

  // Remove any extra phis created by processing inner loops.
  Changed |= DeleteDeadPHIs(L->getHeader(), &TLI, MSSAU.get());
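  // Optionally have SCEVExpander replace induction variables that LSR has
  // left congruent with one another, then clean up any instructions and phis
  // that become dead as a result.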
  if (EnablePhiElim && L->isLoopSimplifyForm()) {
    SmallVector<WeakTrackingVH, 16> DeadInsts;
    const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
    SCEVExpander Rewriter(SE, DL, "lsr", false);
#ifndef NDEBUG
    Rewriter.setDebugType(DEBUG_TYPE);
#endif
    unsigned numFolded = Rewriter.replaceCongruentIVs(L, &DT, DeadInsts, &TTI);
    if (numFolded) {
      Changed = true;
      RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts, &TLI,
                                                           MSSAU.get());
      DeleteDeadPHIs(L->getHeader(), &TLI, MSSAU.get());
    }
  }
  // Debug preservation - go through all recorded llvm.dbg.value intrinsics
  // and, for those that now have an undef variable location, use the recorded
  // SCEV to try to update it. Compare with the SCEVs of the loop header's phi
  // nodes to find a suitable update candidate. A SCEV match with a constant
  // offset is allowed and will be compensated for in the DIExpression.
  if (Changed) {
    for (auto &D : DbgValues) {
      auto DbgValue = std::get<DbgValueInst *>(D);
      auto DbgValueType = std::get<const Type *>(D);
      auto DbgValueSCEV = std::get<const SCEV *>(D);
      auto DbgDIExpr = std::get<DIExpression *>(D);
      if (!isa<UndefValue>(DbgValue->getVariableLocation()))
        continue;
      for (PHINode &Phi : L->getHeader()->phis()) {
        if (DbgValueType != Phi.getType())
          continue;
        if (!SE.isSCEVable(Phi.getType()))
          continue;
        auto PhiSCEV = SE.getSCEV(&Phi);
        if (Optional<APInt> Offset =
                SE.computeConstantDifference(DbgValueSCEV, PhiSCEV)) {
          auto &Ctx = DbgValue->getContext();
          DbgValue->setOperand(
              0, MetadataAsValue::get(Ctx, ValueAsMetadata::get(&Phi)));
          if (Offset.getValue().getSExtValue()) {
            SmallVector<uint64_t, 8> Ops;
            DIExpression::appendOffset(Ops, Offset.getValue().getSExtValue());
            DbgDIExpr = DIExpression::prependOpcodes(DbgDIExpr, Ops, true);
          }
          DbgValue->setOperand(2, MetadataAsValue::get(Ctx, DbgDIExpr));
        }
      }
    }
  }
  return Changed;
}

bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
  if (skipLoop(L))
    return false;

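  // Gather the analyses this pass declared as required in getAnalysisUsage.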
  auto &IU = getAnalysis<IVUsersWrapperPass>().getIU();
  auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  const auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
      *L->getHeader()->getParent());
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
      *L->getHeader()->getParent());
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
      *L->getHeader()->getParent());
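  // MemorySSA is only preserved, not required, so it may not be available.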
  auto *MSSAAnalysis = getAnalysisIfAvailable<MemorySSAWrapperPass>();
  MemorySSA *MSSA = nullptr;
  if (MSSAAnalysis)
    MSSA = &MSSAAnalysis->getMSSA();
  return ReduceLoopStrength(L, IU, SE, DT, LI, TTI, AC, TLI, MSSA);
}

PreservedAnalyses LoopStrengthReducePass::run(Loop &L, LoopAnalysisManager &AM,
                                              LoopStandardAnalysisResults &AR,
                                              LPMUpdater &) {
  if (!ReduceLoopStrength(&L, AM.getResult<IVUsersAnalysis>(L, AR), AR.SE,
                          AR.DT, AR.LI, AR.TTI, AR.AC, AR.TLI, AR.MSSA))
    return PreservedAnalyses::all();

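  // LSR changed the IR; report the standard loop-pass preserved set, and mark
  // MemorySSA preserved when it was available and kept up to date.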
  auto PA = getLoopPassPreservedAnalyses();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

char LoopStrengthReduce::ID = 0;

INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
                      "Loop Strength Reduction", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(IVUsersWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce",
                    "Loop Strength Reduction", false, false)

Pass *llvm::createLoopStrengthReducePass() { return new LoopStrengthReduce(); }