1 //===- LoopFlatten.cpp - Loop flattening pass------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
// This pass flattens pairs of nested loops into a single loop.
10 //
11 // The intention is to optimise loop nests like this, which together access an
12 // array linearly:
13 //
14 //   for (int i = 0; i < N; ++i)
15 //     for (int j = 0; j < M; ++j)
16 //       f(A[i*M+j]);
17 //
18 // into one loop:
19 //
20 //   for (int i = 0; i < (N*M); ++i)
21 //     f(A[i]);
22 //
23 // It can also flatten loops where the induction variables are not used in the
24 // loop. This is only worth doing if the induction variables are only used in an
25 // expression like i*M+j. If they had any other uses, we would have to insert a
26 // div/mod to reconstruct the original values, so this wouldn't be profitable.
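// For example (an illustrative sketch), if the body were f(A[i*M+j], j), then
// in the flattened loop with combined index k the value j would have to be
// reconstructed as k % M (and i as k / M), i.e. a div/mod per iteration.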
27 //
28 // We also need to prove that N*M will not overflow. The preferred solution is
29 // to widen the IV, which avoids overflow checks, so that is tried first. If
30 // the IV cannot be widened, then we try to determine that this new tripcount
31 // expression won't overflow.
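// For example (an illustrative sketch with 32-bit IVs): N = M = 100000 gives
// N*M = 10^10, which does not fit in 32 bits, so a flattened 32-bit IV would
// wrap; widening both IVs to 64 bits makes the product trivially safe.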
32 //
33 // Q: Does LoopFlatten use SCEV?
34 // Short answer: Yes and no.
35 //
36 // Long answer:
37 // For this transformation to be valid, we require all uses of the induction
38 // variables to be linear expressions of the form i*M+j. The different Loop
39 // APIs are used to get some loop components like the induction variable,
40 // compare statement, etc. In addition, we do some pattern matching to find the
41 // linear expressions and other loop components like the loop increment. The
// latter are examples of expressions that do use the induction variable, but
// are safe to ignore when we check that all uses are of the form i*M+j. We
// keep track of all of this in the bookkeeping struct FlattenInfo.
// We assume the loops to be canonical, i.e. starting at 0 and incrementing by
// 1. This makes the RHS of the compare the loop tripcount (with the right
// predicate). We then use SCEV to sanity check that this tripcount matches
// the tripcount as computed by SCEV.
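//
// For example (an illustrative sketch of a canonical inner-loop latch in IR):
//
//   %inc = add nuw nsw i32 %j, 1
//   %cmp = icmp ult i32 %inc, %M
//   br i1 %cmp, label %inner.header, label %inner.exit
//
// Here the RHS %M of the compare is the tripcount we pattern match, and we
// check it against the tripcount SCEV computes for the loop.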
49 //
50 //===----------------------------------------------------------------------===//
51 
52 #include "llvm/Transforms/Scalar/LoopFlatten.h"
53 
54 #include "llvm/ADT/Statistic.h"
55 #include "llvm/Analysis/AssumptionCache.h"
56 #include "llvm/Analysis/LoopInfo.h"
57 #include "llvm/Analysis/LoopNestAnalysis.h"
58 #include "llvm/Analysis/MemorySSAUpdater.h"
59 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
60 #include "llvm/Analysis/ScalarEvolution.h"
61 #include "llvm/Analysis/TargetTransformInfo.h"
62 #include "llvm/Analysis/ValueTracking.h"
63 #include "llvm/IR/Dominators.h"
64 #include "llvm/IR/Function.h"
65 #include "llvm/IR/IRBuilder.h"
66 #include "llvm/IR/Module.h"
67 #include "llvm/IR/PatternMatch.h"
68 #include "llvm/InitializePasses.h"
69 #include "llvm/Pass.h"
70 #include "llvm/Support/Debug.h"
71 #include "llvm/Support/raw_ostream.h"
72 #include "llvm/Transforms/Scalar.h"
73 #include "llvm/Transforms/Scalar/LoopPassManager.h"
74 #include "llvm/Transforms/Utils/Local.h"
75 #include "llvm/Transforms/Utils/LoopUtils.h"
76 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
77 #include "llvm/Transforms/Utils/SimplifyIndVar.h"
78 #include <optional>
79 
80 using namespace llvm;
81 using namespace llvm::PatternMatch;
82 
83 #define DEBUG_TYPE "loop-flatten"
84 
85 STATISTIC(NumFlattened, "Number of loops flattened");
86 
87 static cl::opt<unsigned> RepeatedInstructionThreshold(
88     "loop-flatten-cost-threshold", cl::Hidden, cl::init(2),
89     cl::desc("Limit on the cost of instructions that can be repeated due to "
90              "loop flattening"));
91 
92 static cl::opt<bool>
93     AssumeNoOverflow("loop-flatten-assume-no-overflow", cl::Hidden,
94                      cl::init(false),
95                      cl::desc("Assume that the product of the two iteration "
96                               "trip counts will never overflow"));
97 
98 static cl::opt<bool>
99     WidenIV("loop-flatten-widen-iv", cl::Hidden, cl::init(true),
100             cl::desc("Widen the loop induction variables, if possible, so "
101                      "overflow checks won't reject flattening"));
102 
103 namespace {
104 // We require all uses of both induction variables to match this pattern:
105 //
106 //   (OuterPHI * InnerTripCount) + InnerPHI
107 //
108 // I.e., it needs to be a linear expression of the induction variables and the
109 // inner loop trip count. We keep track of all different expressions on which
110 // checks will be performed in this bookkeeping struct.
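//
// In IR a matching use typically looks like (an illustrative sketch with
// made-up value names):
//
//   %mul = mul i32 %outer.iv, %inner.tripcount
//   %idx = add i32 %mul, %inner.iv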
111 //
112 struct FlattenInfo {
113   Loop *OuterLoop = nullptr;  // The loop pair to be flattened.
114   Loop *InnerLoop = nullptr;
115 
116   PHINode *InnerInductionPHI = nullptr; // These PHINodes correspond to loop
117   PHINode *OuterInductionPHI = nullptr; // induction variables, which are
118                                         // expected to start at zero and
119                                         // increment by one on each loop.
120 
121   Value *InnerTripCount = nullptr; // The product of these two tripcounts
122   Value *OuterTripCount = nullptr; // will be the new flattened loop
123                                    // tripcount. Also used to recognise a
124                                    // linear expression that will be replaced.
125 
126   SmallPtrSet<Value *, 4> LinearIVUses;  // Contains the linear expressions
127                                          // of the form i*M+j that will be
128                                          // replaced.
129 
130   BinaryOperator *InnerIncrement = nullptr;  // Uses of induction variables in
131   BinaryOperator *OuterIncrement = nullptr;  // loop control statements that
132   BranchInst *InnerBranch = nullptr;         // are safe to ignore.
133 
134   BranchInst *OuterBranch = nullptr; // The instruction that needs to be
135                                      // updated with new tripcount.
136 
137   SmallPtrSet<PHINode *, 4> InnerPHIsToTransform;
138 
139   bool Widened = false; // Whether this holds the flatten info before or after
140                         // widening.
141 
142   PHINode *NarrowInnerInductionPHI = nullptr; // Holds the old/narrow induction
  PHINode *NarrowOuterInductionPHI = nullptr; // phis, i.e. the Phis before IV
                                              // widening was applied. Used to
                                              // skip checks on phi nodes.
146 
  FlattenInfo(Loop *OL, Loop *IL) : OuterLoop(OL), InnerLoop(IL) {}
148 
149   bool isNarrowInductionPhi(PHINode *Phi) {
150     // This can't be the narrow phi if we haven't widened the IV first.
151     if (!Widened)
152       return false;
153     return NarrowInnerInductionPHI == Phi || NarrowOuterInductionPHI == Phi;
154   }
155   bool isInnerLoopIncrement(User *U) {
156     return InnerIncrement == U;
157   }
158   bool isOuterLoopIncrement(User *U) {
159     return OuterIncrement == U;
160   }
161   bool isInnerLoopTest(User *U) {
162     return InnerBranch->getCondition() == U;
163   }
164 
165   bool checkOuterInductionPhiUsers(SmallPtrSet<Value *, 4> &ValidOuterPHIUses) {
166     for (User *U : OuterInductionPHI->users()) {
167       if (isOuterLoopIncrement(U))
168         continue;
169 
170       auto IsValidOuterPHIUses = [&] (User *U) -> bool {
        LLVM_DEBUG(dbgs() << "Found use of outer induction variable: ";
                   U->dump());
172         if (!ValidOuterPHIUses.count(U)) {
173           LLVM_DEBUG(dbgs() << "Did not match expected pattern, bailing\n");
174           return false;
175         }
176         LLVM_DEBUG(dbgs() << "Use is optimisable\n");
177         return true;
178       };
179 
180       if (auto *V = dyn_cast<TruncInst>(U)) {
181         for (auto *K : V->users()) {
182           if (!IsValidOuterPHIUses(K))
183             return false;
184         }
185         continue;
186       }
187 
188       if (!IsValidOuterPHIUses(U))
189         return false;
190     }
191     return true;
192   }
193 
194   bool matchLinearIVUser(User *U, Value *InnerTripCount,
195                          SmallPtrSet<Value *, 4> &ValidOuterPHIUses) {
196     LLVM_DEBUG(dbgs() << "Checking linear i*M+j expression for: "; U->dump());
197     Value *MatchedMul = nullptr;
198     Value *MatchedItCount = nullptr;
199 
200     bool IsAdd = match(U, m_c_Add(m_Specific(InnerInductionPHI),
201                                   m_Value(MatchedMul))) &&
202                  match(MatchedMul, m_c_Mul(m_Specific(OuterInductionPHI),
203                                            m_Value(MatchedItCount)));
204 
205     // Matches the same pattern as above, except it also looks for truncs
206     // on the phi, which can be the result of widening the induction variables.
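    //
    // For example (an illustrative sketch after widening to i64, with made-up
    // value names):
    //
    //   %inner.trunc = trunc i64 %inner.iv to i32
    //   %outer.trunc = trunc i64 %outer.iv to i32
    //   %mul = mul i32 %outer.trunc, %inner.tripcount
    //   %idx = add i32 %mul, %inner.trunc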
207     bool IsAddTrunc =
208         match(U, m_c_Add(m_Trunc(m_Specific(InnerInductionPHI)),
209                          m_Value(MatchedMul))) &&
210         match(MatchedMul, m_c_Mul(m_Trunc(m_Specific(OuterInductionPHI)),
211                                   m_Value(MatchedItCount)));
212 
213     if (!MatchedItCount)
214       return false;
215 
216     LLVM_DEBUG(dbgs() << "Matched multiplication: "; MatchedMul->dump());
217     LLVM_DEBUG(dbgs() << "Matched iteration count: "; MatchedItCount->dump());
218 
219     // The mul should not have any other uses. Widening may leave trivially dead
220     // uses, which can be ignored.
221     if (count_if(MatchedMul->users(), [](User *U) {
222           return !isInstructionTriviallyDead(cast<Instruction>(U));
223         }) > 1) {
224       LLVM_DEBUG(dbgs() << "Multiply has more than one use\n");
225       return false;
226     }
227 
228     // Look through extends if the IV has been widened. Don't look through
229     // extends if we already looked through a trunc.
230     if (Widened && IsAdd &&
231         (isa<SExtInst>(MatchedItCount) || isa<ZExtInst>(MatchedItCount))) {
232       assert(MatchedItCount->getType() == InnerInductionPHI->getType() &&
233              "Unexpected type mismatch in types after widening");
234       MatchedItCount = isa<SExtInst>(MatchedItCount)
235                            ? dyn_cast<SExtInst>(MatchedItCount)->getOperand(0)
236                            : dyn_cast<ZExtInst>(MatchedItCount)->getOperand(0);
237     }
238 
239     LLVM_DEBUG(dbgs() << "Looking for inner trip count: ";
240                InnerTripCount->dump());
241 
242     if ((IsAdd || IsAddTrunc) && MatchedItCount == InnerTripCount) {
      LLVM_DEBUG(dbgs() << "Found. This use is optimisable\n");
244       ValidOuterPHIUses.insert(MatchedMul);
245       LinearIVUses.insert(U);
246       return true;
247     }
248 
249     LLVM_DEBUG(dbgs() << "Did not match expected pattern, bailing\n");
250     return false;
251   }
252 
253   bool checkInnerInductionPhiUsers(SmallPtrSet<Value *, 4> &ValidOuterPHIUses) {
254     Value *SExtInnerTripCount = InnerTripCount;
255     if (Widened &&
256         (isa<SExtInst>(InnerTripCount) || isa<ZExtInst>(InnerTripCount)))
257       SExtInnerTripCount = cast<Instruction>(InnerTripCount)->getOperand(0);
258 
259     for (User *U : InnerInductionPHI->users()) {
260       LLVM_DEBUG(dbgs() << "Checking User: "; U->dump());
261       if (isInnerLoopIncrement(U)) {
262         LLVM_DEBUG(dbgs() << "Use is inner loop increment, continuing\n");
263         continue;
264       }
265 
266       // After widening the IVs, a trunc instruction might have been introduced,
267       // so look through truncs.
268       if (isa<TruncInst>(U)) {
269         if (!U->hasOneUse())
270           return false;
271         U = *U->user_begin();
272       }
273 
      // If the use is in the compare (which is also the condition of the inner
      // branch) then the compare has been altered by another transformation,
      // e.g. icmp ult %inc, tripcount -> icmp ult %j, tripcount-1, where
      // tripcount is a constant. Ignore this use as the compare gets removed
      // later anyway.
278       if (isInnerLoopTest(U)) {
279         LLVM_DEBUG(dbgs() << "Use is the inner loop test, continuing\n");
280         continue;
281       }
282 
283       if (!matchLinearIVUser(U, SExtInnerTripCount, ValidOuterPHIUses)) {
284         LLVM_DEBUG(dbgs() << "Not a linear IV user\n");
285         return false;
286       }
287       LLVM_DEBUG(dbgs() << "Linear IV users found!\n");
288     }
289     return true;
290   }
291 };
292 } // namespace
293 
294 static bool
295 setLoopComponents(Value *&TC, Value *&TripCount, BinaryOperator *&Increment,
296                   SmallPtrSetImpl<Instruction *> &IterationInstructions) {
297   TripCount = TC;
298   IterationInstructions.insert(Increment);
299   LLVM_DEBUG(dbgs() << "Found Increment: "; Increment->dump());
300   LLVM_DEBUG(dbgs() << "Found trip count: "; TripCount->dump());
301   LLVM_DEBUG(dbgs() << "Successfully found all loop components\n");
302   return true;
303 }
304 
305 // Given the RHS of the loop latch compare instruction, verify with SCEV
306 // that this is indeed the loop tripcount.
307 // TODO: This used to be a straightforward check but has grown to be quite
308 // complicated now. It is therefore worth revisiting what the additional
309 // benefits are of this (compared to relying on canonical loops and pattern
310 // matching).
static bool verifyTripCount(
    Value *RHS, Loop *L, SmallPtrSetImpl<Instruction *> &IterationInstructions,
    PHINode *&InductionPHI, Value *&TripCount, BinaryOperator *&Increment,
    BranchInst *&BackBranch, ScalarEvolution *SE, bool IsWidened) {
315   const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
316   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
317     LLVM_DEBUG(dbgs() << "Backedge-taken count is not predictable\n");
318     return false;
319   }
320 
  // The Extend=false flag is used for getTripCountFromExitCount as we want
  // to verify and match it with the pattern-matched tripcount. Please note
  // that overflow checks are performed in checkOverflow, but we first try to
  // avoid them by widening the IV.
325   const SCEV *SCEVTripCount =
326       SE->getTripCountFromExitCount(BackedgeTakenCount, /*Extend=*/false);
327 
328   const SCEV *SCEVRHS = SE->getSCEV(RHS);
329   if (SCEVRHS == SCEVTripCount)
330     return setLoopComponents(RHS, TripCount, Increment, IterationInstructions);
331   ConstantInt *ConstantRHS = dyn_cast<ConstantInt>(RHS);
332   if (ConstantRHS) {
333     const SCEV *BackedgeTCExt = nullptr;
334     if (IsWidened) {
335       const SCEV *SCEVTripCountExt;
336       // Find the extended backedge taken count and extended trip count using
337       // SCEV. One of these should now match the RHS of the compare.
338       BackedgeTCExt = SE->getZeroExtendExpr(BackedgeTakenCount, RHS->getType());
339       SCEVTripCountExt = SE->getTripCountFromExitCount(BackedgeTCExt, false);
340       if (SCEVRHS != BackedgeTCExt && SCEVRHS != SCEVTripCountExt) {
341         LLVM_DEBUG(dbgs() << "Could not find valid trip count\n");
342         return false;
343       }
344     }
345     // If the RHS of the compare is equal to the backedge taken count we need
346     // to add one to get the trip count.
347     if (SCEVRHS == BackedgeTCExt || SCEVRHS == BackedgeTakenCount) {
348       ConstantInt *One = ConstantInt::get(ConstantRHS->getType(), 1);
349       Value *NewRHS = ConstantInt::get(
350           ConstantRHS->getContext(), ConstantRHS->getValue() + One->getValue());
351       return setLoopComponents(NewRHS, TripCount, Increment,
352                                IterationInstructions);
353     }
354     return setLoopComponents(RHS, TripCount, Increment, IterationInstructions);
355   }
356   // If the RHS isn't a constant then check that the reason it doesn't match
357   // the SCEV trip count is because the RHS is a ZExt or SExt instruction
358   // (and take the trip count to be the RHS).
359   if (!IsWidened) {
360     LLVM_DEBUG(dbgs() << "Could not find valid trip count\n");
361     return false;
362   }
363   auto *TripCountInst = dyn_cast<Instruction>(RHS);
364   if (!TripCountInst) {
365     LLVM_DEBUG(dbgs() << "Could not find valid trip count\n");
366     return false;
367   }
368   if ((!isa<ZExtInst>(TripCountInst) && !isa<SExtInst>(TripCountInst)) ||
369       SE->getSCEV(TripCountInst->getOperand(0)) != SCEVTripCount) {
370     LLVM_DEBUG(dbgs() << "Could not find valid extended trip count\n");
371     return false;
372   }
373   return setLoopComponents(RHS, TripCount, Increment, IterationInstructions);
374 }
375 
376 // Finds the induction variable, increment and trip count for a simple loop that
377 // we can flatten.
378 static bool findLoopComponents(
379     Loop *L, SmallPtrSetImpl<Instruction *> &IterationInstructions,
380     PHINode *&InductionPHI, Value *&TripCount, BinaryOperator *&Increment,
381     BranchInst *&BackBranch, ScalarEvolution *SE, bool IsWidened) {
382   LLVM_DEBUG(dbgs() << "Finding components of loop: " << L->getName() << "\n");
383 
384   if (!L->isLoopSimplifyForm()) {
385     LLVM_DEBUG(dbgs() << "Loop is not in normal form\n");
386     return false;
387   }
388 
389   // Currently, to simplify the implementation, the Loop induction variable must
390   // start at zero and increment with a step size of one.
391   if (!L->isCanonical(*SE)) {
392     LLVM_DEBUG(dbgs() << "Loop is not canonical\n");
393     return false;
394   }
395 
  // There must be exactly one exiting block, and it must be the same as the
  // latch.
398   BasicBlock *Latch = L->getLoopLatch();
399   if (L->getExitingBlock() != Latch) {
400     LLVM_DEBUG(dbgs() << "Exiting and latch block are different\n");
401     return false;
402   }
403 
404   // Find the induction PHI. If there is no induction PHI, we can't do the
405   // transformation. TODO: could other variables trigger this? Do we have to
406   // search for the best one?
407   InductionPHI = L->getInductionVariable(*SE);
408   if (!InductionPHI) {
409     LLVM_DEBUG(dbgs() << "Could not find induction PHI\n");
410     return false;
411   }
412   LLVM_DEBUG(dbgs() << "Found induction PHI: "; InductionPHI->dump());
413 
414   bool ContinueOnTrue = L->contains(Latch->getTerminator()->getSuccessor(0));
415   auto IsValidPredicate = [&](ICmpInst::Predicate Pred) {
416     if (ContinueOnTrue)
417       return Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT;
418     else
419       return Pred == CmpInst::ICMP_EQ;
420   };
421 
422   // Find Compare and make sure it is valid. getLatchCmpInst checks that the
423   // back branch of the latch is conditional.
424   ICmpInst *Compare = L->getLatchCmpInst();
425   if (!Compare || !IsValidPredicate(Compare->getUnsignedPredicate()) ||
426       Compare->hasNUsesOrMore(2)) {
427     LLVM_DEBUG(dbgs() << "Could not find valid comparison\n");
428     return false;
429   }
430   BackBranch = cast<BranchInst>(Latch->getTerminator());
431   IterationInstructions.insert(BackBranch);
432   LLVM_DEBUG(dbgs() << "Found back branch: "; BackBranch->dump());
433   IterationInstructions.insert(Compare);
434   LLVM_DEBUG(dbgs() << "Found comparison: "; Compare->dump());
435 
436   // Find increment and trip count.
437   // There are exactly 2 incoming values to the induction phi; one from the
438   // pre-header and one from the latch. The incoming latch value is the
439   // increment variable.
440   Increment =
441       cast<BinaryOperator>(InductionPHI->getIncomingValueForBlock(Latch));
442   if ((Compare->getOperand(0) != Increment || !Increment->hasNUses(2)) &&
443       !Increment->hasNUses(1)) {
444     LLVM_DEBUG(dbgs() << "Could not find valid increment\n");
445     return false;
446   }
447   // The trip count is the RHS of the compare. If this doesn't match the trip
448   // count computed by SCEV then this is because the trip count variable
449   // has been widened so the types don't match, or because it is a constant and
450   // another transformation has changed the compare (e.g. icmp ult %inc,
451   // tripcount -> icmp ult %j, tripcount-1), or both.
452   Value *RHS = Compare->getOperand(1);
453 
454   return verifyTripCount(RHS, L, IterationInstructions, InductionPHI, TripCount,
455                          Increment, BackBranch, SE, IsWidened);
456 }
457 
458 static bool checkPHIs(FlattenInfo &FI, const TargetTransformInfo *TTI) {
459   // All PHIs in the inner and outer headers must either be:
460   // - The induction PHI, which we are going to rewrite as one induction in
461   //   the new loop. This is already checked by findLoopComponents.
462   // - An outer header PHI with all incoming values from outside the loop.
463   //   LoopSimplify guarantees we have a pre-header, so we don't need to
464   //   worry about that here.
465   // - Pairs of PHIs in the inner and outer headers, which implement a
466   //   loop-carried dependency that will still be valid in the new loop. To
467   //   be valid, this variable must be modified only in the inner loop.
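  //
  // An example of the last case (an illustrative sketch) is a reduction such
  // as:
  //
  //   for (int i = 0; i < N; ++i)
  //     for (int j = 0; j < M; ++j)
  //       sum += A[i*M+j];
  //
  // Here 'sum' has a PHI node in both headers; the inner PHI's pre-header
  // value is the outer header PHI, and its latch value reaches the outer PHI
  // through an LCSSA PHI in the inner loop's exit block.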
468 
469   // The set of PHI nodes in the outer loop header that we know will still be
470   // valid after the transformation. These will not need to be modified (with
471   // the exception of the induction variable), but we do need to check that
472   // there are no unsafe PHI nodes.
473   SmallPtrSet<PHINode *, 4> SafeOuterPHIs;
474   SafeOuterPHIs.insert(FI.OuterInductionPHI);
475 
476   // Check that all PHI nodes in the inner loop header match one of the valid
477   // patterns.
478   for (PHINode &InnerPHI : FI.InnerLoop->getHeader()->phis()) {
479     // The induction PHIs break these rules, and that's OK because we treat
480     // them specially when doing the transformation.
481     if (&InnerPHI == FI.InnerInductionPHI)
482       continue;
483     if (FI.isNarrowInductionPhi(&InnerPHI))
484       continue;
485 
486     // Each inner loop PHI node must have two incoming values/blocks - one
487     // from the pre-header, and one from the latch.
488     assert(InnerPHI.getNumIncomingValues() == 2);
489     Value *PreHeaderValue =
490         InnerPHI.getIncomingValueForBlock(FI.InnerLoop->getLoopPreheader());
491     Value *LatchValue =
492         InnerPHI.getIncomingValueForBlock(FI.InnerLoop->getLoopLatch());
493 
494     // The incoming value from the outer loop must be the PHI node in the
495     // outer loop header, with no modifications made in the top of the outer
496     // loop.
497     PHINode *OuterPHI = dyn_cast<PHINode>(PreHeaderValue);
498     if (!OuterPHI || OuterPHI->getParent() != FI.OuterLoop->getHeader()) {
499       LLVM_DEBUG(dbgs() << "value modified in top of outer loop\n");
500       return false;
501     }
502 
503     // The other incoming value must come from the inner loop, without any
504     // modifications in the tail end of the outer loop. We are in LCSSA form,
505     // so this will actually be a PHI in the inner loop's exit block, which
506     // only uses values from inside the inner loop.
507     PHINode *LCSSAPHI = dyn_cast<PHINode>(
508         OuterPHI->getIncomingValueForBlock(FI.OuterLoop->getLoopLatch()));
509     if (!LCSSAPHI) {
510       LLVM_DEBUG(dbgs() << "could not find LCSSA PHI\n");
511       return false;
512     }
513 
514     // The value used by the LCSSA PHI must be the same one that the inner
515     // loop's PHI uses.
516     if (LCSSAPHI->hasConstantValue() != LatchValue) {
517       LLVM_DEBUG(
518           dbgs() << "LCSSA PHI incoming value does not match latch value\n");
519       return false;
520     }
521 
522     LLVM_DEBUG(dbgs() << "PHI pair is safe:\n");
523     LLVM_DEBUG(dbgs() << "  Inner: "; InnerPHI.dump());
524     LLVM_DEBUG(dbgs() << "  Outer: "; OuterPHI->dump());
525     SafeOuterPHIs.insert(OuterPHI);
526     FI.InnerPHIsToTransform.insert(&InnerPHI);
527   }
528 
529   for (PHINode &OuterPHI : FI.OuterLoop->getHeader()->phis()) {
530     if (FI.isNarrowInductionPhi(&OuterPHI))
531       continue;
532     if (!SafeOuterPHIs.count(&OuterPHI)) {
533       LLVM_DEBUG(dbgs() << "found unsafe PHI in outer loop: "; OuterPHI.dump());
534       return false;
535     }
536   }
537 
538   LLVM_DEBUG(dbgs() << "checkPHIs: OK\n");
539   return true;
540 }
541 
542 static bool
543 checkOuterLoopInsts(FlattenInfo &FI,
544                     SmallPtrSetImpl<Instruction *> &IterationInstructions,
545                     const TargetTransformInfo *TTI) {
546   // Check for instructions in the outer but not inner loop. If any of these
547   // have side-effects then this transformation is not legal, and if there is
  // a significant amount of code here which can't be optimised out then it's
549   // not profitable (as these instructions would get executed for each
550   // iteration of the inner loop).
551   InstructionCost RepeatedInstrCost = 0;
552   for (auto *B : FI.OuterLoop->getBlocks()) {
553     if (FI.InnerLoop->contains(B))
554       continue;
555 
556     for (auto &I : *B) {
557       if (!isa<PHINode>(&I) && !I.isTerminator() &&
558           !isSafeToSpeculativelyExecute(&I)) {
559         LLVM_DEBUG(dbgs() << "Cannot flatten because instruction may have "
560                              "side effects: ";
561                    I.dump());
562         return false;
563       }
564       // The execution count of the outer loop's iteration instructions
565       // (increment, compare and branch) will be increased, but the
566       // equivalent instructions will be removed from the inner loop, so
567       // they make a net difference of zero.
568       if (IterationInstructions.count(&I))
569         continue;
570       // The unconditional branch to the inner loop's header will turn into
571       // a fall-through, so adds no cost.
572       BranchInst *Br = dyn_cast<BranchInst>(&I);
573       if (Br && Br->isUnconditional() &&
574           Br->getSuccessor(0) == FI.InnerLoop->getHeader())
575         continue;
576       // Multiplies of the outer iteration variable and inner iteration
577       // count will be optimised out.
578       if (match(&I, m_c_Mul(m_Specific(FI.OuterInductionPHI),
579                             m_Specific(FI.InnerTripCount))))
580         continue;
581       InstructionCost Cost =
582           TTI->getInstructionCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
583       LLVM_DEBUG(dbgs() << "Cost " << Cost << ": "; I.dump());
584       RepeatedInstrCost += Cost;
585     }
586   }
587 
588   LLVM_DEBUG(dbgs() << "Cost of instructions that will be repeated: "
589                     << RepeatedInstrCost << "\n");
590   // Bail out if flattening the loops would cause instructions in the outer
591   // loop but not in the inner loop to be executed extra times.
592   if (RepeatedInstrCost > RepeatedInstructionThreshold) {
593     LLVM_DEBUG(dbgs() << "checkOuterLoopInsts: not profitable, bailing.\n");
594     return false;
595   }
596 
597   LLVM_DEBUG(dbgs() << "checkOuterLoopInsts: OK\n");
598   return true;
599 }
600 
603 // We require all uses of both induction variables to match this pattern:
604 //
605 //   (OuterPHI * InnerTripCount) + InnerPHI
606 //
607 // Any uses of the induction variables not matching that pattern would
608 // require a div/mod to reconstruct in the flattened loop, so the
609 // transformation wouldn't be profitable.
610 static bool checkIVUsers(FlattenInfo &FI) {
611   // Check that all uses of the inner loop's induction variable match the
612   // expected pattern, recording the uses of the outer IV.
613   SmallPtrSet<Value *, 4> ValidOuterPHIUses;
614   if (!FI.checkInnerInductionPhiUsers(ValidOuterPHIUses))
615     return false;
616 
617   // Check that there are no uses of the outer IV other than the ones found
618   // as part of the pattern above.
619   if (!FI.checkOuterInductionPhiUsers(ValidOuterPHIUses))
620     return false;
621 
622   LLVM_DEBUG(dbgs() << "checkIVUsers: OK\n";
623              dbgs() << "Found " << FI.LinearIVUses.size()
624                     << " value(s) that can be replaced:\n";
625              for (Value *V : FI.LinearIVUses) {
626                dbgs() << "  ";
627                V->dump();
628              });
629   return true;
630 }
631 
// Return an OverflowResult dependent on whether overflow of the
// multiplication of InnerTripCount and OuterTripCount can be assumed not to
// happen.
634 static OverflowResult checkOverflow(FlattenInfo &FI, DominatorTree *DT,
635                                     AssumptionCache *AC) {
636   Function *F = FI.OuterLoop->getHeader()->getParent();
637   const DataLayout &DL = F->getParent()->getDataLayout();
638 
639   // For debugging/testing.
640   if (AssumeNoOverflow)
641     return OverflowResult::NeverOverflows;
642 
643   // Check if the multiply could not overflow due to known ranges of the
644   // input values.
645   OverflowResult OR = computeOverflowForUnsignedMul(
646       FI.InnerTripCount, FI.OuterTripCount, DL, AC,
647       FI.OuterLoop->getLoopPreheader()->getTerminator(), DT);
648   if (OR != OverflowResult::MayOverflow)
649     return OR;
650 
651   for (Value *V : FI.LinearIVUses) {
652     for (Value *U : V->users()) {
653       if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
654         for (Value *GEPUser : U->users()) {
655           auto *GEPUserInst = cast<Instruction>(GEPUser);
656           if (!isa<LoadInst>(GEPUserInst) &&
657               !(isa<StoreInst>(GEPUserInst) &&
658                 GEP == GEPUserInst->getOperand(1)))
659             continue;
660           if (!isGuaranteedToExecuteForEveryIteration(GEPUserInst,
661                                                       FI.InnerLoop))
662             continue;
663           // The IV is used as the operand of a GEP which dominates the loop
664           // latch, and the IV is at least as wide as the address space of the
665           // GEP. In this case, the GEP would wrap around the address space
666           // before the IV increment wraps, which would be UB.
667           if (GEP->isInBounds() &&
668               V->getType()->getIntegerBitWidth() >=
669                   DL.getPointerTypeSizeInBits(GEP->getType())) {
670             LLVM_DEBUG(
671                 dbgs() << "use of linear IV would be UB if overflow occurred: ";
672                 GEP->dump());
673             return OverflowResult::NeverOverflows;
674           }
675         }
676       }
677     }
678   }
679 
680   return OverflowResult::MayOverflow;
681 }
682 
683 static bool CanFlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
684                                ScalarEvolution *SE, AssumptionCache *AC,
685                                const TargetTransformInfo *TTI) {
686   SmallPtrSet<Instruction *, 8> IterationInstructions;
687   if (!findLoopComponents(FI.InnerLoop, IterationInstructions,
688                           FI.InnerInductionPHI, FI.InnerTripCount,
689                           FI.InnerIncrement, FI.InnerBranch, SE, FI.Widened))
690     return false;
691   if (!findLoopComponents(FI.OuterLoop, IterationInstructions,
692                           FI.OuterInductionPHI, FI.OuterTripCount,
693                           FI.OuterIncrement, FI.OuterBranch, SE, FI.Widened))
694     return false;
695 
696   // Both of the loop trip count values must be invariant in the outer loop
697   // (non-instructions are all inherently invariant).
698   if (!FI.OuterLoop->isLoopInvariant(FI.InnerTripCount)) {
699     LLVM_DEBUG(dbgs() << "inner loop trip count not invariant\n");
700     return false;
701   }
702   if (!FI.OuterLoop->isLoopInvariant(FI.OuterTripCount)) {
703     LLVM_DEBUG(dbgs() << "outer loop trip count not invariant\n");
704     return false;
705   }
706 
707   if (!checkPHIs(FI, TTI))
708     return false;
709 
710   // FIXME: it should be possible to handle different types correctly.
711   if (FI.InnerInductionPHI->getType() != FI.OuterInductionPHI->getType())
712     return false;
713 
714   if (!checkOuterLoopInsts(FI, IterationInstructions, TTI))
715     return false;
716 
717   // Find the values in the loop that can be replaced with the linearized
718   // induction variable, and check that there are no other uses of the inner
719   // or outer induction variable. If there were, we could still do this
720   // transformation, but we'd have to insert a div/mod to calculate the
721   // original IVs, so it wouldn't be profitable.
722   if (!checkIVUsers(FI))
723     return false;
724 
725   LLVM_DEBUG(dbgs() << "CanFlattenLoopPair: OK\n");
726   return true;
727 }
728 
729 static bool DoFlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
730                               ScalarEvolution *SE, AssumptionCache *AC,
731                               const TargetTransformInfo *TTI, LPMUpdater *U,
732                               MemorySSAUpdater *MSSAU) {
733   Function *F = FI.OuterLoop->getHeader()->getParent();
734   LLVM_DEBUG(dbgs() << "Checks all passed, doing the transformation\n");
735   {
736     using namespace ore;
737     OptimizationRemark Remark(DEBUG_TYPE, "Flattened", FI.InnerLoop->getStartLoc(),
738                               FI.InnerLoop->getHeader());
739     OptimizationRemarkEmitter ORE(F);
740     Remark << "Flattened into outer loop";
741     ORE.emit(Remark);
742   }
743 
744   Value *NewTripCount = BinaryOperator::CreateMul(
745       FI.InnerTripCount, FI.OuterTripCount, "flatten.tripcount",
746       FI.OuterLoop->getLoopPreheader()->getTerminator());
747   LLVM_DEBUG(dbgs() << "Created new trip count in preheader: ";
748              NewTripCount->dump());
749 
750   // Fix up PHI nodes that take values from the inner loop back-edge, which
751   // we are about to remove.
752   FI.InnerInductionPHI->removeIncomingValue(FI.InnerLoop->getLoopLatch());
753 
  // The old PHIs will be optimised away later, but for now we can't leave them
  // in an invalid state, so we update them here too.
756   for (PHINode *PHI : FI.InnerPHIsToTransform)
757     PHI->removeIncomingValue(FI.InnerLoop->getLoopLatch());
758 
759   // Modify the trip count of the outer loop to be the product of the two
760   // trip counts.
761   cast<User>(FI.OuterBranch->getCondition())->setOperand(1, NewTripCount);
762 
763   // Replace the inner loop backedge with an unconditional branch to the exit.
764   BasicBlock *InnerExitBlock = FI.InnerLoop->getExitBlock();
765   BasicBlock *InnerExitingBlock = FI.InnerLoop->getExitingBlock();
766   InnerExitingBlock->getTerminator()->eraseFromParent();
767   BranchInst::Create(InnerExitBlock, InnerExitingBlock);
768 
769   // Update the DomTree and MemorySSA.
770   DT->deleteEdge(InnerExitingBlock, FI.InnerLoop->getHeader());
771   if (MSSAU)
772     MSSAU->removeEdge(InnerExitingBlock, FI.InnerLoop->getHeader());
773 
  // Replace all uses of the polynomial calculated from the two induction
  // variables with the single new induction variable (truncated if the IVs
  // were widened).
776   IRBuilder<> Builder(FI.OuterInductionPHI->getParent()->getTerminator());
777   for (Value *V : FI.LinearIVUses) {
778     Value *OuterValue = FI.OuterInductionPHI;
779     if (FI.Widened)
780       OuterValue = Builder.CreateTrunc(FI.OuterInductionPHI, V->getType(),
781                                        "flatten.trunciv");
782 
783     LLVM_DEBUG(dbgs() << "Replacing: "; V->dump(); dbgs() << "with:      ";
784                OuterValue->dump());
785     V->replaceAllUsesWith(OuterValue);
786   }
787 
788   // Tell LoopInfo, SCEV and the pass manager that the inner loop has been
789   // deleted, and invalidate any outer loop information.
790   SE->forgetLoop(FI.OuterLoop);
791   SE->forgetBlockAndLoopDispositions();
792   if (U)
793     U->markLoopAsDeleted(*FI.InnerLoop, FI.InnerLoop->getName());
794   LI->erase(FI.InnerLoop);
795 
796   // Increment statistic value.
797   NumFlattened++;
798 
799   return true;
800 }
801 
802 static bool CanWidenIV(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
803                        ScalarEvolution *SE, AssumptionCache *AC,
804                        const TargetTransformInfo *TTI) {
805   if (!WidenIV) {
806     LLVM_DEBUG(dbgs() << "Widening the IVs is disabled\n");
807     return false;
808   }
809 
810   LLVM_DEBUG(dbgs() << "Try widening the IVs\n");
811   Module *M = FI.InnerLoop->getHeader()->getParent()->getParent();
812   auto &DL = M->getDataLayout();
813   auto *InnerType = FI.InnerInductionPHI->getType();
814   auto *OuterType = FI.OuterInductionPHI->getType();
815   unsigned MaxLegalSize = DL.getLargestLegalIntTypeSizeInBits();
816   auto *MaxLegalType = DL.getLargestLegalIntType(M->getContext());
817 
818   // If both induction types are less than the maximum legal integer width,
819   // promote both to the widest type available so we know calculating
820   // (OuterTripCount * InnerTripCount) as the new trip count is safe.
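  // For example (an illustrative sketch on a target whose largest legal
  // integer type is i64): two i32 IVs are both widened to i64, so the product
  // of the two (at most 32-bit) trip counts always fits in the new 64-bit IV.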
821   if (InnerType != OuterType ||
822       InnerType->getScalarSizeInBits() >= MaxLegalSize ||
823       MaxLegalType->getScalarSizeInBits() <
824           InnerType->getScalarSizeInBits() * 2) {
825     LLVM_DEBUG(dbgs() << "Can't widen the IV\n");
826     return false;
827   }
828 
829   SCEVExpander Rewriter(*SE, DL, "loopflatten");
830   SmallVector<WeakTrackingVH, 4> DeadInsts;
831   unsigned ElimExt = 0;
832   unsigned Widened = 0;
833 
834   auto CreateWideIV = [&](WideIVInfo WideIV, bool &Deleted) -> bool {
835     PHINode *WidePhi =
836         createWideIV(WideIV, LI, SE, Rewriter, DT, DeadInsts, ElimExt, Widened,
837                      true /* HasGuards */, true /* UsePostIncrementRanges */);
838     if (!WidePhi)
839       return false;
840     LLVM_DEBUG(dbgs() << "Created wide phi: "; WidePhi->dump());
841     LLVM_DEBUG(dbgs() << "Deleting old phi: "; WideIV.NarrowIV->dump());
842     Deleted = RecursivelyDeleteDeadPHINode(WideIV.NarrowIV);
843     return true;
844   };
845 
846   bool Deleted;
847   if (!CreateWideIV({FI.InnerInductionPHI, MaxLegalType, false}, Deleted))
848     return false;
  // Add the narrow phi to the list, so that it will be adjusted later when
  // the transformation is performed.
851   if (!Deleted)
852     FI.InnerPHIsToTransform.insert(FI.InnerInductionPHI);
853 
854   if (!CreateWideIV({FI.OuterInductionPHI, MaxLegalType, false}, Deleted))
855     return false;
856 
857   assert(Widened && "Widened IV expected");
858   FI.Widened = true;
859 
860   // Save the old/narrow induction phis, which we need to ignore in CheckPHIs.
861   FI.NarrowInnerInductionPHI = FI.InnerInductionPHI;
862   FI.NarrowOuterInductionPHI = FI.OuterInductionPHI;
863 
864   // After widening, rediscover all the loop components.
865   return CanFlattenLoopPair(FI, DT, LI, SE, AC, TTI);
866 }
867 
868 static bool FlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
869                             ScalarEvolution *SE, AssumptionCache *AC,
870                             const TargetTransformInfo *TTI, LPMUpdater *U,
871                             MemorySSAUpdater *MSSAU) {
872   LLVM_DEBUG(
873       dbgs() << "Loop flattening running on outer loop "
874              << FI.OuterLoop->getHeader()->getName() << " and inner loop "
875              << FI.InnerLoop->getHeader()->getName() << " in "
876              << FI.OuterLoop->getHeader()->getParent()->getName() << "\n");
877 
878   if (!CanFlattenLoopPair(FI, DT, LI, SE, AC, TTI))
879     return false;
880 
881   // Check if we can widen the induction variables to avoid overflow checks.
882   bool CanFlatten = CanWidenIV(FI, DT, LI, SE, AC, TTI);
883 
  // It can happen that after widening the IVs, flattening is still not
  // possible, e.g. when it is deemed unprofitable. So bail here if that is
  // the case.
  // TODO: IV widening without performing the actual flattening transformation
  // is not ideal. While this codegen change should not matter much, it is an
  // unnecessary change which is better to avoid. It's unlikely this happens
  // often, because if it's unprofitable after widening, it should be
  // unprofitable before widening as checked in the first round of checks. But
  // 'RepeatedInstructionThreshold' is set to only 2, which can probably be
  // relaxed. Because this is making a code change (the IV widening, but not
  // the flattening), we return true here.
895   if (FI.Widened && !CanFlatten)
896     return true;
897 
898   // If we have widened and can perform the transformation, do that here.
899   if (CanFlatten)
900     return DoFlattenLoopPair(FI, DT, LI, SE, AC, TTI, U, MSSAU);
901 
902   // Otherwise, if we haven't widened the IV, check if the new iteration
903   // variable might overflow. In this case, we need to version the loop, and
904   // select the original version at runtime if the iteration space is too
905   // large.
906   // TODO: We currently don't version the loop.
907   OverflowResult OR = checkOverflow(FI, DT, AC);
908   if (OR == OverflowResult::AlwaysOverflowsHigh ||
909       OR == OverflowResult::AlwaysOverflowsLow) {
910     LLVM_DEBUG(dbgs() << "Multiply would always overflow, so not profitable\n");
911     return false;
912   } else if (OR == OverflowResult::MayOverflow) {
913     LLVM_DEBUG(dbgs() << "Multiply might overflow, not flattening\n");
914     return false;
915   }
916 
917   LLVM_DEBUG(dbgs() << "Multiply cannot overflow, modifying loop in-place\n");
918   return DoFlattenLoopPair(FI, DT, LI, SE, AC, TTI, U, MSSAU);
919 }
920 
921 bool Flatten(LoopNest &LN, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE,
922              AssumptionCache *AC, TargetTransformInfo *TTI, LPMUpdater *U,
923              MemorySSAUpdater *MSSAU) {
924   bool Changed = false;
925   for (Loop *InnerLoop : LN.getLoops()) {
926     auto *OuterLoop = InnerLoop->getParentLoop();
927     if (!OuterLoop)
928       continue;
929     FlattenInfo FI(OuterLoop, InnerLoop);
930     Changed |= FlattenLoopPair(FI, DT, LI, SE, AC, TTI, U, MSSAU);
931   }
932   return Changed;
933 }
934 
935 PreservedAnalyses LoopFlattenPass::run(LoopNest &LN, LoopAnalysisManager &LAM,
936                                        LoopStandardAnalysisResults &AR,
937                                        LPMUpdater &U) {
938 
939   bool Changed = false;
940 
941   std::optional<MemorySSAUpdater> MSSAU;
942   if (AR.MSSA) {
943     MSSAU = MemorySSAUpdater(AR.MSSA);
944     if (VerifyMemorySSA)
945       AR.MSSA->verifyMemorySSA();
946   }
947 
  // The loop flattening pass requires loops to be in simplified form, and also
  // needs LCSSA. Running this pass will simplify all loops that contain inner
  // loops, regardless of whether anything ends up being flattened.
952   Changed |= Flatten(LN, &AR.DT, &AR.LI, &AR.SE, &AR.AC, &AR.TTI, &U,
953                      MSSAU ? &*MSSAU : nullptr);
954 
955   if (!Changed)
956     return PreservedAnalyses::all();
957 
958   if (AR.MSSA && VerifyMemorySSA)
959     AR.MSSA->verifyMemorySSA();
960 
961   auto PA = getLoopPassPreservedAnalyses();
962   if (AR.MSSA)
963     PA.preserve<MemorySSAAnalysis>();
964   return PA;
965 }
966 
967 namespace {
968 class LoopFlattenLegacyPass : public FunctionPass {
969 public:
970   static char ID; // Pass ID, replacement for typeid
971   LoopFlattenLegacyPass() : FunctionPass(ID) {
972     initializeLoopFlattenLegacyPassPass(*PassRegistry::getPassRegistry());
973   }
974 
  // Possibly flatten the nested loops in F into single loops.
976   bool runOnFunction(Function &F) override;
977 
978   void getAnalysisUsage(AnalysisUsage &AU) const override {
979     getLoopAnalysisUsage(AU);
980     AU.addRequired<TargetTransformInfoWrapperPass>();
981     AU.addPreserved<TargetTransformInfoWrapperPass>();
982     AU.addRequired<AssumptionCacheTracker>();
983     AU.addPreserved<AssumptionCacheTracker>();
984     AU.addPreserved<MemorySSAWrapperPass>();
985   }
986 };
987 } // namespace
988 
989 char LoopFlattenLegacyPass::ID = 0;
990 INITIALIZE_PASS_BEGIN(LoopFlattenLegacyPass, "loop-flatten", "Flattens loops",
991                       false, false)
992 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
993 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
994 INITIALIZE_PASS_END(LoopFlattenLegacyPass, "loop-flatten", "Flattens loops",
995                     false, false)
996 
997 FunctionPass *llvm::createLoopFlattenPass() {
998   return new LoopFlattenLegacyPass();
999 }
1000 
1001 bool LoopFlattenLegacyPass::runOnFunction(Function &F) {
1002   ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
1003   LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1004   auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
1005   DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
1006   auto &TTIP = getAnalysis<TargetTransformInfoWrapperPass>();
1007   auto *TTI = &TTIP.getTTI(F);
1008   auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1009   auto *MSSA = getAnalysisIfAvailable<MemorySSAWrapperPass>();
1010 
1011   std::optional<MemorySSAUpdater> MSSAU;
1012   if (MSSA)
1013     MSSAU = MemorySSAUpdater(&MSSA->getMSSA());
1014 
1015   bool Changed = false;
1016   for (Loop *L : *LI) {
1017     auto LN = LoopNest::getLoopNest(*L, *SE);
1018     Changed |=
1019         Flatten(*LN, DT, LI, SE, AC, TTI, nullptr, MSSAU ? &*MSSAU : nullptr);
1020   }
1021   return Changed;
1022 }
1023