//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

using namespace llvm;
cl::opt<unsigned> llvm::SCEVCheapExpansionBudget(
    "scev-cheap-expansion-budget", cl::Hidden, cl::init(4),
    cl::desc("When performing SCEV expansion only if it is cheap to do so, "
             "this controls the budget that is considered cheap (default = 4)"));
using namespace PatternMatch;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one (i.e. one dominating IP)
/// exists, or creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Value *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users()) {
    if (U->getType() != Ty)
      continue;
    CastInst *CI = dyn_cast<CastInst>(U);
    if (!CI || CI->getOpcode() != Op)
      continue;

    // Found a suitable cast that is at IP or comes before IP. Use it. Note that
    // the cast must also properly dominate the Builder's insertion point.
    if (IP->getParent() == CI->getParent() && &*BIP != CI &&
        (&*IP == CI || CI->comesBefore(&*IP))) {
      Ret = CI;
      break;
    }
  }

  // Create a new cast.
  if (!Ret) {
    SCEVInsertPointGuard Guard(Builder, this);
    Builder.SetInsertPoint(&*IP);
    Ret = Builder.CreateCast(Op, V, Ty, V->getName());
  }

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(!isa<Instruction>(Ret) ||
         SE.DT.dominates(cast<Instruction>(Ret), &*BIP));

  return Ret;
}

BasicBlock::iterator
SCEVExpander::findInsertPointAfter(Instruction *I,
                                   Instruction *MustDominate) const {
  BasicBlock::iterator IP = ++I->getIterator();
  if (auto *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();

  while (isa<PHINode>(IP))
    ++IP;

  if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
    ++IP;
  } else if (isa<CatchSwitchInst>(IP)) {
    IP = MustDominate->getParent()->getFirstInsertionPt();
  } else {
    assert(!IP->isEHPad() && "unexpected eh pad!");
  }

  // Adjust insert point to be after instructions inserted by the expander, so
  // we can re-use already inserted instructions. Avoid skipping past the
  // original \p MustDominate, in case it is an inserted instruction.
  while (isInsertedInstruction(&*IP) && &*IP != MustDominate)
    ++IP;

  return IP;
}

BasicBlock::iterator
SCEVExpander::GetOptimalInsertionPointForCastOf(Value *V) const {
  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP))
      ++IP;
    return IP;
  }

  // Insert the cast immediately after the instruction.
  if (Instruction *I = dyn_cast<Instruction>(V))
    return findInsertPointAfter(I, &*Builder.GetInsertPoint());

  // Otherwise, this must be some kind of a constant,
  // so let's plop this cast into the function's entry block.
  assert(isa<Constant>(V) &&
         "Expected the cast argument to be a global/constant");
  return Builder.GetInsertBlock()
      ->getParent()
      ->getEntryBlock()
      .getFirstInsertionPt();
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // inttoptr only works for integral pointers. For non-integral pointers, we
  // can create a GEP on i8* null with the integral value as the index. Note
  // that it is safe to use a GEP of null instead of inttoptr here, because
  // only expressions already based on a GEP of null should be converted to
  // pointers during expansion.
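  // For example (illustrative), converting an i64 %x to a non-integral
  // i8 addrspace(1)* would be emitted as
  //   %uglygep = getelementptr i8, i8 addrspace(1)* null, i64 %x
  // rather than as an inttoptr of %x.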
  if (Op == Instruction::IntToPtr) {
    auto *PtrTy = cast<PointerType>(Ty);
    if (DL.isNonIntegralPointerType(PtrTy)) {
      auto *Int8PtrTy = Builder.getInt8PtrTy(PtrTy->getAddressSpace());
      assert(DL.getTypeAllocSize(Int8PtrTy->getElementType()) == 1 &&
             "alloc size of i8 must be 1 byte for the GEP to be correct");
      auto *GEP = Builder.CreateGEP(
          Builder.getInt8Ty(), Constant::getNullValue(Int8PtrTy), V, "uglygep");
      return Builder.CreateBitCast(GEP, Ty);
    }
  }
  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Try to reuse an existing cast, or insert one.
  return ReuseOrCreateCast(V, Ty, Op, GetOptimalInsertionPointForCastOf(V));
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation, and hoisting
/// to an outer loop when the opportunity is there and it is safe.
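/// For example, a request for (%a + %b) with nsw may only reuse a nearby
/// "add nsw %a, %b"; candidates whose no-wrap flags differ from the request,
/// or which carry the exact flag, are conservatively rejected below.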
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS,
                                 SCEV::NoWrapFlags Flags, bool IsSafeToHoist) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby.  If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;

      auto canGenerateIncompatiblePoison = [&Flags](Instruction *I) {
        // Ensure that no-wrap flags match.
        if (isa<OverflowingBinaryOperator>(I)) {
          if (I->hasNoSignedWrap() != (Flags & SCEV::FlagNSW))
            return true;
          if (I->hasNoUnsignedWrap() != (Flags & SCEV::FlagNUW))
            return true;
        }
        // Conservatively, do not reuse any instruction which has the exact
        // flag set.
        if (isa<PossiblyExactOperator>(I) && I->isExact())
          return true;
        return false;
      };
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS && !canGenerateIncompatiblePoison(&*IP))
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  SCEVInsertPointGuard Guard(Builder, this);

  if (IsSafeToHoist) {
    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  if (Flags & SCEV::FlagNUW)
    BO->setHasNoUnsignedWrap();
  if (Flags & SCEV::FlagNSW)
    BO->setHasNoSignedWrap();

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
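/// For example (illustrative), factoring {4,+,12} by 4 yields {1,+,3} with a
/// zero remainder, while factoring the constant 10 by 4 yields the quotient
/// 2 and adds 2 to the remainder.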
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
                              const SCEV *Factor, ScalarEvolution &SE,
                              const DataLayout &DL) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
          ConstantInt::get(SE.getContext(), C->getAPInt().sdiv(FC->getAPInt()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder = SE.getAddExpr(
            Remainder, SE.getConstant(C->getAPInt().srem(FC->getAPInt())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // Size is known, check if there is a constant operand which is a multiple
    // of the given factor. If so, we can factor it.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor))
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getAPInt().srem(FC->getAPInt())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->operands());
          NewMulOps[0] = SE.getConstant(C->getAPInt().sdiv(FC->getAPInt()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
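/// For example (illustrative), the operand list (%a, 3, {0,+,4}) has its
/// non-addrec part folded by ScalarEvolution into (3 + %a), whose operands
/// replace the list, and the addrec {0,+,4} is then re-appended at the end.
///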
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Re-sort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
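/// As an illustrative example, expanding (%p + 4 * %i) where %p has type
/// i32* factors the element size 4 out of the offset and produces
/// "getelementptr i32, i32* %p, i64 %i" rather than casting %p to an
/// integer and back.
///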
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *OriginalElTy = PTy->getElementType();
  Type *ElTy = OriginalElTy;
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntIdxTy = DL.getIndexType(PTy);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(IntIdxTy, ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (const SCEV *Op : Ops) {
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Op);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled =
        ScaledOps.empty()
            ? Constant::getNullValue(Ty)
            : expandCodeForImpl(SE.getAddExpr(ScaledOps), Ty, false);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      // Field offsets are known. See if a constant offset falls within any of
      // the struct fields.
      if (Ops.empty())
        break;
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
        if (SE.getTypeSizeInBits(C->getType()) <= 64) {
          const StructLayout &SL = *DL.getStructLayout(STy);
          uint64_t FullOffset = C->getValue()->getZExtValue();
          if (FullOffset < SL.getSizeInBytes()) {
            unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
            GepIndices.push_back(
                ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
            ElTy = STy->getTypeAtIndex(ElIdx);
            Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
            AnyNonZeroIndices = true;
            FoundFieldNo = true;
          }
        }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      // FIXME: Handle VectorType.
      // E.g., if ElTy is a scalable vector, then ElSize is not a compile-time
      // constant and therefore cannot be factored out. The generated IR is
      // less ideal: the base 'V' is cast to i8* and an ugly getelementptr is
      // emitted over that.
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeForImpl(SE.getAddExpr(Ops), Ty, false);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
                                              CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby.  If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return &*IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Emit a GEP.
    return Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
  }

  {
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V)) break;

      bool AnyIndexNotLoopInvariant = any_of(
          GepIndices, [L](Value *Op) { return !L->isLoopInvariant(Op); });

      if (AnyIndexNotLoopInvariant)
        break;

      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
    // because ScalarEvolution may have changed the address arithmetic to
    // compute a value which is beyond the end of the allocated object.
    Value *Casted = V;
    if (V->getType() != PTy)
      Casted = InsertNoopCastOfTo(Casted, PTy);
    Value *GEP = Builder.CreateGEP(OriginalElTy, Casted, GepIndices, "scevgep");
    Ops.push_back(SE.getUnknown(GEP));
  }

  return expand(SE.getAddExpr(Ops));
}

Value *SCEVExpander::expandAddToGEP(const SCEV *Op, PointerType *PTy, Type *Ty,
                                    Value *V) {
  const SCEV *const Ops[1] = {Op};
  return expandAddToGEP(Ops, Ops + 1, PTy, Ty, V);
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant
/// for SCEV expansion. If they are nested, this is the most nested. If they
/// are neighboring, pick the later one.
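/// For example, if L2 is nested inside L1, the more relevant of the two is
/// the inner loop L2; for two sibling loops, the one whose header is
/// dominated by the other's header is picked.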
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return nullptr;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI.getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (const SCEV *Op : N->operands())
      L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result = PickMostRelevantLoop(
        getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Sort pointer operands to the front, so they can serve as GEP bases.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}
Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies
  // on to form more involved GEPs.
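  // For example (illustrative), the operands (3, %x, ptr %p) are expanded
  // with %p first, so it can serve as a GEP base, and with the constant 3
  // last.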
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown whose value is not an instruction,
        // peek through it to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeForImpl(SE.getNegativeSCEV(Op), Ty, false);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W, SCEV::FlagAnyWrap,
                        /*IsSafeToHoist*/ true);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeForImpl(Op, Ty, false);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W, S->getNoWrapFlags(),
                        /*IsSafeToHoist*/ true);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  auto I = OpsAndLoops.begin();

  // Expand the calculation of X pow N in the following manner:
  // Let N = P1 + P2 + ... + PK, where all P are powers of 2. Then:
  // X pow N = (X pow P1) * (X pow P2) * ... * (X pow PK).
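  // For example, X pow 13 with 13 = 8 + 4 + 1 becomes
  // (X pow 8) * (X pow 4) * X, reusing the intermediate squarings
  // X, X*X, (X*X)*(X*X), ...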
  const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops, &Ty]() {
    auto E = I;
    // Calculate how many times the same operand from the same loop is included
    // into this power.
    uint64_t Exponent = 0;
    const uint64_t MaxExponent = UINT64_MAX >> 1;
    // No one sane will ever try to calculate such huge exponents, but if we
    // need this, we stop on UINT64_MAX / 2 because we need to exit the loop
    // below when the power of 2 exceeds our Exponent, and we want it to be
    // 1u << 31 at most to not deal with unsigned overflow.
    while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
      ++Exponent;
      ++E;
    }
    assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");

    // Calculate powers with exponents 1, 2, 4, 8 etc. and include those of them
    // that are needed into the result.
    Value *P = expandCodeForImpl(I->second, Ty, false);
    Value *Result = nullptr;
    if (Exponent & 1)
      Result = P;
    for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
      P = InsertBinop(Instruction::Mul, P, P, SCEV::FlagAnyWrap,
                      /*IsSafeToHoist*/ true);
      if (Exponent & BinExp)
        Result = Result ? InsertBinop(Instruction::Mul, Result, P,
                                      SCEV::FlagAnyWrap,
                                      /*IsSafeToHoist*/ true)
                        : P;
    }

    I = E;
    assert(Result && "Nothing was expanded?");
    return Result;
  };

  while (I != OpsAndLoops.end()) {
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = ExpandOpBinPowN();
    } else if (I->second->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod,
                         SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
      ++I;
    } else {
      // A simple mul.
      Value *W = ExpandOpBinPowN();
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      const APInt *RHS;
      if (match(W, m_Power2(RHS))) {
        // Canonicalize Prod*(1<<C) to Prod<<C.
        assert(!Ty->isVectorTy() && "vector types are not SCEVable");
        auto NWFlags = S->getNoWrapFlags();
        // Clear the nsw flag if the shl would produce a poison value.
        if (RHS->logBase2() == RHS->getBitWidth() - 1)
          NWFlags = ScalarEvolution::clearFlags(NWFlags, SCEV::FlagNSW);
        Prod = InsertBinop(Instruction::Shl, Prod,
                           ConstantInt::get(Ty, RHS->logBase2()), NWFlags,
                           /*IsSafeToHoist*/ true);
      } else {
        Prod = InsertBinop(Instruction::Mul, Prod, W, S->getNoWrapFlags(),
                           /*IsSafeToHoist*/ true);
      }
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeForImpl(S->getLHS(), Ty, false);
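  // A udiv by a constant power of two is lowered to a logical shift right;
  // e.g., dividing by 8 becomes (lshr %lhs, 3).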
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getAPInt();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()),
                         SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
  }

  Value *RHS = expandCodeForImpl(S->getRHS(), Ty, false);
  return InsertBinop(Instruction::UDiv, LHS, RHS, SCEV::FlagAnyWrap,
                     /*IsSafeToHoist*/ SE.isKnownNonZero(S->getRHS()));
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
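/// For example (illustrative), given Base = {(4 + %p),+,8} and Rest = 0,
/// this leaves Base = %p and Rest = (4 + {0,+,8}).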
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->operands());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (Use &Op : llvm::drop_begin(IncV->operands()))
      if (Instruction *OInst = dyn_cast<Instruction>(Op))
        if (!SE.DT.dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV == PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return NULL.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT.dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (Use &U : llvm::drop_begin(IncV->operands())) {
      if (isa<Constant>(U))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(U)) {
        if (!SE.DT.dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // Allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is
      // already handled, or some number of address-size elements (ugly).
      // Ugly geps have 2 operands. i1* is used by the expander to represent
      // an address-size element.
      if (IncV->getNumOperands() != 2)
        return nullptr;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// If the insert point of the current builder or any of the builders on the
/// stack of saved builders has 'I' as its insert point, update it to point to
/// the instruction after 'I'.  This is intended to be used when the instruction
/// 'I' is being moved.  If this fixup is not done and 'I' is moved to a
/// different block, the inconsistent insert point (with a mismatched
/// Instruction and Block) can lead to an instruction being inserted in a block
/// other than its parent.
void SCEVExpander::fixupInsertPoints(Instruction *I) {
  BasicBlock::iterator It(*I);
  BasicBlock::iterator NewInsertPt = std::next(It);
  if (Builder.GetInsertPoint() == It)
    Builder.SetInsertPoint(&*NewInsertPt);
  for (auto *InsertPtGuard : InsertPointGuards)
    if (InsertPtGuard->GetInsertPoint() == It)
      InsertPtGuard->SetInsertPoint(NewInsertPt);
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to
/// make it available to other uses in this loop. Recursively hoist any
/// operands, until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT.dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos) ||
      !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for (;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT.dominates(IncV, InsertPos))
      break;
  }
  for (auto I = IVIncs.rbegin(), E = IVIncs.rend(); I != E; ++I) {
    fixupInsertPoints(*I);
    (*I)->moveBefore(InsertPos);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for (Instruction *IVOper = IncV;
       (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                 /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
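/// For a pointer-typed PHI the increment is emitted as a GEP (implicitly
/// scaled by the pointee size when the step is a constant); otherwise it is
/// a plain add, or a sub when useSubtract is set.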
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    IncV = expandAddToGEP(SE.getSCEV(StepV), GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType())
      IncV = Builder.CreateBitCast(IncV, PN->getType());
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
  }
  return IncV;
}

/// Hoist the addrec instruction chain rooted in the loop phi above the
/// position. This routine assumes that this is possible (has been checked).
void SCEVExpander::hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
                                  Instruction *Pos, PHINode *LoopPhi) {
  do {
    if (DT->dominates(InstToHoist, Pos))
      break;
    // Make sure the increment is where we want it. But don't move it
    // down past a potential existing post-inc user.
    fixupInsertPoints(InstToHoist);
    InstToHoist->moveBefore(Pos);
    Pos = InstToHoist;
    InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
  } while (InstToHoist != LoopPhi);
}

/// Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());

  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncating it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
  if (SE.getAddExpr(Requested->getStart(),
                    SE.getNegativeSCEV(Requested)) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}

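/// IsIncrementNSW - Check whether the increment of the given addrec is known
/// not to wrap in the signed sense, by asking ScalarEvolution whether
/// sign-extension to twice the bit width commutes with the increment: the
/// step is nsw exactly when sext(AR + Step) == sext(AR) + sext(Step) in the
/// wider type. IsIncrementNUW below is the unsigned (zext) analogue.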
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
                                            SE.getSignExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
                                            SE.getZeroExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
getAddRecExprPHILiterally(const SCEVAddRecExpr * Normalized,const Loop * L,Type * ExpandTy,Type * IntTy,Type * & TruncTy,bool & InvertStep)1199 SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
1200                                         const Loop *L,
1201                                         Type *ExpandTy,
1202                                         Type *IntTy,
1203                                         Type *&TruncTy,
1204                                         bool &InvertStep) {
1205   assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");
1206 
1207   // Reuse a previously-inserted PHI, if present.
1208   BasicBlock *LatchBlock = L->getLoopLatch();
1209   if (LatchBlock) {
1210     PHINode *AddRecPhiMatch = nullptr;
1211     Instruction *IncV = nullptr;
1212     TruncTy = nullptr;
1213     InvertStep = false;
1214 
1215     // Only try partially matching scevs that need truncation and/or
1216     // step-inversion if we know this loop is outside the current loop.
1217     bool TryNonMatchingSCEV =
1218         IVIncInsertLoop &&
1219         SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
1220 
1221     for (PHINode &PN : L->getHeader()->phis()) {
1222       if (!SE.isSCEVable(PN.getType()))
1223         continue;
1224 
1225       // We should not look for a incomplete PHI. Getting SCEV for a incomplete
1226       // PHI has no meaning at all.
1227       if (!PN.isComplete()) {
1228         DEBUG_WITH_TYPE(
1229             DebugType, dbgs() << "One incomplete PHI is found: " << PN << "\n");
1230         continue;
1231       }

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      // TODO: this possibly can be reworked to avoid this cast at all.
      Instruction *TempIncV =
          dyn_cast<Instruction>(PN.getIncomingValueForBlock(LatchBlock));
      if (!TempIncV)
        continue;

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = &PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop; we might find an exact match
        // later.
        AddRecPhiMatch = &PN;
        IncV = TempIncV;
        TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
      }
    }

    if (AddRecPhiMatch) {
      // Potentially, move the increment. We have made sure in
      // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
      if (L == IVIncInsertLoop)
        hoistBeforePos(&SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);

      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      // Those values were not actually inserted but re-used.
      ReusedValues.insert(AddRecPhiMatch);
      ReusedValues.insert(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  SCEVInsertPointGuard Guard(Builder, this);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header).  Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value into the loop preheader.
  assert(L->getLoopPreheader() &&
         "Can't expand add recurrences without a loop preheader!");
  Value *StartV =
      expandCodeForImpl(Normalized->getStart(), ExpandTy,
                        L->getLoopPreheader()->getTerminator(), false);

  // StartV must have been inserted into L's preheader to dominate the new
  // phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
                                 L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeForImpl(
      Step, IntTy, &*L->getHeader()->getFirstInsertionPt(), false);

  // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
  // we actually do emit an addition.  It does not apply if we emit a
  // subtraction.
  bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
  bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (IncrementIsNUW)
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (IncrementIsNSW)
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

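// For reference, a typical expansion produced by getAddRecExprPHILiterally
// for {%start,+,%step}<%L> looks like this (an illustrative sketch; names
// and block structure vary):
//
//   preheader:
//     ; %start expanded here
//     br label %header
//   header:
//     %x.iv = phi i64 [ %start, %preheader ], [ %x.iv.next, %latch ]
//     ...
//   latch:
//     %x.iv.next = add i64 %x.iv, %step
//     br label %header
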
Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized = cast<SCEVAddRecExpr>(normalizeForPostIncUse(S, Loops, SE));
  }

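  // For example (illustrative): if S = {X,+,Step}<%L> is used in post-inc
  // position, Normalized is {X-Step,+,Step}<%L>; the value of that recurrence
  // on the latch edge (i.e. after its increment) is exactly S, so expanding
  // Normalized and taking the latch value below yields the post-inc result.
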
  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = nullptr;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = nullptr;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    if (!Start->isZero()) {
      // The normalization below assumes that Start is constant zero, so if
      // it isn't, re-associate Start to PostLoopOffset.
      assert(!PostLoopOffset && "Start not-null but PostLoopOffset set?");
      PostLoopOffset = Start;
      Start = SE.getConstant(Normalized->getType(), 0);
    }
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(
                             Start, Step, Normalized->getLoop(),
                             Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  // We can't use a pointer type for the addrec if the pointer type is
  // non-integral.
  Type *AddRecPHIExpandTy =
      DL.isNonIntegralPointerType(STy) ? Normalized->getType() : ExpandTy;

  // In some cases, we decide to reuse an existing phi node but need to truncate
  // it and/or invert the step.
  Type *TruncTy = nullptr;
  bool InvertStep = false;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
                                          IntTy, TruncTy, InvertStep);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // We might be introducing a new use of the post-inc IV that is not poison
    // safe, in which case we should drop poison generating flags. Only keep
    // those flags for which SCEV has proven that they always hold.
    if (isa<OverflowingBinaryOperator>(Result)) {
      auto *I = cast<Instruction>(Result);
      if (!S->hasNoUnsignedWrap())
        I->setHasNoUnsignedWrap(false);
      if (!S->hasNoSignedWrap())
        I->setHasNoSignedWrap(false);
    }

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result) &&
        !SE.DT.dominates(cast<Instruction>(Result),
                         &*Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside whose operand is replaced during
      // expansion with the value of the postinc user. Without fundamentally
      // changing the way postinc users are tracked, the only remedy is
      // inserting an extra IV increment. StepV might fold into PostLoopOffset,
      // but hopefully expandCodeFor handles that.
      bool useSubtract =
        !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      Value *StepV;
      {
        // Expand the step somewhere that dominates the loop header.
        SCEVInsertPointGuard Guard(Builder, this);
        StepV = expandCodeForImpl(
            Step, IntTy, &*L->getHeader()->getFirstInsertionPt(), false);
      }
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // We have decided to reuse an induction variable of a dominating loop. Apply
  // truncation and/or inversion of the step.
  if (TruncTy) {
    Type *ResTy = Result->getType();
    // Normalize the result type.
    if (ResTy != SE.getEffectiveSCEVType(ResTy))
      Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
    // Truncate the result.
    if (TruncTy != Result->getType())
      Result = Builder.CreateTrunc(Result, TruncTy);

    // Invert the result.
    if (InvertStep)
      Result = Builder.CreateSub(
          expandCodeForImpl(Normalized->getStart(), TruncTy, false), Result);
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeForImpl(PostLoopScale, IntTy, false));
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      if (Result->getType()->isIntegerTy()) {
        Value *Base = expandCodeForImpl(PostLoopOffset, ExpandTy, false);
        Result = expandAddToGEP(SE.getUnknown(Result), PTy, IntTy, Base);
      } else {
        Result = expandAddToGEP(PostLoopOffset, PTy, IntTy, Result);
      }
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(
          Result, expandCodeForImpl(PostLoopOffset, IntTy, false));
    }
  }

  return Result;
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  // In canonical mode we compute the addrec as an expression of a canonical IV
  // using evaluateAtIteration and expand the resulting SCEV expression. This
  // way we avoid introducing new IVs to carry on the computation of the addrec
  // throughout the loop.
  //
  // For nested addrecs evaluateAtIteration might need a canonical IV of a
  // type wider than the addrec itself. Emitting a canonical IV of the
  // proper type might produce non-legal types, for example expanding an i64
  // {0,+,2,+,1} addrec would need an i65 canonical IV. To avoid this just fall
  // back to non-canonical mode for nested addrecs.
  if (!CanonicalMode || (S->getNumOperands() > 2))
    return expandAddRecExprLiterally(S);

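  // Concretely (an illustrative expansion of the comment above): evaluating
  // the i64 addrec {0,+,2,+,1}<%L> at iteration n gives 2*n + n*(n-1)/2;
  // computing the n*(n-1)/2 term without wrapping needs one extra bit, which
  // is why an i65 canonical IV would be required.
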
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = nullptr;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is narrower.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       S->getNoWrapFlags(SCEV::FlagNW)));
    BasicBlock::iterator NewInsertPt =
        findInsertPointAfter(cast<Instruction>(V), &*Builder.GetInsertPoint());
    V = expandCodeForImpl(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
                          &*NewInsertPt, false);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->operands());
    NewOps[0] = SE.getConstant(Ty, 0);
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
                                        S->getNoWrapFlags(SCEV::FlagNW));

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    // Dig into the expression to find the pointer base for a GEP.
    const SCEV *ExposedRest = Rest;
    ExposePointerBase(Base, ExposedRest, SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(ExposedRest, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    //
    // The LHS and RHS values are factored out of the expand call to make the
    // output independent of the argument evaluation order.
    const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
    const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
    return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  &Header->front());
    rememberInstruction(CanonicalIV);

    SmallSet<BasicBlock *, 4> PredSeen;
    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (!PredSeen.insert(HP).second) {
        // There must be an incoming value for each predecessor, even the
        // duplicates!
        CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
        continue;
      }

      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form.  This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n     to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}

Value *SCEVExpander::visitPtrToIntExpr(const SCEVPtrToIntExpr *S) {
  Value *V =
      expandCodeForImpl(S->getOperand(), S->getOperand()->getType(), false);
  return ReuseOrCreateCast(V, S->getType(), CastInst::PtrToInt,
                           GetOptimalInsertionPointForCastOf(V));
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeForImpl(
      S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
      false);
  return Builder.CreateTrunc(V, Ty);
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeForImpl(
      S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
      false);
  return Builder.CreateZExt(V, Ty);
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeForImpl(
      S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
      false);
  return Builder.CreateSExt(V, Ty);
}

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    Type *OpTy = S->getOperand(i)->getType();
    if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
    Value *Sel;
    if (Ty->isIntegerTy())
      Sel = Builder.CreateIntrinsic(Intrinsic::smax, {Ty}, {LHS, RHS},
                                    /*FMFSource=*/nullptr, "smax");
    else {
      Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
      Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    }
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

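// A sketch of what the loop above emits for an all-integer smax(%a, %b, %c)
// (illustrative names; operands are combined right-to-left):
//   %smax  = call i64 @llvm.smax.i64(i64 %c, i64 %b)
//   %smax1 = call i64 @llvm.smax.i64(i64 %smax, i64 %a)
// Pointer-typed operands fall back to the icmp+select form instead. The
// umax/smin/umin visitors below follow the same pattern with their
// respective intrinsics and predicates.
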
Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    Type *OpTy = S->getOperand(i)->getType();
    if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
    Value *Sel;
    if (Ty->isIntegerTy())
      Sel = Builder.CreateIntrinsic(Intrinsic::umax, {Ty}, {LHS, RHS},
                                    /*FMFSource=*/nullptr, "umax");
    else {
      Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
      Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    }
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitSMinExpr(const SCEVSMinExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands() - 2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    Type *OpTy = S->getOperand(i)->getType();
    if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
    Value *Sel;
    if (Ty->isIntegerTy())
      Sel = Builder.CreateIntrinsic(Intrinsic::smin, {Ty}, {LHS, RHS},
                                    /*FMFSource=*/nullptr, "smin");
    else {
      Value *ICmp = Builder.CreateICmpSLT(LHS, RHS);
      Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smin");
    }
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitUMinExpr(const SCEVUMinExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands() - 2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    Type *OpTy = S->getOperand(i)->getType();
    if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
    Value *Sel;
    if (Ty->isIntegerTy())
      Sel = Builder.CreateIntrinsic(Intrinsic::umin, {Ty}, {LHS, RHS},
                                    /*FMFSource=*/nullptr, "umin");
    else {
      Value *ICmp = Builder.CreateICmpULT(LHS, RHS);
      Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umin");
    }
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty,
                                       Instruction *IP, bool Root) {
  setInsertPoint(IP);
  Value *V = expandCodeForImpl(SH, Ty, Root);
  return V;
}

Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty, bool Root) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);

  if (PreserveLCSSA) {
    if (auto *Inst = dyn_cast<Instruction>(V)) {
      // Create a temporary instruction at the current insertion point, so we
      // can hand it off to the helper to create LCSSA PHIs if required for the
      // new use.
      // FIXME: Ideally formLCSSAForInstructions (used in fixupLCSSAFormFor)
      // would accept an insertion point and return an LCSSA phi for that
      // insertion point, so there is no need to insert & remove the temporary
      // instruction.
      Instruction *Tmp;
      if (Inst->getType()->isIntegerTy())
        Tmp =
            cast<Instruction>(Builder.CreateAdd(Inst, Inst, "tmp.lcssa.user"));
      else {
        assert(Inst->getType()->isPointerTy());
        Tmp = cast<Instruction>(
            Builder.CreateGEP(Inst, Builder.getInt32(1), "tmp.lcssa.user"));
      }
      V = fixupLCSSAFormFor(Tmp, 0);

      // Clean up the temporary instruction.
      InsertedValues.erase(Tmp);
      InsertedPostIncValues.erase(Tmp);
      Tmp->eraseFromParent();
    }
  }

  InsertedExpressions[std::make_pair(SH, &*Builder.GetInsertPoint())] = V;
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}

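// Illustrative walk-through of the temporary-user trick in the function
// above: for an expanded integer instruction %v, a throwaway
// "%tmp.lcssa.user = add %v, %v" is built at the insertion point;
// fixupLCSSAFormFor then rewrites its operand to an LCSSA phi if %v is used
// outside its defining loop, the rewritten operand becomes the result, and
// the throwaway add is erased again.
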
ScalarEvolution::ValueOffsetPair
SCEVExpander::FindValueInExprValueMap(const SCEV *S,
                                      const Instruction *InsertPt) {
  SetVector<ScalarEvolution::ValueOffsetPair> *Set = SE.getSCEVValues(S);
  // If the expansion is not in CanonicalMode, and the SCEV contains any
  // sub scAddRecExpr type SCEV, it is required to expand the SCEV literally.
  if (CanonicalMode || !SE.containsAddRecurrence(S)) {
    // If S is scConstant, it may be worse to reuse an existing Value.
    if (S->getSCEVType() != scConstant && Set) {
      // Choose a Value from the set which dominates the insertPt.
      // insertPt should be inside the Value's parent loop so as not to break
      // the LCSSA form.
      for (auto const &VOPair : *Set) {
        Value *V = VOPair.first;
        ConstantInt *Offset = VOPair.second;
        Instruction *EntInst = nullptr;
        if (V && isa<Instruction>(V) && (EntInst = cast<Instruction>(V)) &&
            S->getType() == V->getType() &&
            EntInst->getFunction() == InsertPt->getFunction() &&
            SE.DT.dominates(EntInst, InsertPt) &&
            (SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
             SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
          return {V, Offset};
      }
    }
  }
  return {nullptr, nullptr};
}

// The expansion of SCEV will either reuse a previous Value in ExprValueMap,
// or expand the SCEV literally. Specifically, if the expansion is in LSRMode,
// and the SCEV contains any sub scAddRecExpr type SCEV, it will be expanded
// literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
// the expansion will try to reuse Value from ExprValueMap, and only when it
// fails, expand the SCEV literally.
Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = &*Builder.GetInsertPoint();

  // We can only move the insertion point if the expression contains no div or
  // rem operations; otherwise we would risk moving a division above the check
  // for a zero denominator.
  auto SafeToHoist = [](const SCEV *S) {
    return !SCEVExprContains(S, [](const SCEV *S) {
              if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
                if (const auto *SC = dyn_cast<SCEVConstant>(D->getRHS()))
                  // Division by non-zero constants can be hoisted.
                  return SC->getValue()->isZero();
                // All other divisions should not be moved as they may be
                // divisions by zero and should be kept within the
                // conditions of the surrounding loops that guard their
                // execution (see PR35406).
                return true;
              }
              return false;
            });
  };
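  // Example of the hazard guarded against above (pseudocode, illustrative):
  //
  //   for (i = 0; i != n; ++i)
  //     if (d != 0)
  //       use(x /u d);
  //
  // 'x /u d' is loop-invariant, but hoisting it into the preheader would move
  // the division above the 'd != 0' guard and could trap (see PR35406).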
  if (SafeToHoist(S)) {
    for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
         L = L->getParentLoop()) {
      if (SE.isLoopInvariant(S, L)) {
        if (!L) break;
        if (BasicBlock *Preheader = L->getLoopPreheader())
          InsertPt = Preheader->getTerminator();
        else
          // LSR sets the insertion point for AddRec start/step values to the
          // block start to simplify value reuse, even though it's an invalid
          // position. SCEVExpander must correct for this in all cases.
          InsertPt = &*L->getHeader()->getFirstInsertionPt();
      } else {
        // If the SCEV is computable at this level, insert it into the header
        // after the PHIs (and after any other instructions that we've inserted
        // there) so that it is guaranteed to dominate any user inside the loop.
        if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
          InsertPt = &*L->getHeader()->getFirstInsertionPt();

        while (InsertPt->getIterator() != Builder.GetInsertPoint() &&
               (isInsertedInstruction(InsertPt) ||
                isa<DbgInfoIntrinsic>(InsertPt))) {
          InsertPt = &*std::next(InsertPt->getIterator());
        }
        break;
      }
    }
  }

  // Check to see if we already expanded this here.
  auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  SCEVInsertPointGuard Guard(Builder, this);
  Builder.SetInsertPoint(InsertPt);

  // Expand the expression into instructions.
  ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, InsertPt);
  Value *V = VO.first;

  if (!V)
    V = visit(S);
  else if (VO.second) {
    if (PointerType *Vty = dyn_cast<PointerType>(V->getType())) {
      Type *Ety = Vty->getPointerElementType();
      int64_t Offset = VO.second->getSExtValue();
      int64_t ESize = SE.getTypeSizeInBits(Ety);
      if ((Offset * 8) % ESize == 0) {
        ConstantInt *Idx =
            ConstantInt::getSigned(VO.second->getType(), -(Offset * 8) / ESize);
        V = Builder.CreateGEP(Ety, V, Idx, "scevgep");
      } else {
        ConstantInt *Idx =
            ConstantInt::getSigned(VO.second->getType(), -Offset);
        unsigned AS = Vty->getAddressSpace();
        V = Builder.CreateBitCast(V, Type::getInt8PtrTy(SE.getContext(), AS));
        V = Builder.CreateGEP(Type::getInt8Ty(SE.getContext()), V, Idx,
                              "uglygep");
        V = Builder.CreateBitCast(V, Vty);
      }
    } else {
      V = Builder.CreateSub(V, VO.second);
    }
  }
  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non-postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;
  return V;
}

void SCEVExpander::rememberInstruction(Value *I) {
  auto DoInsert = [this](Value *V) {
    if (!PostIncLoops.empty())
      InsertedPostIncValues.insert(V);
    else
      InsertedValues.insert(V);
  };
  DoInsert(I);

  if (!PreserveLCSSA)
    return;

  if (auto *Inst = dyn_cast<Instruction>(I)) {
    // A new instruction has been added, which might introduce new uses outside
    // a defining loop. Fix LCSSA form for each operand of the new instruction,
    // if required.
    for (unsigned OpIdx = 0, OpEnd = Inst->getNumOperands(); OpIdx != OpEnd;
         OpIdx++)
      fixupLCSSAFormFor(Inst, OpIdx);
  }
}

/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned
SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                  SmallVectorImpl<WeakTrackingVH> &DeadInsts,
                                  const TargetTransformInfo *TTI) {
  // Find integer phis in order of increasing width.
  SmallVector<PHINode*, 8> Phis;
  for (PHINode &PN : L->getHeader()->phis())
    Phis.push_back(&PN);

  if (TTI)
    llvm::sort(Phis, [](Value *LHS, Value *RHS) {
      // Put pointers at the back and make sure pointer < pointer = false.
      if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
        return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
      return RHS->getType()->getPrimitiveSizeInBits().getFixedSize() <
             LHS->getType()->getPrimitiveSizeInBits().getFixedSize();
    });

  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow. Map wide phis to their truncation
  // so narrow phis can reuse them.
  for (PHINode *Phi : Phis) {
    auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
      if (Value *V = SimplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
        return V;
      if (!SE.isSCEVable(PN->getType()))
        return nullptr;
      auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
      if (!Const)
        return nullptr;
      return Const->getValue();
    };

    // Fold constant phis. They may be congruent to other constant phis and
    // would confuse the logic below that expects proper IVs.
    if (Value *V = SimplifyPHINode(Phi)) {
      if (V->getType() != Phi->getType())
        continue;
      Phi->replaceAllUsesWith(V);
      DeadInsts.emplace_back(Phi);
      ++NumElim;
      DEBUG_WITH_TYPE(DebugType, dbgs()
                      << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
      continue;
    }

    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TTI &&
          TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
          SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }

    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc = dyn_cast<Instruction>(
          OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
          dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      if (OrigInc && IsomorphicInc) {
        // If this phi has the same width but is more canonical, replace the
        // original with it. As part of the "more canonical" determination,
        // respect a prior decision to use an IV chain.
        if (OrigPhiRef->getType() == Phi->getType() &&
            !(ChainedPhis.count(Phi) ||
              isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
            (ChainedPhis.count(Phi) ||
             isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
          std::swap(OrigPhiRef, Phi);
          std::swap(OrigInc, IsomorphicInc);
        }
        // Replacing the congruent phi is sufficient because acyclic
        // redundancy elimination, CSE/GVN, should handle the
        // rest. However, once SCEV proves that a phi is congruent,
        // it's often the head of an IV user cycle that is isomorphic
        // with the original phi. It's worth eagerly cleaning up the
        // common case of a single IV increment so that DeleteDeadPHIs
        // can remove cycles that had postinc uses.
        const SCEV *TruncExpr =
            SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
        if (OrigInc != IsomorphicInc &&
            TruncExpr == SE.getSCEV(IsomorphicInc) &&
            SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) &&
            hoistIVInc(OrigInc, IsomorphicInc)) {
          DEBUG_WITH_TYPE(DebugType,
                          dbgs() << "INDVARS: Eliminated congruent iv.inc: "
                                 << *IsomorphicInc << '\n');
          Value *NewInc = OrigInc;
          if (OrigInc->getType() != IsomorphicInc->getType()) {
            Instruction *IP = nullptr;
            if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
              IP = &*PN->getParent()->getFirstInsertionPt();
            else
              IP = OrigInc->getNextNode();

            IRBuilder<> Builder(IP);
            Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
            NewInc = Builder.CreateTruncOrBitCast(
                OrigInc, IsomorphicInc->getType(), IVName);
          }
          IsomorphicInc->replaceAllUsesWith(NewInc);
          DeadInsts.emplace_back(IsomorphicInc);
        }
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs() << "INDVARS: Eliminated congruent iv: "
                                      << *Phi << '\n');
    DEBUG_WITH_TYPE(DebugType, dbgs() << "INDVARS: Original iv: "
                                      << *OrigPhiRef << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.emplace_back(Phi);
  }
  return NumElim;
}

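// An illustrative example of the elimination above: given two header phis
// with the same SCEV {0,+,1}<%L>,
//   %i = phi i64 [ 0, %preheader ], [ %i.next, %latch ]
//   %j = phi i64 [ 0, %preheader ], [ %j.next, %latch ]
// one phi is kept as the representative, %j.next is replaced with %i.next
// when the increments are isomorphic, %j is replaced with the representative
// (through a trunc/bitcast if the widths differ), and the dead phi cycle is
// queued on DeadInsts.
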
Optional<ScalarEvolution::ValueOffsetPair>
SCEVExpander::getRelatedExistingExpansion(const SCEV *S, const Instruction *At,
                                          Loop *L) {
  using namespace llvm::PatternMatch;

  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Look for a suitable value in simple conditions at the loop exits.
  for (BasicBlock *BB : ExitingBlocks) {
    ICmpInst::Predicate Pred;
    Instruction *LHS, *RHS;

    if (!match(BB->getTerminator(),
               m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
                    m_BasicBlock(), m_BasicBlock())))
      continue;

    if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
      return ScalarEvolution::ValueOffsetPair(LHS, nullptr);

    if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
      return ScalarEvolution::ValueOffsetPair(RHS, nullptr);
  }

  // Use expand's logic, which is used for reusing a previous Value in
  // ExprValueMap.
  ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, At);
  if (VO.first)
    return VO;

  // There is potential to make this significantly smarter, but this simple
  // heuristic already gets some interesting cases.

  // Cannot find a suitable value.
  return None;
}

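// Example of the exit-condition match above (illustrative names): if an
// exiting block ends in
//   br i1 (icmp eq i64 %lhs, %rhs), label %exit, label %header
// and SE.getSCEV(%lhs) == S with %lhs dominating At, then %lhs is returned
// and no new code needs to be emitted for S.
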
template<typename T> static InstructionCost costAndCollectOperands(
  const SCEVOperand &WorkItem, const TargetTransformInfo &TTI,
  TargetTransformInfo::TargetCostKind CostKind,
  SmallVectorImpl<SCEVOperand> &Worklist) {

  const T *S = cast<T>(WorkItem.S);
  InstructionCost Cost = 0;
  // Object to help map SCEV operands to expanded IR instructions.
  struct OperationIndices {
    OperationIndices(unsigned Opc, size_t min, size_t max) :
      Opcode(Opc), MinIdx(min), MaxIdx(max) { }
    unsigned Opcode;
    size_t MinIdx;
    size_t MaxIdx;
  };

  // Collect the operations of all the instructions that will be needed to
  // expand the SCEVExpr. This is so that when we come to cost the operands,
  // we know what the generated user(s) will be.
  SmallVector<OperationIndices, 2> Operations;

  auto CastCost = [&](unsigned Opcode) -> InstructionCost {
    Operations.emplace_back(Opcode, 0, 0);
    return TTI.getCastInstrCost(Opcode, S->getType(),
                                S->getOperand(0)->getType(),
                                TTI::CastContextHint::None, CostKind);
  };

  auto ArithCost = [&](unsigned Opcode, unsigned NumRequired,
                       unsigned MinIdx = 0,
                       unsigned MaxIdx = 1) -> InstructionCost {
    Operations.emplace_back(Opcode, MinIdx, MaxIdx);
    return NumRequired *
      TTI.getArithmeticInstrCost(Opcode, S->getType(), CostKind);
  };

  auto CmpSelCost = [&](unsigned Opcode, unsigned NumRequired, unsigned MinIdx,
                        unsigned MaxIdx) -> InstructionCost {
    Operations.emplace_back(Opcode, MinIdx, MaxIdx);
    Type *OpType = S->getOperand(0)->getType();
    return NumRequired * TTI.getCmpSelInstrCost(
                             Opcode, OpType, CmpInst::makeCmpResultType(OpType),
                             CmpInst::BAD_ICMP_PREDICATE, CostKind);
  };

  switch (S->getSCEVType()) {
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  case scUnknown:
  case scConstant:
    return 0;
  case scPtrToInt:
    Cost = CastCost(Instruction::PtrToInt);
    break;
  case scTruncate:
    Cost = CastCost(Instruction::Trunc);
    break;
  case scZeroExtend:
    Cost = CastCost(Instruction::ZExt);
    break;
  case scSignExtend:
    Cost = CastCost(Instruction::SExt);
    break;
  case scUDivExpr: {
    unsigned Opcode = Instruction::UDiv;
    if (auto *SC = dyn_cast<SCEVConstant>(S->getOperand(1)))
      if (SC->getAPInt().isPowerOf2())
        Opcode = Instruction::LShr;
    Cost = ArithCost(Opcode, 1);
    break;
  }
  case scAddExpr:
    Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
    break;
  case scMulExpr:
    // TODO: this is a very pessimistic cost model for Mul, because of the
    // binary exponentiation algorithm actually used by the expander; see
    // SCEVExpander::visitMulExpr() and ExpandOpBinPowN().
    Cost = ArithCost(Instruction::Mul, S->getNumOperands() - 1);
    break;
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    // FIXME: should this ask the cost for Intrinsic's?
    Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
    Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
    break;
  }
  case scAddRecExpr: {
    // In this polynomial, we may have some zero operands, and we shouldn't
    // really charge for those. So how many non-zero coefficients are there?
    int NumTerms = llvm::count_if(S->operands(), [](const SCEV *Op) {
                                    return !Op->isZero();
                                  });

    assert(NumTerms >= 1 && "Polynomial should have at least one term.");
    assert(!(*std::prev(S->operands().end()))->isZero() &&
           "Last operand should not be zero");

    // Ignoring the constant term (operand 0), how many coefficients are u> 1?
    int NumNonZeroDegreeNonOneTerms =
      llvm::count_if(S->operands(), [](const SCEV *Op) {
                      auto *SConst = dyn_cast<SCEVConstant>(Op);
                      return !SConst || SConst->getAPInt().ugt(1);
                    });

    // Much like with a normal add expr, the polynomial will require
    // one less addition than the number of its terms.
    InstructionCost AddCost = ArithCost(Instruction::Add, NumTerms - 1,
                                        /*MinIdx*/ 1, /*MaxIdx*/ 1);
    // Here, *each* one of those will require a multiplication.
    InstructionCost MulCost =
        ArithCost(Instruction::Mul, NumNonZeroDegreeNonOneTerms);
    Cost = AddCost + MulCost;

    // What is the degree of this polynomial?
    int PolyDegree = S->getNumOperands() - 1;
    assert(PolyDegree >= 1 && "Should be at least affine.");

    // The final term will be:
    //   Op_{PolyDegree} * x ^ {PolyDegree}
    // Where  x ^ {PolyDegree}  will again require PolyDegree-1 mul operations.
    // Note that  x ^ {PolyDegree} = x * x ^ {PolyDegree-1}  so charging for
    // x ^ {PolyDegree}  will give us  x ^ {2} .. x ^ {PolyDegree-1}  for free.
    // FIXME: this is conservatively correct, but might be overly pessimistic.
    Cost += MulCost * (PolyDegree - 1);
    break;
  }
  }

  for (auto &CostOp : Operations) {
    for (auto SCEVOp : enumerate(S->operands())) {
      // Clamp the index to account for multiple IR operations being chained.
      size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
      size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
      Worklist.emplace_back(CostOp.Opcode, OpIdx, SCEVOp.value());
    }
  }
  return Cost;
}

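// A worked example of the addrec costing above (illustrative): for
// {1,+,2,+,3}, NumTerms = 3, so AddCost charges 2 adds; the coefficients 2
// and 3 are u> 1, so MulCost charges 2 muls; PolyDegree = 2 then adds
// MulCost * (PolyDegree - 1) = 2 more muls for the x^2 term, giving a
// conservative total of 2 adds + 4 muls (see the FIXME above).
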
bool SCEVExpander::isHighCostExpansionHelper(
    const SCEVOperand &WorkItem, Loop *L, const Instruction &At,
    InstructionCost &Cost, unsigned Budget, const TargetTransformInfo &TTI,
    SmallPtrSetImpl<const SCEV *> &Processed,
    SmallVectorImpl<SCEVOperand> &Worklist) {
  if (Cost > Budget)
    return true; // Already run out of budget, give up.

  const SCEV *S = WorkItem.S;
  // Was the cost of expansion of this expression already accounted for?
  if (!isa<SCEVConstant>(S) && !Processed.insert(S).second)
    return false; // We have already accounted for this expression.

  // If we can find an existing value for this scev available at the point "At"
  // then consider the expression cheap.
  if (getRelatedExistingExpansion(S, &At, L))
    return false; // Consider the expression to be free.

  TargetTransformInfo::TargetCostKind CostKind =
      L->getHeader()->getParent()->hasMinSize()
          ? TargetTransformInfo::TCK_CodeSize
          : TargetTransformInfo::TCK_RecipThroughput;

  switch (S->getSCEVType()) {
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  case scUnknown:
    // Assume to be zero-cost.
    return false;
  case scConstant: {
    // Only evaluate the costs of constants when optimizing for size.
    if (CostKind != TargetTransformInfo::TCK_CodeSize)
      return false;
    const APInt &Imm = cast<SCEVConstant>(S)->getAPInt();
    Type *Ty = S->getType();
    Cost += TTI.getIntImmCostInst(
        WorkItem.ParentOpcode, WorkItem.OperandIdx, Imm, Ty, CostKind);
    return Cost > Budget;
  }
  case scTruncate:
  case scPtrToInt:
  case scZeroExtend:
  case scSignExtend: {
    Cost +=
        costAndCollectOperands<SCEVCastExpr>(WorkItem, TTI, CostKind, Worklist);
    return false; // Will answer upon next entry into this function.
  }
  case scUDivExpr: {
    // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
    // HowManyLessThans produced to compute a precise expression, rather than a
    // UDiv from the user's code. If we can't find a UDiv in the code with some
    // simple searching, we need to account for its cost.

    // At the beginning of this function we already tried to find an existing
    // value for plain 'S'. Now try to lookup 'S + 1' since it is a common
    // pattern involving division. This is just a simple search heuristic.
    if (getRelatedExistingExpansion(
            SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), &At, L))
      return false; // Consider it to be free.

    Cost +=
        costAndCollectOperands<SCEVUDivExpr>(WorkItem, TTI, CostKind, Worklist);
    return false; // Will answer upon next entry into this function.
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    assert(cast<SCEVNAryExpr>(S)->getNumOperands() > 1 &&
           "Nary expr should have more than 1 operand.");
    // The simple nary expr will require one less op (or pair of ops)
    // than the number of its terms.
    Cost +=
        costAndCollectOperands<SCEVNAryExpr>(WorkItem, TTI, CostKind, Worklist);
    return Cost > Budget;
  }
  case scAddRecExpr: {
    assert(cast<SCEVAddRecExpr>(S)->getNumOperands() >= 2 &&
           "Polynomial should be at least linear");
    Cost += costAndCollectOperands<SCEVAddRecExpr>(
        WorkItem, TTI, CostKind, Worklist);
    return Cost > Budget;
  }
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
                                            Instruction *IP) {
  assert(IP);
  switch (Pred->getKind()) {
  case SCEVPredicate::P_Union:
    return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
  case SCEVPredicate::P_Equal:
    return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
  case SCEVPredicate::P_Wrap: {
    auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
    return expandWrapPredicate(AddRecPred, IP);
  }
  }
  llvm_unreachable("Unknown SCEV predicate type");
}

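/// Expand a runtime check for an equality predicate: materialize both sides
/// and compare them. The result is true when LHS != RHS, i.e. when the
/// assumed equality does not hold.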
Value *SCEVExpander::expandEqualPredicate(const SCEVEqualPredicate *Pred,
                                          Instruction *IP) {
  Value *Expr0 =
      expandCodeForImpl(Pred->getLHS(), Pred->getLHS()->getType(), IP, false);
  Value *Expr1 =
      expandCodeForImpl(Pred->getRHS(), Pred->getRHS()->getType(), IP, false);

  Builder.SetInsertPoint(IP);
  auto *I = Builder.CreateICmpNE(Expr0, Expr1, "ident.check");
  return I;
}

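/// Build a runtime overflow check for the affine recurrence \p AR at \p Loc.
/// The returned i1 is true if {Start,+,Step} wraps (signed if \p Signed,
/// unsigned otherwise) within the loop's backedge-taken count.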
Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
                                           Instruction *Loc, bool Signed) {
  assert(AR->isAffine() && "Cannot generate RT check for "
                           "non-affine expression");

  SCEVUnionPredicate Pred;
  const SCEV *ExitCount =
      SE.getPredicatedBackedgeTakenCount(AR->getLoop(), Pred);

  assert(!isa<SCEVCouldNotCompute>(ExitCount) && "Invalid loop count");

  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *Start = AR->getStart();

  Type *ARTy = AR->getType();
  unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
  unsigned DstBits = SE.getTypeSizeInBits(ARTy);

  // The expression {Start,+,Step} has nusw/nssw if
  //   Step < 0, Start - |Step| * Backedge <= Start
  //   Step >= 0, Start + |Step| * Backedge > Start
  // and |Step| * Backedge doesn't unsigned overflow.
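  //
  // A rough sketch of the IR emitted below (value names are illustrative;
  // pointer-typed recurrences use GEPs instead of add/sub):
  //   %abs.step = select (%step slt 0), -%step, %step
  //   %mul      = umul.with.overflow(%abs.step, %backedge.count)
  //   %add      = add %start, %mul.result
  //   %sub      = sub %start, %mul.result
  //   %wraps    = select (%step slt 0), (%sub > %start), (%add < %start)
  //   result    = or %wraps, %mul.overflow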

  IntegerType *CountTy = IntegerType::get(Loc->getContext(), SrcBits);
  Builder.SetInsertPoint(Loc);
  Value *TripCountVal = expandCodeForImpl(ExitCount, CountTy, Loc, false);

  IntegerType *Ty =
      IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(ARTy));
  Type *ARExpandTy = DL.isNonIntegralPointerType(ARTy) ? ARTy : Ty;

  Value *StepValue = expandCodeForImpl(Step, Ty, Loc, false);
  Value *NegStepValue =
      expandCodeForImpl(SE.getNegativeSCEV(Step), Ty, Loc, false);
  Value *StartValue = expandCodeForImpl(
      isa<PointerType>(ARExpandTy) ? Start
                                   : SE.getPtrToIntExpr(Start, ARExpandTy),
      ARExpandTy, Loc, false);

  ConstantInt *Zero =
      ConstantInt::get(Loc->getContext(), APInt::getNullValue(DstBits));

  Builder.SetInsertPoint(Loc);
  // Compute |Step|
  Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
  Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
  // Get the backedge taken count and truncate or extend it to the AR type.
  Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
  auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
                                         Intrinsic::umul_with_overflow, Ty);

  // Compute |Step| * Backedge
  CallInst *Mul = Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
  Value *MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
  Value *OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");

  // Compute:
  //   Start + |Step| * Backedge < Start
  //   Start - |Step| * Backedge > Start
  Value *Add = nullptr, *Sub = nullptr;
  if (PointerType *ARPtrTy = dyn_cast<PointerType>(ARExpandTy)) {
    const SCEV *MulS = SE.getSCEV(MulV);
    const SCEV *NegMulS = SE.getNegativeSCEV(MulS);
    Add = Builder.CreateBitCast(expandAddToGEP(MulS, ARPtrTy, Ty, StartValue),
                                ARPtrTy);
    Sub = Builder.CreateBitCast(
        expandAddToGEP(NegMulS, ARPtrTy, Ty, StartValue), ARPtrTy);
  } else {
    Add = Builder.CreateAdd(StartValue, MulV);
    Sub = Builder.CreateSub(StartValue, MulV);
  }

  Value *EndCompareGT = Builder.CreateICmp(
      Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);

  Value *EndCompareLT = Builder.CreateICmp(
      Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);

  // Select the answer based on the sign of Step.
  Value *EndCheck =
      Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);

  // If the backedge taken count type is larger than the AR type,
  // check that we don't drop any bits by truncating it. If we are
  // dropping bits, then we have overflow (unless the step is zero).
  if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
    auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
    auto *BackedgeCheck =
        Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
                           ConstantInt::get(Loc->getContext(), MaxVal));
    BackedgeCheck = Builder.CreateAnd(
        BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));

    EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
  }

  return Builder.CreateOr(EndCheck, OfMul);
}

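/// Expand a wrap-predicate check: emit an NUSW and/or NSSW overflow check
/// for the recurrence, depending on the predicate's flags, and OR the
/// results. Returns constant false (no failure) when neither flag is set.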
Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
                                         Instruction *IP) {
  const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
  Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;

  // Add a check for NUSW
  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
    NUSWCheck = generateOverflowCheck(A, IP, false);

  // Add a check for NSSW
  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
    NSSWCheck = generateOverflowCheck(A, IP, true);

  if (NUSWCheck && NSSWCheck)
    return Builder.CreateOr(NUSWCheck, NSSWCheck);

  if (NUSWCheck)
    return NUSWCheck;

  if (NSSWCheck)
    return NSSWCheck;

  return ConstantInt::getFalse(IP->getContext());
}

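/// Expand a union predicate by ORing together the failure flags of all its
/// member predicates; the union check fails if any member check fails.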
Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
                                          Instruction *IP) {
  auto *BoolType = IntegerType::get(IP->getContext(), 1);
  Value *Check = ConstantInt::getNullValue(BoolType);

  // Loop over all checks in this set.
  for (auto Pred : Union->getPredicates()) {
    auto *NextCheck = expandCodeForPredicate(Pred, IP);
    Builder.SetInsertPoint(IP);
    Check = Builder.CreateOr(Check, NextCheck);
  }

  return Check;
}

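/// If the operand \p OpIdx of \p User is defined in a loop that does not
/// contain \p User, rewrite the use through an LCSSA PHI and return the
/// value the operand was replaced with; otherwise return the operand
/// unchanged. Unused PHIs created along the way are cleaned up.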
Value *SCEVExpander::fixupLCSSAFormFor(Instruction *User, unsigned OpIdx) {
  assert(PreserveLCSSA);
  SmallVector<Instruction *, 1> ToUpdate;

  auto *OpV = User->getOperand(OpIdx);
  auto *OpI = dyn_cast<Instruction>(OpV);
  if (!OpI)
    return OpV;

  Loop *DefLoop = SE.LI.getLoopFor(OpI->getParent());
  Loop *UseLoop = SE.LI.getLoopFor(User->getParent());
  if (!DefLoop || UseLoop == DefLoop || DefLoop->contains(UseLoop))
    return OpV;

  ToUpdate.push_back(OpI);
  SmallVector<PHINode *, 16> PHIsToRemove;
  formLCSSAForInstructions(ToUpdate, SE.DT, SE.LI, &SE, Builder, &PHIsToRemove);
  for (PHINode *PN : PHIsToRemove) {
    if (!PN->use_empty())
      continue;
    InsertedValues.erase(PN);
    InsertedPostIncValues.erase(PN);
    PN->eraseFromParent();
  }

  return User->getOperand(OpIdx);
}

namespace {
// Search for a SCEV subexpression that is not safe to expand.  Any expression
// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
// UDiv expressions. We don't know if the UDiv is derived from an IR divide
// instruction, but the important thing is that we prove the denominator is
// nonzero before expansion.
//
// IVUsers already checks that IV-derived expressions are safe. So this check is
// only needed when the expression includes some subexpression that is not IV
// derived.
//
// Currently, we only allow division by a nonzero constant here. If this is
// inadequate, we could easily allow division by SCEVUnknown by using
// ValueTracking to check isKnownNonZero().
//
// We cannot generally expand recurrences unless the step dominates the loop
// header. The expander handles the special case of affine recurrences by
// scaling the recurrence outside the loop, but this technique isn't generally
// applicable. Expanding a nested recurrence outside a loop requires computing
// binomial coefficients. This could be done, but the recurrence has to be in a
// perfectly reduced form, which can't be guaranteed.
struct SCEVFindUnsafe {
  ScalarEvolution &SE;
  bool IsUnsafe;

  SCEVFindUnsafe(ScalarEvolution &se) : SE(se), IsUnsafe(false) {}

  bool follow(const SCEV *S) {
    if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
      if (!SC || SC->getValue()->isZero()) {
        IsUnsafe = true;
        return false;
      }
    }
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
      const SCEV *Step = AR->getStepRecurrence(SE);
      if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
        IsUnsafe = true;
        return false;
      }
    }
    return true;
  }
  bool isDone() const { return IsUnsafe; }
};
} // namespace

namespace llvm {
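/// Return true if no subexpression of \p S blocks expansion: in particular,
/// no division by a value that is not a nonzero constant, and no non-affine
/// recurrence whose step fails to dominate its loop header.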
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
  SCEVFindUnsafe Search(SE);
  visitAll(S, Search);
  return !Search.IsUnsafe;
}

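/// Check both that \p S is safe to expand at all and that its expansion can
/// legally be placed at \p InsertionPoint, i.e. that every value the
/// expansion depends on dominates that point.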
bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint,
                      ScalarEvolution &SE) {
  if (!isSafeToExpand(S, SE))
    return false;
  // We have to prove that the expanded site of S dominates InsertionPoint.
  // This is easy when not in the same block, but hard when S is an instruction
  // to be expanded somewhere inside the same block as our insertion point.
  // What we really need here is something analogous to an OrderedBasicBlock,
  // but for the moment, we paper over the problem by handling two common and
  // cheap to check cases.
  if (SE.properlyDominates(S, InsertionPoint->getParent()))
    return true;
  if (SE.dominates(S, InsertionPoint->getParent())) {
    if (InsertionPoint->getParent()->getTerminator() == InsertionPoint)
      return true;
    if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
      if (llvm::is_contained(InsertionPoint->operand_values(), U->getValue()))
        return true;
  }
  return false;
}

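/// If the expansion result went unused, erase every instruction the expander
/// created, removing users before the definitions they depend on.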
void SCEVExpanderCleaner::cleanup() {
  // Result is used, nothing to remove.
  if (ResultUsed)
    return;

  auto InsertedInstructions = Expander.getAllInsertedInstructions();
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 8> InsertedSet(InsertedInstructions.begin(),
                                            InsertedInstructions.end());
  (void)InsertedSet;
#endif
  // Remove sets with value handles.
  Expander.clear();

  // Sort so that earlier instructions do not dominate later instructions.
  stable_sort(InsertedInstructions, [this](Instruction *A, Instruction *B) {
    return DT.dominates(B, A);
  });
  // Remove all inserted instructions.
  for (Instruction *I : InsertedInstructions) {
#ifndef NDEBUG
    assert(all_of(I->users(),
                  [&InsertedSet](Value *U) {
                    return InsertedSet.contains(cast<Instruction>(U));
                  }) &&
           "removed instruction should only be used by instructions inserted "
           "during expansion");
#endif
    assert(!I->getType()->isVoidTy() &&
           "inserted instruction should have a non-void type");
    I->replaceAllUsesWith(UndefValue::get(I->getType()));
    I->eraseFromParent();
  }
}
} // namespace llvm