1 //===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the implementation of the scalar evolution expander,
10 // which is used to generate the code corresponding to a given scalar evolution
11 // expression.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/SmallSet.h"
18 #include "llvm/Analysis/InstructionSimplify.h"
19 #include "llvm/Analysis/LoopInfo.h"
20 #include "llvm/Analysis/TargetTransformInfo.h"
21 #include "llvm/IR/DataLayout.h"
22 #include "llvm/IR/Dominators.h"
23 #include "llvm/IR/IntrinsicInst.h"
24 #include "llvm/IR/LLVMContext.h"
25 #include "llvm/IR/Module.h"
26 #include "llvm/IR/PatternMatch.h"
27 #include "llvm/Support/CommandLine.h"
28 #include "llvm/Support/Debug.h"
29 #include "llvm/Support/raw_ostream.h"
30 #include "llvm/Transforms/Utils/LoopUtils.h"
31
32 #ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
33 #define SCEV_DEBUG_WITH_TYPE(TYPE, X) DEBUG_WITH_TYPE(TYPE, X)
34 #else
35 #define SCEV_DEBUG_WITH_TYPE(TYPE, X)
36 #endif
37
38 using namespace llvm;
39
40 cl::opt<unsigned> llvm::SCEVCheapExpansionBudget(
41 "scev-cheap-expansion-budget", cl::Hidden, cl::init(4),
42 cl::desc("When performing SCEV expansion only if it is cheap to do, this "
43 "controls the budget that is considered cheap (default = 4)"));
44
45 using namespace PatternMatch;
46
47 /// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
48 /// reusing an existing cast if a suitable one (= dominating IP) exists, or
49 /// creating a new one.
50 Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
51 Instruction::CastOps Op,
52 BasicBlock::iterator IP) {
53 // This function must be called with the builder having a valid insertion
54 // point. It doesn't need to be the actual IP where the uses of the returned
55 // cast will be added, but it must dominate such IP.
56 // We use this precondition to produce a cast that will dominate all its
57 // uses. In particular, this is crucial for the case where the builder's
58 // insertion point *is* the point where we were asked to put the cast.
59 // Since we don't know the builder's insertion point is actually
60 // where the uses will be added (only that it dominates it), we are
61 // not allowed to move it.
62 BasicBlock::iterator BIP = Builder.GetInsertPoint();
63
64 Value *Ret = nullptr;
65
66 // Check to see if there is already a cast!
67 for (User *U : V->users()) {
68 if (U->getType() != Ty)
69 continue;
70 CastInst *CI = dyn_cast<CastInst>(U);
71 if (!CI || CI->getOpcode() != Op)
72 continue;
73
74 // Found a suitable cast that is at IP or comes before IP. Use it. Note that
75 // the cast must also properly dominate the Builder's insertion point.
76 if (IP->getParent() == CI->getParent() && &*BIP != CI &&
77 (&*IP == CI || CI->comesBefore(&*IP))) {
78 Ret = CI;
79 break;
80 }
81 }
82
83 // Create a new cast.
84 if (!Ret) {
85 SCEVInsertPointGuard Guard(Builder, this);
86 Builder.SetInsertPoint(&*IP);
87 Ret = Builder.CreateCast(Op, V, Ty, V->getName());
88 }
89
90 // We assert at the end of the function since IP might point to an
91 // instruction with different dominance properties than a cast
92 // (an invoke for example) and not dominate BIP (but the cast does).
93 assert(!isa<Instruction>(Ret) ||
94 SE.DT.dominates(cast<Instruction>(Ret), &*BIP));
95
96 return Ret;
97 }
98
99 BasicBlock::iterator
100 SCEVExpander::findInsertPointAfter(Instruction *I,
101 Instruction *MustDominate) const {
102 BasicBlock::iterator IP = ++I->getIterator();
103 if (auto *II = dyn_cast<InvokeInst>(I))
104 IP = II->getNormalDest()->begin();
105
106 while (isa<PHINode>(IP))
107 ++IP;
108
109 if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
110 ++IP;
111 } else if (isa<CatchSwitchInst>(IP)) {
112 IP = MustDominate->getParent()->getFirstInsertionPt();
113 } else {
114 assert(!IP->isEHPad() && "unexpected eh pad!");
115 }
116
117 // Adjust insert point to be after instructions inserted by the expander, so
118 // we can re-use already inserted instructions. Avoid skipping past the
119 // original \p MustDominate, in case it is an inserted instruction.
120 while (isInsertedInstruction(&*IP) && &*IP != MustDominate)
121 ++IP;
122
123 return IP;
124 }
125
126 BasicBlock::iterator
127 SCEVExpander::GetOptimalInsertionPointForCastOf(Value *V) const {
128 // Cast the argument at the beginning of the entry block, after
129 // any bitcasts of other arguments.
130 if (Argument *A = dyn_cast<Argument>(V)) {
131 BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
132 while ((isa<BitCastInst>(IP) &&
133 isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
134 cast<BitCastInst>(IP)->getOperand(0) != A) ||
135 isa<DbgInfoIntrinsic>(IP))
136 ++IP;
137 return IP;
138 }
139
140 // Cast the instruction immediately after the instruction.
141 if (Instruction *I = dyn_cast<Instruction>(V))
142 return findInsertPointAfter(I, &*Builder.GetInsertPoint());
143
144 // Otherwise, this must be some kind of a constant,
145 // so let's plop this cast into the function's entry block.
146 assert(isa<Constant>(V) &&
147 "Expected the cast argument to be a global/constant");
148 return Builder.GetInsertBlock()
149 ->getParent()
150 ->getEntryBlock()
151 .getFirstInsertionPt();
152 }
153
154 /// InsertNoopCastOfTo - Insert a cast of V to the specified type,
155 /// which must be possible with a noop cast, doing what we can to share
156 /// the casts.
157 Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
158 Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
159 assert((Op == Instruction::BitCast ||
160 Op == Instruction::PtrToInt ||
161 Op == Instruction::IntToPtr) &&
162 "InsertNoopCastOfTo cannot perform non-noop casts!");
163 assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
164 "InsertNoopCastOfTo cannot change sizes!");
165
166 // inttoptr only works for integral pointers. For non-integral pointers, we
167 // can create a GEP on i8* null with the integral value as index. Note that
168 // it is safe to use GEP of null instead of inttoptr here, because only
169 // expressions already based on a GEP of null should be converted to pointers
170 // during expansion.
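// A rough illustrative sketch (names made up): expanding an i64 offset %off to
// a non-integral pointer type emits something like
//   %uglygep = getelementptr i8, i8 addrspace(N)* null, i64 %off
// followed by a bitcast to the requested pointer type.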
171 if (Op == Instruction::IntToPtr) {
172 auto *PtrTy = cast<PointerType>(Ty);
173 if (DL.isNonIntegralPointerType(PtrTy)) {
174 auto *Int8PtrTy = Builder.getInt8PtrTy(PtrTy->getAddressSpace());
175 assert(DL.getTypeAllocSize(Int8PtrTy->getElementType()) == 1 &&
176 "alloc size of i8 must by 1 byte for the GEP to be correct");
177 auto *GEP = Builder.CreateGEP(
178 Builder.getInt8Ty(), Constant::getNullValue(Int8PtrTy), V, "uglygep");
179 return Builder.CreateBitCast(GEP, Ty);
180 }
181 }
182 // Short-circuit unnecessary bitcasts.
183 if (Op == Instruction::BitCast) {
184 if (V->getType() == Ty)
185 return V;
186 if (CastInst *CI = dyn_cast<CastInst>(V)) {
187 if (CI->getOperand(0)->getType() == Ty)
188 return CI->getOperand(0);
189 }
190 }
191 // Short-circuit unnecessary inttoptr<->ptrtoint casts.
192 if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
193 SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
194 if (CastInst *CI = dyn_cast<CastInst>(V))
195 if ((CI->getOpcode() == Instruction::PtrToInt ||
196 CI->getOpcode() == Instruction::IntToPtr) &&
197 SE.getTypeSizeInBits(CI->getType()) ==
198 SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
199 return CI->getOperand(0);
200 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
201 if ((CE->getOpcode() == Instruction::PtrToInt ||
202 CE->getOpcode() == Instruction::IntToPtr) &&
203 SE.getTypeSizeInBits(CE->getType()) ==
204 SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
205 return CE->getOperand(0);
206 }
207
208 // Fold a cast of a constant.
209 if (Constant *C = dyn_cast<Constant>(V))
210 return ConstantExpr::getCast(Op, C, Ty);
211
212 // Try to reuse existing cast, or insert one.
213 return ReuseOrCreateCast(V, Ty, Op, GetOptimalInsertionPointForCastOf(V));
214 }
215
216 /// InsertBinop - Insert the specified binary operator, doing a small amount
217 /// of work to avoid inserting an obviously redundant operation, and hoisting
218 /// to an outer loop when the opportunity is there and it is safe.
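/// For example (an illustrative case, not a full description): if the insert
/// point is inside a loop but both LHS and RHS are loop-invariant and the loop
/// has a preheader, the binop is emitted in the preheader instead, provided
/// IsSafeToHoist is set.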
219 Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
220 Value *LHS, Value *RHS,
221 SCEV::NoWrapFlags Flags, bool IsSafeToHoist) {
222 // Fold a binop with constant operands.
223 if (Constant *CLHS = dyn_cast<Constant>(LHS))
224 if (Constant *CRHS = dyn_cast<Constant>(RHS))
225 return ConstantExpr::get(Opcode, CLHS, CRHS);
226
227 // Do a quick scan to see if we have this binop nearby. If so, reuse it.
228 unsigned ScanLimit = 6;
229 BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
230 // Scanning starts from the last instruction before the insertion point.
231 BasicBlock::iterator IP = Builder.GetInsertPoint();
232 if (IP != BlockBegin) {
233 --IP;
234 for (; ScanLimit; --IP, --ScanLimit) {
235 // Don't count dbg.value against the ScanLimit, to avoid perturbing the
236 // generated code.
237 if (isa<DbgInfoIntrinsic>(IP))
238 ScanLimit++;
239
240 auto canGenerateIncompatiblePoison = [&Flags](Instruction *I) {
241 // Ensure that no-wrap flags match.
242 if (isa<OverflowingBinaryOperator>(I)) {
243 if (I->hasNoSignedWrap() != (Flags & SCEV::FlagNSW))
244 return true;
245 if (I->hasNoUnsignedWrap() != (Flags & SCEV::FlagNUW))
246 return true;
247 }
248 // Conservatively, do not use any instruction which has any exact
249 // flag set.
250 if (isa<PossiblyExactOperator>(I) && I->isExact())
251 return true;
252 return false;
253 };
254 if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
255 IP->getOperand(1) == RHS && !canGenerateIncompatiblePoison(&*IP))
256 return &*IP;
257 if (IP == BlockBegin) break;
258 }
259 }
260
261 // Save the original insertion point so we can restore it when we're done.
262 DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
263 SCEVInsertPointGuard Guard(Builder, this);
264
265 if (IsSafeToHoist) {
266 // Move the insertion point out of as many loops as we can.
267 while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
268 if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
269 BasicBlock *Preheader = L->getLoopPreheader();
270 if (!Preheader) break;
271
272 // Ok, move up a level.
273 Builder.SetInsertPoint(Preheader->getTerminator());
274 }
275 }
276
277 // If we haven't found this binop, insert it.
278 Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
279 BO->setDebugLoc(Loc);
280 if (Flags & SCEV::FlagNUW)
281 BO->setHasNoUnsignedWrap();
282 if (Flags & SCEV::FlagNSW)
283 BO->setHasNoSignedWrap();
284
285 return BO;
286 }
287
288 /// FactorOutConstant - Test if S is divisible by Factor, using signed
289 /// division. If so, update S with Factor divided out and return true.
290 /// S need not be evenly divisible if a reasonable remainder can be
291 /// computed.
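///
/// For example (illustrative): with S = (4 * %n) and Factor = 4, S becomes %n;
/// with S = 10 and Factor = 4, S becomes 2 and 2 is added to Remainder.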
292 static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
293 const SCEV *Factor, ScalarEvolution &SE,
294 const DataLayout &DL) {
295 // Everything is divisible by one.
296 if (Factor->isOne())
297 return true;
298
299 // x/x == 1.
300 if (S == Factor) {
301 S = SE.getConstant(S->getType(), 1);
302 return true;
303 }
304
305 // For a Constant, check for a multiple of the given factor.
306 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
307 // 0/x == 0.
308 if (C->isZero())
309 return true;
310 // Check for divisibility.
311 if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
312 ConstantInt *CI =
313 ConstantInt::get(SE.getContext(), C->getAPInt().sdiv(FC->getAPInt()));
314 // If the quotient is zero and the remainder is non-zero, reject
315 // the value at this scale. It will be considered for subsequent
316 // smaller scales.
317 if (!CI->isZero()) {
318 const SCEV *Div = SE.getConstant(CI);
319 S = Div;
320 Remainder = SE.getAddExpr(
321 Remainder, SE.getConstant(C->getAPInt().srem(FC->getAPInt())));
322 return true;
323 }
324 }
325 }
326
327 // In a Mul, check if there is a constant operand which is a multiple
328 // of the given factor.
329 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
330 // Size is known, check if there is a constant operand which is a multiple
331 // of the given factor. If so, we can factor it.
332 if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor))
333 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
334 if (!C->getAPInt().srem(FC->getAPInt())) {
335 SmallVector<const SCEV *, 4> NewMulOps(M->operands());
336 NewMulOps[0] = SE.getConstant(C->getAPInt().sdiv(FC->getAPInt()));
337 S = SE.getMulExpr(NewMulOps);
338 return true;
339 }
340 }
341
342 // In an AddRec, check if both start and step are divisible.
343 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
344 const SCEV *Step = A->getStepRecurrence(SE);
345 const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
346 if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
347 return false;
348 if (!StepRem->isZero())
349 return false;
350 const SCEV *Start = A->getStart();
351 if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
352 return false;
353 S = SE.getAddRecExpr(Start, Step, A->getLoop(),
354 A->getNoWrapFlags(SCEV::FlagNW));
355 return true;
356 }
357
358 return false;
359 }
360
361 /// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
362 /// is the number of SCEVAddRecExprs present, which are kept at the end of
363 /// the list.
364 ///
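/// For example (illustrative): given Ops = (%a, 2, 3, {0,+,4}<L>), the
/// non-addrec part (%a + 2 + 3) is folded by ScalarEvolution and re-split,
/// and the addrec is re-appended at the end.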
365 static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
366 Type *Ty,
367 ScalarEvolution &SE) {
368 unsigned NumAddRecs = 0;
369 for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
370 ++NumAddRecs;
371 // Group Ops into non-addrecs and addrecs.
372 SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
373 SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
374 // Let ScalarEvolution sort and simplify the non-addrecs list.
375 const SCEV *Sum = NoAddRecs.empty() ?
376 SE.getConstant(Ty, 0) :
377 SE.getAddExpr(NoAddRecs);
378 // If it returned an add, use the operands. Otherwise it simplified
379 // the sum into a single value, so just use that.
380 Ops.clear();
381 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
382 Ops.append(Add->op_begin(), Add->op_end());
383 else if (!Sum->isZero())
384 Ops.push_back(Sum);
385 // Then append the addrecs.
386 Ops.append(AddRecs.begin(), AddRecs.end());
387 }
388
389 /// SplitAddRecs - Flatten a list of add operands, moving addrec start values
390 /// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
391 /// This helps expose more opportunities for folding parts of the expressions
392 /// into GEP indices.
393 ///
394 static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
395 Type *Ty,
396 ScalarEvolution &SE) {
397 // Find the addrecs.
398 SmallVector<const SCEV *, 8> AddRecs;
399 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
400 while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
401 const SCEV *Start = A->getStart();
402 if (Start->isZero()) break;
403 const SCEV *Zero = SE.getConstant(Ty, 0);
404 AddRecs.push_back(SE.getAddRecExpr(Zero,
405 A->getStepRecurrence(SE),
406 A->getLoop(),
407 A->getNoWrapFlags(SCEV::FlagNW)));
408 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
409 Ops[i] = Zero;
410 Ops.append(Add->op_begin(), Add->op_end());
411 e += Add->getNumOperands();
412 } else {
413 Ops[i] = Start;
414 }
415 }
416 if (!AddRecs.empty()) {
417 // Add the addrecs onto the end of the list.
418 Ops.append(AddRecs.begin(), AddRecs.end());
419 // Resort the operand list, moving any constants to the front.
420 SimplifyAddOperands(Ops, Ty, SE);
421 }
422 }
423
424 /// expandAddToGEP - Expand an addition expression with a pointer type into
425 /// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
426 /// BasicAliasAnalysis and other passes analyze the result. See the rules
427 /// for getelementptr vs. inttoptr in
428 /// http://llvm.org/docs/LangRef.html#pointeraliasing
429 /// for details.
430 ///
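/// For example (an illustrative sketch): adding a byte offset %off to an i8*
/// base %p is emitted as "getelementptr i8, i8* %p, i64 %off" rather than as
/// ptrtoint+add+inttoptr.
///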
431 /// Design note: The correctness of using getelementptr here depends on
432 /// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
433 /// they may introduce pointer arithmetic which may not be safely converted
434 /// into getelementptr.
435 ///
436 /// Design note: It might seem desirable for this function to be more
437 /// loop-aware. If some of the indices are loop-invariant while others
438 /// aren't, it might seem desirable to emit multiple GEPs, keeping the
439 /// loop-invariant portions of the overall computation outside the loop.
440 /// However, there are a few reasons this is not done here. Hoisting simple
441 /// arithmetic is a low-level optimization that often isn't very
442 /// important until late in the optimization process. In fact, passes
443 /// like InstructionCombining will combine GEPs, even if it means
444 /// pushing loop-invariant computation down into loops, so even if the
445 /// GEPs were split here, the work would quickly be undone. The
446 /// LoopStrengthReduction pass, which is usually run quite late (and
447 /// after the last InstructionCombining pass), takes care of hoisting
448 /// loop-invariant portions of expressions, after considering what
449 /// can be folded using target addressing modes.
450 ///
451 Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
452 const SCEV *const *op_end,
453 PointerType *PTy,
454 Type *Ty,
455 Value *V) {
456 SmallVector<Value *, 4> GepIndices;
457 SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
458 bool AnyNonZeroIndices = false;
459
460 // Split AddRecs up into parts as either of the parts may be usable
461 // without the other.
462 SplitAddRecs(Ops, Ty, SE);
463
464 Type *IntIdxTy = DL.getIndexType(PTy);
465
466 // For opaque pointers, always generate i8 GEP.
467 if (!PTy->isOpaque()) {
468 // Descend down the pointer's type and attempt to convert the other
469 // operands into GEP indices, at each level. The first index in a GEP
470 // indexes into the array implied by the pointer operand; the rest of
471 // the indices index into the element or field type selected by the
472 // preceding index.
473 Type *ElTy = PTy->getElementType();
474 for (;;) {
475 // If the scale size is not 0, attempt to factor out a scale for
476 // array indexing.
477 SmallVector<const SCEV *, 8> ScaledOps;
478 if (ElTy->isSized()) {
479 const SCEV *ElSize = SE.getSizeOfExpr(IntIdxTy, ElTy);
480 if (!ElSize->isZero()) {
481 SmallVector<const SCEV *, 8> NewOps;
482 for (const SCEV *Op : Ops) {
483 const SCEV *Remainder = SE.getConstant(Ty, 0);
484 if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
485 // Op now has ElSize factored out.
486 ScaledOps.push_back(Op);
487 if (!Remainder->isZero())
488 NewOps.push_back(Remainder);
489 AnyNonZeroIndices = true;
490 } else {
491 // The operand was not divisible, so add it to the list of
492 // operands we'll scan next iteration.
493 NewOps.push_back(Op);
494 }
495 }
496 // If we made any changes, update Ops.
497 if (!ScaledOps.empty()) {
498 Ops = NewOps;
499 SimplifyAddOperands(Ops, Ty, SE);
500 }
501 }
502 }
503
504 // Record the scaled array index for this level of the type. If
505 // we didn't find any operands that could be factored, tentatively
506 // assume that element zero was selected (since the zero offset
507 // would obviously be folded away).
508 Value *Scaled =
509 ScaledOps.empty()
510 ? Constant::getNullValue(Ty)
511 : expandCodeForImpl(SE.getAddExpr(ScaledOps), Ty, false);
512 GepIndices.push_back(Scaled);
513
514 // Collect struct field index operands.
515 while (StructType *STy = dyn_cast<StructType>(ElTy)) {
516 bool FoundFieldNo = false;
517 // An empty struct has no fields.
518 if (STy->getNumElements() == 0) break;
519 // Field offsets are known. See if a constant offset falls within any of
520 // the struct fields.
521 if (Ops.empty())
522 break;
523 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
524 if (SE.getTypeSizeInBits(C->getType()) <= 64) {
525 const StructLayout &SL = *DL.getStructLayout(STy);
526 uint64_t FullOffset = C->getValue()->getZExtValue();
527 if (FullOffset < SL.getSizeInBytes()) {
528 unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
529 GepIndices.push_back(
530 ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
531 ElTy = STy->getTypeAtIndex(ElIdx);
532 Ops[0] =
533 SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
534 AnyNonZeroIndices = true;
535 FoundFieldNo = true;
536 }
537 }
538 // If no struct field offsets were found, tentatively assume that
539 // field zero was selected (since the zero offset would obviously
540 // be folded away).
541 if (!FoundFieldNo) {
542 ElTy = STy->getTypeAtIndex(0u);
543 GepIndices.push_back(
544 Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
545 }
546 }
547
548 if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
549 ElTy = ATy->getElementType();
550 else
551 // FIXME: Handle VectorType.
552 // E.g., if ElTy is a scalable vector, then ElSize is not a compile-time
553 // constant and therefore cannot be factored out. The generated IR is less
554 // ideal: base 'V' is cast to i8* and an ugly getelementptr is done over that.
555 break;
556 }
557 }
558
559 // If none of the operands were convertible to proper GEP indices, cast
560 // the base to i8* and do an ugly getelementptr with that. It's still
561 // better than ptrtoint+arithmetic+inttoptr at least.
562 if (!AnyNonZeroIndices) {
563 // Cast the base to i8*.
564 if (!PTy->isOpaque())
565 V = InsertNoopCastOfTo(V,
566 Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));
567
568 assert(!isa<Instruction>(V) ||
569 SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));
570
571 // Expand the operands for a plain byte offset.
572 Value *Idx = expandCodeForImpl(SE.getAddExpr(Ops), Ty, false);
573
574 // Fold a GEP with constant operands.
575 if (Constant *CLHS = dyn_cast<Constant>(V))
576 if (Constant *CRHS = dyn_cast<Constant>(Idx))
577 return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
578 CLHS, CRHS);
579
580 // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
581 unsigned ScanLimit = 6;
582 BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
583 // Scanning starts from the last instruction before the insertion point.
584 BasicBlock::iterator IP = Builder.GetInsertPoint();
585 if (IP != BlockBegin) {
586 --IP;
587 for (; ScanLimit; --IP, --ScanLimit) {
588 // Don't count dbg.value against the ScanLimit, to avoid perturbing the
589 // generated code.
590 if (isa<DbgInfoIntrinsic>(IP))
591 ScanLimit++;
592 if (IP->getOpcode() == Instruction::GetElementPtr &&
593 IP->getOperand(0) == V && IP->getOperand(1) == Idx)
594 return &*IP;
595 if (IP == BlockBegin) break;
596 }
597 }
598
599 // Save the original insertion point so we can restore it when we're done.
600 SCEVInsertPointGuard Guard(Builder, this);
601
602 // Move the insertion point out of as many loops as we can.
603 while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
604 if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
605 BasicBlock *Preheader = L->getLoopPreheader();
606 if (!Preheader) break;
607
608 // Ok, move up a level.
609 Builder.SetInsertPoint(Preheader->getTerminator());
610 }
611
612 // Emit a GEP.
613 return Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
614 }
615
616 {
617 SCEVInsertPointGuard Guard(Builder, this);
618
619 // Move the insertion point out of as many loops as we can.
620 while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
621 if (!L->isLoopInvariant(V)) break;
622
623 bool AnyIndexNotLoopInvariant = any_of(
624 GepIndices, [L](Value *Op) { return !L->isLoopInvariant(Op); });
625
626 if (AnyIndexNotLoopInvariant)
627 break;
628
629 BasicBlock *Preheader = L->getLoopPreheader();
630 if (!Preheader) break;
631
632 // Ok, move up a level.
633 Builder.SetInsertPoint(Preheader->getTerminator());
634 }
635
636 // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
637 // because ScalarEvolution may have changed the address arithmetic to
638 // compute a value which is beyond the end of the allocated object.
639 Value *Casted = V;
640 if (V->getType() != PTy)
641 Casted = InsertNoopCastOfTo(Casted, PTy);
642 Value *GEP = Builder.CreateGEP(PTy->getElementType(), Casted, GepIndices,
643 "scevgep");
644 Ops.push_back(SE.getUnknown(GEP));
645 }
646
647 return expand(SE.getAddExpr(Ops));
648 }
649
650 Value *SCEVExpander::expandAddToGEP(const SCEV *Op, PointerType *PTy, Type *Ty,
651 Value *V) {
652 const SCEV *const Ops[1] = {Op};
653 return expandAddToGEP(Ops, Ops + 1, PTy, Ty, V);
654 }
655
656 /// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
657 /// SCEV expansion. If they are nested, this is the most nested. If they are
658 /// neighboring, pick the later.
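/// For example (illustrative): if B is nested inside A, B is returned; for two
/// unrelated loops where A's header dominates B's header, the later loop B is
/// returned.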
659 static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
660 DominatorTree &DT) {
661 if (!A) return B;
662 if (!B) return A;
663 if (A->contains(B)) return B;
664 if (B->contains(A)) return A;
665 if (DT.dominates(A->getHeader(), B->getHeader())) return B;
666 if (DT.dominates(B->getHeader(), A->getHeader())) return A;
667 return A; // Arbitrarily break the tie.
668 }
669
670 /// getRelevantLoop - Get the most relevant loop associated with the given
671 /// expression, according to PickMostRelevantLoop.
672 const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
673 // Test whether we've already computed the most relevant loop for this SCEV.
674 auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
675 if (!Pair.second)
676 return Pair.first->second;
677
678 if (isa<SCEVConstant>(S))
679 // A constant has no relevant loops.
680 return nullptr;
681 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
682 if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
683 return Pair.first->second = SE.LI.getLoopFor(I->getParent());
684 // A non-instruction has no relevant loops.
685 return nullptr;
686 }
687 if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
688 const Loop *L = nullptr;
689 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
690 L = AR->getLoop();
691 for (const SCEV *Op : N->operands())
692 L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
693 return RelevantLoops[N] = L;
694 }
695 if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
696 const Loop *Result = getRelevantLoop(C->getOperand());
697 return RelevantLoops[C] = Result;
698 }
699 if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
700 const Loop *Result = PickMostRelevantLoop(
701 getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
702 return RelevantLoops[D] = Result;
703 }
704 llvm_unreachable("Unexpected SCEV type!");
705 }
706
707 namespace {
708
709 /// LoopCompare - Compare loops by PickMostRelevantLoop.
710 class LoopCompare {
711 DominatorTree &DT;
712 public:
713 explicit LoopCompare(DominatorTree &dt) : DT(dt) {}
714
715 bool operator()(std::pair<const Loop *, const SCEV *> LHS,
716 std::pair<const Loop *, const SCEV *> RHS) const {
717 // Put pointer operands first, so the add expansion sees the pointer base before any offsets.
718 if (LHS.second->getType()->isPointerTy() !=
719 RHS.second->getType()->isPointerTy())
720 return LHS.second->getType()->isPointerTy();
721
722 // Compare loops with PickMostRelevantLoop.
723 if (LHS.first != RHS.first)
724 return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;
725
726 // If one operand is a non-constant negative and the other is not,
727 // put the non-constant negative on the right so that a sub can
728 // be used instead of a negate and add.
729 if (LHS.second->isNonConstantNegative()) {
730 if (!RHS.second->isNonConstantNegative())
731 return false;
732 } else if (RHS.second->isNonConstantNegative())
733 return true;
734
735 // Otherwise they are equivalent according to this comparison.
736 return false;
737 }
738 };
739
740 }
741
742 Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
743 Type *Ty = SE.getEffectiveSCEVType(S->getType());
744
745 // Collect all the add operands in a loop, along with their associated loops.
746 // Iterate in reverse so that constants are emitted last, all else equal, and
747 // so that pointer operands are inserted first, which the code below relies on
748 // to form more involved GEPs.
749 SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
750 for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
751 E(S->op_begin()); I != E; ++I)
752 OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));
753
754 // Sort by loop. Use a stable sort so that constants follow non-constants and
755 // pointer operands precede non-pointer operands.
756 llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));
757
758 // Emit instructions to add all the operands. Hoist as much as possible
759 // out of loops, and form meaningful getelementptrs where possible.
760 Value *Sum = nullptr;
761 for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
762 const Loop *CurLoop = I->first;
763 const SCEV *Op = I->second;
764 if (!Sum) {
765 // This is the first operand. Just expand it.
766 Sum = expand(Op);
767 ++I;
768 continue;
769 }
770
771 assert(!Op->getType()->isPointerTy() && "Only first op can be pointer");
772 if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
773 // The running sum expression is a pointer. Try to form a getelementptr
774 // at this level with that as the base.
775 SmallVector<const SCEV *, 4> NewOps;
776 for (; I != E && I->first == CurLoop; ++I) {
777 // If the operand is a SCEVUnknown and not an instruction, peek through
778 // it, to enable more of it to be folded into the GEP.
779 const SCEV *X = I->second;
780 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
781 if (!isa<Instruction>(U->getValue()))
782 X = SE.getSCEV(U->getValue());
783 NewOps.push_back(X);
784 }
785 Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
786 } else if (Op->isNonConstantNegative()) {
787 // Instead of doing a negate and add, just do a subtract.
788 Value *W = expandCodeForImpl(SE.getNegativeSCEV(Op), Ty, false);
789 Sum = InsertNoopCastOfTo(Sum, Ty);
790 Sum = InsertBinop(Instruction::Sub, Sum, W, SCEV::FlagAnyWrap,
791 /*IsSafeToHoist*/ true);
792 ++I;
793 } else {
794 // A simple add.
795 Value *W = expandCodeForImpl(Op, Ty, false);
796 Sum = InsertNoopCastOfTo(Sum, Ty);
797 // Canonicalize a constant to the RHS.
798 if (isa<Constant>(Sum)) std::swap(Sum, W);
799 Sum = InsertBinop(Instruction::Add, Sum, W, S->getNoWrapFlags(),
800 /*IsSafeToHoist*/ true);
801 ++I;
802 }
803 }
804
805 return Sum;
806 }
807
808 Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
809 Type *Ty = SE.getEffectiveSCEVType(S->getType());
810
811 // Collect all the mul operands in a loop, along with their associated loops.
812 // Iterate in reverse so that constants are emitted last, all else equal.
813 SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
814 for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
815 E(S->op_begin()); I != E; ++I)
816 OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));
817
818 // Sort by loop. Use a stable sort so that constants follow non-constants.
819 llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));
820
821 // Emit instructions to mul all the operands. Hoist as much as possible
822 // out of loops.
823 Value *Prod = nullptr;
824 auto I = OpsAndLoops.begin();
825
826 // Expand the calculation of X pow N in the following manner:
827 // Let N = P1 + P2 + ... + PK, where all P are powers of 2. Then:
828 // X pow N = (X pow P1) * (X pow P2) * ... * (X pow PK).
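// For example (illustrative): X pow 13 = (X pow 8) * (X pow 4) * X,
// since 13 = 8 + 4 + 1.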
829 const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops, &Ty]() {
830 auto E = I;
831 // Calculate how many times the same operand from the same loop is included
832 // into this power.
833 uint64_t Exponent = 0;
834 const uint64_t MaxExponent = UINT64_MAX >> 1;
835 // No one sane will ever try to calculate such huge exponents, but if we
836 // need this, we stop on UINT64_MAX / 2 because we need to exit the loop
837 // below when the power of 2 exceeds our Exponent, and we want it to be
838 // 1u << 31 at most to not deal with unsigned overflow.
839 while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
840 ++Exponent;
841 ++E;
842 }
843 assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");
844
845 // Calculate powers with exponents 1, 2, 4, 8, etc. and include those that
846 // are needed in the result.
847 Value *P = expandCodeForImpl(I->second, Ty, false);
848 Value *Result = nullptr;
849 if (Exponent & 1)
850 Result = P;
851 for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
852 P = InsertBinop(Instruction::Mul, P, P, SCEV::FlagAnyWrap,
853 /*IsSafeToHoist*/ true);
854 if (Exponent & BinExp)
855 Result = Result ? InsertBinop(Instruction::Mul, Result, P,
856 SCEV::FlagAnyWrap,
857 /*IsSafeToHoist*/ true)
858 : P;
859 }
860
861 I = E;
862 assert(Result && "Nothing was expanded?");
863 return Result;
864 };
865
866 while (I != OpsAndLoops.end()) {
867 if (!Prod) {
868 // This is the first operand. Just expand it.
869 Prod = ExpandOpBinPowN();
870 } else if (I->second->isAllOnesValue()) {
871 // Instead of doing a multiply by negative one, just do a negate.
872 Prod = InsertNoopCastOfTo(Prod, Ty);
873 Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod,
874 SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
875 ++I;
876 } else {
877 // A simple mul.
878 Value *W = ExpandOpBinPowN();
879 Prod = InsertNoopCastOfTo(Prod, Ty);
880 // Canonicalize a constant to the RHS.
881 if (isa<Constant>(Prod)) std::swap(Prod, W);
882 const APInt *RHS;
883 if (match(W, m_Power2(RHS))) {
884 // Canonicalize Prod*(1<<C) to Prod<<C.
885 assert(!Ty->isVectorTy() && "vector types are not SCEVable");
886 auto NWFlags = S->getNoWrapFlags();
887 // Clear the nsw flag if the shl would produce a poison value.
888 if (RHS->logBase2() == RHS->getBitWidth() - 1)
889 NWFlags = ScalarEvolution::clearFlags(NWFlags, SCEV::FlagNSW);
890 Prod = InsertBinop(Instruction::Shl, Prod,
891 ConstantInt::get(Ty, RHS->logBase2()), NWFlags,
892 /*IsSafeToHoist*/ true);
893 } else {
894 Prod = InsertBinop(Instruction::Mul, Prod, W, S->getNoWrapFlags(),
895 /*IsSafeToHoist*/ true);
896 }
897 }
898 }
899
900 return Prod;
901 }
902
903 Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
904 Type *Ty = SE.getEffectiveSCEVType(S->getType());
905
906 Value *LHS = expandCodeForImpl(S->getLHS(), Ty, false);
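// If the divisor is a constant power of two, emit a logical shift right
// instead; e.g. (illustrative) a udiv by 8 becomes an lshr by 3.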
907 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
908 const APInt &RHS = SC->getAPInt();
909 if (RHS.isPowerOf2())
910 return InsertBinop(Instruction::LShr, LHS,
911 ConstantInt::get(Ty, RHS.logBase2()),
912 SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
913 }
914
915 Value *RHS = expandCodeForImpl(S->getRHS(), Ty, false);
916 return InsertBinop(Instruction::UDiv, LHS, RHS, SCEV::FlagAnyWrap,
917 /*IsSafeToHoist*/ SE.isKnownNonZero(S->getRHS()));
918 }
919
920 /// Determine if this is a well-behaved chain of instructions leading back to
921 /// the PHI. If so, it may be reused by expanded expressions.
922 bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
923 const Loop *L) {
924 if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
925 (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
926 return false;
927 // If any of the operands don't dominate the insert position, bail.
928 // Addrec operands are always loop-invariant, so this can only happen
929 // if there are instructions which haven't been hoisted.
930 if (L == IVIncInsertLoop) {
931 for (Use &Op : llvm::drop_begin(IncV->operands()))
932 if (Instruction *OInst = dyn_cast<Instruction>(Op))
933 if (!SE.DT.dominates(OInst, IVIncInsertPos))
934 return false;
935 }
936 // Advance to the next instruction.
937 IncV = dyn_cast<Instruction>(IncV->getOperand(0));
938 if (!IncV)
939 return false;
940
941 if (IncV->mayHaveSideEffects())
942 return false;
943
944 if (IncV == PN)
945 return true;
946
947 return isNormalAddRecExprPHI(PN, IncV, L);
948 }
949
950 /// getIVIncOperand returns an induction variable increment's induction
951 /// variable operand.
952 ///
953 /// If allowScale is set, any type of GEP is allowed as long as the nonIV
954 /// operands dominate InsertPos.
955 ///
956 /// If allowScale is not set, ensure that a GEP increment conforms to one of the
957 /// simple patterns generated by getAddRecExprPHILiterally and
958 /// expandAddtoGEP. If the pattern isn't recognized, return NULL.
959 Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
960 Instruction *InsertPos,
961 bool allowScale) {
962 if (IncV == InsertPos)
963 return nullptr;
964
965 switch (IncV->getOpcode()) {
966 default:
967 return nullptr;
968 // Check for a simple Add/Sub or GEP of a loop invariant step.
969 case Instruction::Add:
970 case Instruction::Sub: {
971 Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
972 if (!OInst || SE.DT.dominates(OInst, InsertPos))
973 return dyn_cast<Instruction>(IncV->getOperand(0));
974 return nullptr;
975 }
976 case Instruction::BitCast:
977 return dyn_cast<Instruction>(IncV->getOperand(0));
978 case Instruction::GetElementPtr:
979 for (Use &U : llvm::drop_begin(IncV->operands())) {
980 if (isa<Constant>(U))
981 continue;
982 if (Instruction *OInst = dyn_cast<Instruction>(U)) {
983 if (!SE.DT.dominates(OInst, InsertPos))
984 return nullptr;
985 }
986 if (allowScale) {
987 // allow any kind of GEP as long as it can be hoisted.
988 continue;
989 }
990 // This must be a pointer addition of constants (pretty), which is already
991 // handled, or some number of address-size elements (ugly). Ugly geps
992 // have 2 operands. i1* is used by the expander to represent an
993 // address-size element.
994 if (IncV->getNumOperands() != 2)
995 return nullptr;
996 unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
997 if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
998 && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
999 return nullptr;
1000 break;
1001 }
1002 return dyn_cast<Instruction>(IncV->getOperand(0));
1003 }
1004 }
1005
1006 /// If the insert point of the current builder or any of the builders on the
1007 /// stack of saved builders has 'I' as its insert point, update it to point to
1008 /// the instruction after 'I'. This is intended to be used when the instruction
1009 /// 'I' is being moved. If this fixup is not done and 'I' is moved to a
1010 /// different block, the inconsistent insert point (with a mismatched
1011 /// Instruction and Block) can lead to an instruction being inserted in a block
1012 /// other than its parent.
1013 void SCEVExpander::fixupInsertPoints(Instruction *I) {
1014 BasicBlock::iterator It(*I);
1015 BasicBlock::iterator NewInsertPt = std::next(It);
1016 if (Builder.GetInsertPoint() == It)
1017 Builder.SetInsertPoint(&*NewInsertPt);
1018 for (auto *InsertPtGuard : InsertPointGuards)
1019 if (InsertPtGuard->GetInsertPoint() == It)
1020 InsertPtGuard->SetInsertPoint(NewInsertPt);
1021 }
1022
1023 /// hoistStep - Attempt to hoist a simple IV increment above InsertPos to make
1024 /// it available to other uses in this loop. Recursively hoist any operands,
1025 /// until we reach a value that dominates InsertPos.
1026 bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
1027 if (SE.DT.dominates(IncV, InsertPos))
1028 return true;
1029
1030 // InsertPos must itself dominate IncV so that IncV's new position satisfies
1031 // its existing users.
1032 if (isa<PHINode>(InsertPos) ||
1033 !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
1034 return false;
1035
1036 if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
1037 return false;
1038
1039 // Check that the chain of IV operands leading back to Phi can be hoisted.
1040 SmallVector<Instruction*, 4> IVIncs;
1041 for(;;) {
1042 Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
1043 if (!Oper)
1044 return false;
1045 // IncV is safe to hoist.
1046 IVIncs.push_back(IncV);
1047 IncV = Oper;
1048 if (SE.DT.dominates(IncV, InsertPos))
1049 break;
1050 }
1051 for (auto I = IVIncs.rbegin(), E = IVIncs.rend(); I != E; ++I) {
1052 fixupInsertPoints(*I);
1053 (*I)->moveBefore(InsertPos);
1054 }
1055 return true;
1056 }
1057
1058 /// Determine if this cyclic phi is in a form that would have been generated by
1059 /// LSR. We don't care if the phi was actually expanded in this pass, as long
1060 /// as it is in a low-cost form, for example, no implied multiplication. This
1061 /// should match any patterns generated by getAddRecExprPHILiterally and
1062 /// expandAddtoGEP.
1063 bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
1064 const Loop *L) {
1065 for(Instruction *IVOper = IncV;
1066 (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
1067 /*allowScale=*/false));) {
1068 if (IVOper == PN)
1069 return true;
1070 }
1071 return false;
1072 }
1073
1074 /// expandIVInc - Expand an IV increment at Builder's current InsertPos.
1075 /// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
1076 /// need to materialize IV increments elsewhere to handle difficult situations.
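/// For example (illustrative): an integer IV is advanced with an add (or a sub
/// when useSubtract is set), while a pointer IV is advanced with a GEP.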
1077 Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
1078 Type *ExpandTy, Type *IntTy,
1079 bool useSubtract) {
1080 Value *IncV;
1081 // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
1082 if (ExpandTy->isPointerTy()) {
1083 PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
1084 // If the step isn't constant, don't use an implicitly scaled GEP, because
1085 // that would require a multiply inside the loop.
1086 if (!isa<ConstantInt>(StepV))
1087 GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
1088 GEPPtrTy->getAddressSpace());
1089 IncV = expandAddToGEP(SE.getSCEV(StepV), GEPPtrTy, IntTy, PN);
1090 if (IncV->getType() != PN->getType())
1091 IncV = Builder.CreateBitCast(IncV, PN->getType());
1092 } else {
1093 IncV = useSubtract ?
1094 Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
1095 Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
1096 }
1097 return IncV;
1098 }
1099
1100 /// Check whether we can cheaply express the requested SCEV in terms of
1101 /// the available PHI SCEV by truncation and/or inversion of the step.
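/// For example (illustrative): a requested {R,+,-1} can reuse an available
/// {0,+,1} phi by computing R minus the phi (InvertStep), and a narrower
/// requested addrec can reuse a wider phi via truncation.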
1102 static bool canBeCheaplyTransformed(ScalarEvolution &SE,
1103 const SCEVAddRecExpr *Phi,
1104 const SCEVAddRecExpr *Requested,
1105 bool &InvertStep) {
1106 // We can't transform to match a pointer PHI.
1107 if (Phi->getType()->isPointerTy())
1108 return false;
1109
1110 Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
1111 Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());
1112
1113 if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
1114 return false;
1115
1116 // Try truncate it if necessary.
1117 Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
1118 if (!Phi)
1119 return false;
1120
1121 // Check whether truncation will help.
1122 if (Phi == Requested) {
1123 InvertStep = false;
1124 return true;
1125 }
1126
1127 // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
1128 if (SE.getMinusSCEV(Requested->getStart(), Requested) == Phi) {
1129 InvertStep = true;
1130 return true;
1131 }
1132
1133 return false;
1134 }
1135
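/// Informally: the increment of AR can be marked nsw exactly when sign-extending
/// after the add gives the same result as adding the sign-extended operands in a
/// type of twice the bit width.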
1136 static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
1137 if (!isa<IntegerType>(AR->getType()))
1138 return false;
1139
1140 unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
1141 Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
1142 const SCEV *Step = AR->getStepRecurrence(SE);
1143 const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
1144 SE.getSignExtendExpr(AR, WideTy));
1145 const SCEV *ExtendAfterOp =
1146 SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
1147 return ExtendAfterOp == OpAfterExtend;
1148 }
1149
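/// Same idea as above, but for unsigned wrap: compare zero-extending after the
/// add with adding the zero-extended operands in twice the bit width.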
1150 static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
1151 if (!isa<IntegerType>(AR->getType()))
1152 return false;
1153
1154 unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
1155 Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
1156 const SCEV *Step = AR->getStepRecurrence(SE);
1157 const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
1158 SE.getZeroExtendExpr(AR, WideTy));
1159 const SCEV *ExtendAfterOp =
1160 SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
1161 return ExtendAfterOp == OpAfterExtend;
1162 }
1163
1164 /// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
1165 /// the base addrec, which is the addrec without any non-loop-dominating
1166 /// values, and return the PHI.
1167 PHINode *
1168 SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
1169 const Loop *L,
1170 Type *ExpandTy,
1171 Type *IntTy,
1172 Type *&TruncTy,
1173 bool &InvertStep) {
1174 assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");
1175
1176 // Reuse a previously-inserted PHI, if present.
1177 BasicBlock *LatchBlock = L->getLoopLatch();
1178 if (LatchBlock) {
1179 PHINode *AddRecPhiMatch = nullptr;
1180 Instruction *IncV = nullptr;
1181 TruncTy = nullptr;
1182 InvertStep = false;
1183
1184 // Only try partially matching scevs that need truncation and/or
1185 // step-inversion if we know this loop is outside the current loop.
1186 bool TryNonMatchingSCEV =
1187 IVIncInsertLoop &&
1188 SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
1189
1190 for (PHINode &PN : L->getHeader()->phis()) {
1191 if (!SE.isSCEVable(PN.getType()))
1192 continue;
1193
1194 // We should not look for an incomplete PHI. Getting SCEV for an incomplete
1195 // PHI has no meaning at all.
1196 if (!PN.isComplete()) {
1197 SCEV_DEBUG_WITH_TYPE(
1198 DebugType, dbgs() << "One incomplete PHI is found: " << PN << "\n");
1199 continue;
1200 }
1201
1202 const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
1203 if (!PhiSCEV)
1204 continue;
1205
1206 bool IsMatchingSCEV = PhiSCEV == Normalized;
1207 // We only handle truncation and inversion of phi recurrences for the
1208 // expanded expression if the expanded expression's loop dominates the
1209 // loop we insert to. Check now, so we can bail out early.
1210 if (!IsMatchingSCEV && !TryNonMatchingSCEV)
1211 continue;
1212
1213 // TODO: this possibly can be reworked to avoid this cast at all.
1214 Instruction *TempIncV =
1215 dyn_cast<Instruction>(PN.getIncomingValueForBlock(LatchBlock));
1216 if (!TempIncV)
1217 continue;
1218
1219 // Check whether we can reuse this PHI node.
1220 if (LSRMode) {
1221 if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
1222 continue;
1223 } else {
1224 if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
1225 continue;
1226 }
1227
1228 // Stop if we have found an exact match SCEV.
1229 if (IsMatchingSCEV) {
1230 IncV = TempIncV;
1231 TruncTy = nullptr;
1232 InvertStep = false;
1233 AddRecPhiMatch = &PN;
1234 break;
1235 }
1236
1237 // Try whether the phi can be translated into the requested form
1238 // (truncated and/or offset by a constant).
1239 if ((!TruncTy || InvertStep) &&
1240 canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
1241 // Record the phi node. But don't stop; we might find an exact match
1242 // later.
1243 AddRecPhiMatch = &PN;
1244 IncV = TempIncV;
1245 TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
1246 }
1247 }
1248
1249 if (AddRecPhiMatch) {
1250 // Ok, the add recurrence looks usable.
1251 // Remember this PHI, even in post-inc mode.
1252 InsertedValues.insert(AddRecPhiMatch);
1253 // Remember the increment.
1254 rememberInstruction(IncV);
1255 // Those values were not actually inserted but re-used.
1256 ReusedValues.insert(AddRecPhiMatch);
1257 ReusedValues.insert(IncV);
1258 return AddRecPhiMatch;
1259 }
1260 }
1261
1262 // Save the original insertion point so we can restore it when we're done.
1263 SCEVInsertPointGuard Guard(Builder, this);
1264
1265 // Another AddRec may need to be recursively expanded below. For example, if
1266 // this AddRec is quadratic, the StepV may itself be an AddRec in this
1267 // loop. Remove this loop from the PostIncLoops set before expanding such
1268 // AddRecs. Otherwise, we cannot find a valid position for the step
1269 // (i.e. StepV can never dominate its loop header). Ideally, we could do
1270 // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
1271 // so it's not worth implementing SmallPtrSet::swap.
1272 PostIncLoopSet SavedPostIncLoops = PostIncLoops;
1273 PostIncLoops.clear();
1274
1275 // Expand code for the start value into the loop preheader.
1276 assert(L->getLoopPreheader() &&
1277 "Can't expand add recurrences without a loop preheader!");
1278 Value *StartV =
1279 expandCodeForImpl(Normalized->getStart(), ExpandTy,
1280 L->getLoopPreheader()->getTerminator(), false);
1281
1282 // StartV must have been inserted into L's preheader to dominate the new
1283 // phi.
1284 assert(!isa<Instruction>(StartV) ||
1285 SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
1286 L->getHeader()));
1287
1288 // Expand code for the step value. Do this before creating the PHI so that PHI
1289 // reuse code doesn't see an incomplete PHI.
1290 const SCEV *Step = Normalized->getStepRecurrence(SE);
1291 // If the stride is negative, insert a sub instead of an add for the increment
1292 // (unless it's a constant, because subtracts of constants are canonicalized
1293 // to adds).
1294 bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
1295 if (useSubtract)
1296 Step = SE.getNegativeSCEV(Step);
1297 // Expand the step somewhere that dominates the loop header.
1298 Value *StepV = expandCodeForImpl(
1299 Step, IntTy, &*L->getHeader()->getFirstInsertionPt(), false);
1300
1301 // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
1302 // we actually do emit an addition. It does not apply if we emit a
1303 // subtraction.
1304 bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
1305 bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);
1306
1307 // Create the PHI.
1308 BasicBlock *Header = L->getHeader();
1309 Builder.SetInsertPoint(Header, Header->begin());
1310 pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1311 PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
1312 Twine(IVName) + ".iv");
1313
1314 // Create the step instructions and populate the PHI.
1315 for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1316 BasicBlock *Pred = *HPI;
1317
1318 // Add a start value.
1319 if (!L->contains(Pred)) {
1320 PN->addIncoming(StartV, Pred);
1321 continue;
1322 }
1323
1324 // Create a step value and add it to the PHI.
1325 // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
1326 // instructions at IVIncInsertPos.
1327 Instruction *InsertPos = L == IVIncInsertLoop ?
1328 IVIncInsertPos : Pred->getTerminator();
1329 Builder.SetInsertPoint(InsertPos);
1330 Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1331
1332 if (isa<OverflowingBinaryOperator>(IncV)) {
1333 if (IncrementIsNUW)
1334 cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
1335 if (IncrementIsNSW)
1336 cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
1337 }
1338 PN->addIncoming(IncV, Pred);
1339 }
1340
1341 // After expanding subexpressions, restore the PostIncLoops set so the caller
1342 // can ensure that IVIncrement dominates the current uses.
1343 PostIncLoops = SavedPostIncLoops;
1344
1345 // Remember this PHI, even in post-inc mode. LSR SCEV-based salvaging is most
1346 // effective when we are able to use an IV inserted here, so record it.
1347 InsertedValues.insert(PN);
1348 InsertedIVs.push_back(PN);
1349 return PN;
1350 }
1351
1352 Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
1353 Type *STy = S->getType();
1354 Type *IntTy = SE.getEffectiveSCEVType(STy);
1355 const Loop *L = S->getLoop();
1356
1357 // Determine a normalized form of this expression, which is the expression
1358 // before any post-inc adjustment is made.
1359 const SCEVAddRecExpr *Normalized = S;
1360 if (PostIncLoops.count(L)) {
1361 PostIncLoopSet Loops;
1362 Loops.insert(L);
1363 Normalized = cast<SCEVAddRecExpr>(normalizeForPostIncUse(S, Loops, SE));
1364 }
1365
1366 // Strip off any non-loop-dominating component from the addrec start.
1367 const SCEV *Start = Normalized->getStart();
1368 const SCEV *PostLoopOffset = nullptr;
1369 if (!SE.properlyDominates(Start, L->getHeader())) {
1370 PostLoopOffset = Start;
1371 Start = SE.getConstant(Normalized->getType(), 0);
1372 Normalized = cast<SCEVAddRecExpr>(
1373 SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
1374 Normalized->getLoop(),
1375 Normalized->getNoWrapFlags(SCEV::FlagNW)));
1376 }
1377
1378 // Strip off any non-loop-dominating component from the addrec step.
1379 const SCEV *Step = Normalized->getStepRecurrence(SE);
1380 const SCEV *PostLoopScale = nullptr;
1381 if (!SE.dominates(Step, L->getHeader())) {
1382 PostLoopScale = Step;
1383 Step = SE.getConstant(Normalized->getType(), 1);
1384 if (!Start->isZero()) {
1385 // The normalization below assumes that Start is constant zero, so if it
1386 // isn't, re-associate Start to PostLoopOffset.
1387 assert(!PostLoopOffset && "Start not-null but PostLoopOffset set?");
1388 PostLoopOffset = Start;
1389 Start = SE.getConstant(Normalized->getType(), 0);
1390 }
1391 Normalized =
1392 cast<SCEVAddRecExpr>(SE.getAddRecExpr(
1393 Start, Step, Normalized->getLoop(),
1394 Normalized->getNoWrapFlags(SCEV::FlagNW)));
1395 }
1396
1397 // Expand the core addrec. If we need post-loop scaling, force it to
1398 // expand to an integer type to avoid the need for additional casting.
1399 Type *ExpandTy = PostLoopScale ? IntTy : STy;
1400 // We can't use a pointer type for the addrec if the pointer type is
1401 // non-integral.
1402 Type *AddRecPHIExpandTy =
1403 DL.isNonIntegralPointerType(STy) ? Normalized->getType() : ExpandTy;
1404
1405 // In some cases, we decide to reuse an existing phi node but need to truncate
1406 // it and/or invert the step.
1407 Type *TruncTy = nullptr;
1408 bool InvertStep = false;
1409 PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
1410 IntTy, TruncTy, InvertStep);
1411
1412 // Accommodate post-inc mode, if necessary.
1413 Value *Result;
1414 if (!PostIncLoops.count(L))
1415 Result = PN;
1416 else {
1417 // In PostInc mode, use the post-incremented value.
1418 BasicBlock *LatchBlock = L->getLoopLatch();
1419 assert(LatchBlock && "PostInc mode requires a unique loop latch!");
1420 Result = PN->getIncomingValueForBlock(LatchBlock);
1421
1422 // We might be introducing a new use of the post-inc IV that is not poison
1423 // safe, in which case we should drop poison generating flags. Only keep
1424 // those flags for which SCEV has proven that they always hold.
1425 if (isa<OverflowingBinaryOperator>(Result)) {
1426 auto *I = cast<Instruction>(Result);
1427 if (!S->hasNoUnsignedWrap())
1428 I->setHasNoUnsignedWrap(false);
1429 if (!S->hasNoSignedWrap())
1430 I->setHasNoSignedWrap(false);
1431 }
1432
1433 // For an expansion to use the postinc form, the client must call
1434 // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
1435 // or dominated by IVIncInsertPos.
1436 if (isa<Instruction>(Result) &&
1437 !SE.DT.dominates(cast<Instruction>(Result),
1438 &*Builder.GetInsertPoint())) {
1439 // The induction variable's postinc expansion does not dominate this use.
1440 // IVUsers tries to prevent this case, so it is rare. However, it can
1441 // happen when an IVUser outside the loop is not dominated by the latch
1442 // block. Adjusting IVIncInsertPos before expansion begins cannot handle
1443 // all cases. Consider a phi outside the loop whose operand is replaced during
1444 // expansion with the value of the postinc user. Without fundamentally
1445 // changing the way postinc users are tracked, the only remedy is
1446 // inserting an extra IV increment. StepV might fold into PostLoopOffset,
1447 // but hopefully expandCodeFor handles that.
1448 bool useSubtract =
1449 !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
1450 if (useSubtract)
1451 Step = SE.getNegativeSCEV(Step);
1452 Value *StepV;
1453 {
1454 // Expand the step somewhere that dominates the loop header.
1455 SCEVInsertPointGuard Guard(Builder, this);
1456 StepV = expandCodeForImpl(
1457 Step, IntTy, &*L->getHeader()->getFirstInsertionPt(), false);
1458 }
1459 Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1460 }
1461 }
1462
1463 // We have decided to reuse an induction variable of a dominating loop. Apply
1464 // truncation and/or inversion of the step.
1465 if (TruncTy) {
1466 Type *ResTy = Result->getType();
1467 // Normalize the result type.
1468 if (ResTy != SE.getEffectiveSCEVType(ResTy))
1469 Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
1470 // Truncate the result.
1471 if (TruncTy != Result->getType())
1472 Result = Builder.CreateTrunc(Result, TruncTy);
1473
1474 // Invert the result.
1475 if (InvertStep)
1476 Result = Builder.CreateSub(
1477 expandCodeForImpl(Normalized->getStart(), TruncTy, false), Result);
1478 }
1479
1480 // Re-apply any non-loop-dominating scale.
1481 if (PostLoopScale) {
1482 assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
1483 Result = InsertNoopCastOfTo(Result, IntTy);
1484 Result = Builder.CreateMul(Result,
1485 expandCodeForImpl(PostLoopScale, IntTy, false));
1486 }
1487
1488 // Re-apply any non-loop-dominating offset.
1489 if (PostLoopOffset) {
1490 if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
1491 if (Result->getType()->isIntegerTy()) {
1492 Value *Base = expandCodeForImpl(PostLoopOffset, ExpandTy, false);
1493 Result = expandAddToGEP(SE.getUnknown(Result), PTy, IntTy, Base);
1494 } else {
1495 Result = expandAddToGEP(PostLoopOffset, PTy, IntTy, Result);
1496 }
1497 } else {
1498 Result = InsertNoopCastOfTo(Result, IntTy);
1499 Result = Builder.CreateAdd(
1500 Result, expandCodeForImpl(PostLoopOffset, IntTy, false));
1501 }
1502 }
1503
1504 return Result;
1505 }
1506
1507 Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
1508 // In canonical mode we compute the addrec as an expression of a canonical IV
1509 // using evaluateAtIteration and expand the resulting SCEV expression. This
1510 // way we avoid introducing new IVs to carry on the computation of the addrec
1511 // throughout the loop.
1512 //
1513 // For nested addrecs evaluateAtIteration might need a canonical IV of a
1514 // type wider than the addrec itself. Emitting a canonical IV of the
1515 // proper type might produce non-legal types, for example expanding an i64
1516 // {0,+,2,+,1} addrec would need an i65 canonical IV. To avoid this just fall
1517 // back to non-canonical mode for nested addrecs.
1518 if (!CanonicalMode || (S->getNumOperands() > 2))
1519 return expandAddRecExprLiterally(S);
1520
1521 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1522 const Loop *L = S->getLoop();
1523
1524 // First check for an existing canonical IV in a suitable type.
1525 PHINode *CanonicalIV = nullptr;
1526 if (PHINode *PN = L->getCanonicalInductionVariable())
1527 if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
1528 CanonicalIV = PN;
1529
1530 // Rewrite an AddRec in terms of the canonical induction variable, if
1531 // its type is narrower.
1532 if (CanonicalIV &&
1533 SE.getTypeSizeInBits(CanonicalIV->getType()) > SE.getTypeSizeInBits(Ty) &&
1534 !S->getType()->isPointerTy()) {
1535 SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
1536 for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
1537 NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
1538 Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
1539 S->getNoWrapFlags(SCEV::FlagNW)));
1540 BasicBlock::iterator NewInsertPt =
1541 findInsertPointAfter(cast<Instruction>(V), &*Builder.GetInsertPoint());
1542 V = expandCodeForImpl(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
1543 &*NewInsertPt, false);
1544 return V;
1545 }
1546
1547 // {X,+,F} --> X + {0,+,F}
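// E.g. (illustrative): a pointer-typed {%base,+,4}<L> is emitted as a GEP off
// the expanded pointer base, while an integer {7,+,%f}<L> becomes
// 7 + <expansion of {0,+,%f}<L>>.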
1548 if (!S->getStart()->isZero()) {
1549 if (PointerType *PTy = dyn_cast<PointerType>(S->getType())) {
1550 Value *StartV = expand(SE.getPointerBase(S));
1551 assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
1552 return expandAddToGEP(SE.removePointerBase(S), PTy, Ty, StartV);
1553 }
1554
1555 SmallVector<const SCEV *, 4> NewOps(S->operands());
1556 NewOps[0] = SE.getConstant(Ty, 0);
1557 const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
1558 S->getNoWrapFlags(SCEV::FlagNW));
1559
1560 // Just do a normal add. Pre-expand the operands to suppress folding.
1561 //
1562 // The LHS and RHS values are factored out of the expand call to make the
1563 // output independent of the argument evaluation order.
1564 const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
1565 const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
1566 return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
1567 }
1568
1569 // If we don't yet have a canonical IV, create one.
1570 if (!CanonicalIV) {
1571 // Create and insert the PHI node for the induction variable in the
1572 // specified loop.
1573 BasicBlock *Header = L->getHeader();
1574 pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1575 CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
1576 &Header->front());
1577 rememberInstruction(CanonicalIV);
1578
1579 SmallSet<BasicBlock *, 4> PredSeen;
1580 Constant *One = ConstantInt::get(Ty, 1);
1581 for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1582 BasicBlock *HP = *HPI;
1583 if (!PredSeen.insert(HP).second) {
1584 // There must be an incoming value for each predecessor, even the
1585 // duplicates!
1586 CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
1587 continue;
1588 }
1589
1590 if (L->contains(HP)) {
1591 // Insert a unit add instruction right before the terminator
1592 // corresponding to the back-edge.
1593 Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
1594 "indvar.next",
1595 HP->getTerminator());
1596 Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
1597 rememberInstruction(Add);
1598 CanonicalIV->addIncoming(Add, HP);
1599 } else {
1600 CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
1601 }
1602 }
1603 }
1604
1605 // {0,+,1} --> Insert a canonical induction variable into the loop!
1606 if (S->isAffine() && S->getOperand(1)->isOne()) {
1607 assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
1608 "IVs with types different from the canonical IV should "
1609 "already have been handled!");
1610 return CanonicalIV;
1611 }
1612
1613 // {0,+,F} --> {0,+,1} * F
1614
1615 // If this is a simple linear addrec, emit it now as a special case.
1616 if (S->isAffine()) // {0,+,F} --> i*F
1617 return
1618 expand(SE.getTruncateOrNoop(
1619 SE.getMulExpr(SE.getUnknown(CanonicalIV),
1620 SE.getNoopOrAnyExtend(S->getOperand(1),
1621 CanonicalIV->getType())),
1622 Ty));
1623
1624 // If this is a chain of recurrences, turn it into a closed form, using the
1625 // folders, then expandCodeFor the closed form. This allows the folders to
1626 // simplify the expression without having to build a bunch of special code
1627 // into this folder.
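// Illustrative example (assumed): with a canonical IV i = {0,+,1}<L>, the
// chain {0,+,1,+,1}<L> evaluated at iteration i folds to i + i*(i-1)/2 via
// the usual binomial-coefficient closed form, and that folded expression is
// what gets expanded below.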
1628 const SCEV *IH = SE.getUnknown(CanonicalIV); // Get I as a "symbolic" SCEV.
1629
1630 // Promote S up to the canonical IV type, if the cast is foldable.
1631 const SCEV *NewS = S;
1632 const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
1633 if (isa<SCEVAddRecExpr>(Ext))
1634 NewS = Ext;
1635
1636 const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
1637 //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";
1638
1639 // Truncate the result down to the original type, if needed.
1640 const SCEV *T = SE.getTruncateOrNoop(V, Ty);
1641 return expand(T);
1642 }
1643
1644 Value *SCEVExpander::visitPtrToIntExpr(const SCEVPtrToIntExpr *S) {
1645 Value *V =
1646 expandCodeForImpl(S->getOperand(), S->getOperand()->getType(), false);
1647 return ReuseOrCreateCast(V, S->getType(), CastInst::PtrToInt,
1648 GetOptimalInsertionPointForCastOf(V));
1649 }
1650
1651 Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
1652 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1653 Value *V = expandCodeForImpl(
1654 S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
1655 false);
1656 return Builder.CreateTrunc(V, Ty);
1657 }
1658
1659 Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
1660 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1661 Value *V = expandCodeForImpl(
1662 S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
1663 false);
1664 return Builder.CreateZExt(V, Ty);
1665 }
1666
1667 Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
1668 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1669 Value *V = expandCodeForImpl(
1670 S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
1671 false);
1672 return Builder.CreateSExt(V, Ty);
1673 }
1674
1675 Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
1676 Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
1677 Type *Ty = LHS->getType();
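  // Illustrative sketch (assumed value names): a three-operand smax is
  // reduced right-to-left, so for operands %a, %b, %c this emits roughly
  //   %m1 = call i64 @llvm.smax.i64(i64 %c, i64 %b)
  //   %m2 = call i64 @llvm.smax.i64(i64 %m1, i64 %a)
  // falling back to icmp+select whenever the running type is a pointer.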
1678 for (int i = S->getNumOperands()-2; i >= 0; --i) {
1679 // In the case of mixed integer and pointer types, do the
1680 // rest of the comparisons as integer.
1681 Type *OpTy = S->getOperand(i)->getType();
1682 if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
1683 Ty = SE.getEffectiveSCEVType(Ty);
1684 LHS = InsertNoopCastOfTo(LHS, Ty);
1685 }
1686 Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
1687 Value *Sel;
1688 if (Ty->isIntegerTy())
1689 Sel = Builder.CreateIntrinsic(Intrinsic::smax, {Ty}, {LHS, RHS},
1690 /*FMFSource=*/nullptr, "smax");
1691 else {
1692 Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
1693 Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
1694 }
1695 LHS = Sel;
1696 }
1697 // In the case of mixed integer and pointer types, cast the
1698 // final result back to the pointer type.
1699 if (LHS->getType() != S->getType())
1700 LHS = InsertNoopCastOfTo(LHS, S->getType());
1701 return LHS;
1702 }
1703
1704 Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
1705 Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
1706 Type *Ty = LHS->getType();
1707 for (int i = S->getNumOperands()-2; i >= 0; --i) {
1708 // In the case of mixed integer and pointer types, do the
1709 // rest of the comparisons as integer.
1710 Type *OpTy = S->getOperand(i)->getType();
1711 if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
1712 Ty = SE.getEffectiveSCEVType(Ty);
1713 LHS = InsertNoopCastOfTo(LHS, Ty);
1714 }
1715 Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
1716 Value *Sel;
1717 if (Ty->isIntegerTy())
1718 Sel = Builder.CreateIntrinsic(Intrinsic::umax, {Ty}, {LHS, RHS},
1719 /*FMFSource=*/nullptr, "umax");
1720 else {
1721 Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
1722 Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
1723 }
1724 LHS = Sel;
1725 }
1726 // In the case of mixed integer and pointer types, cast the
1727 // final result back to the pointer type.
1728 if (LHS->getType() != S->getType())
1729 LHS = InsertNoopCastOfTo(LHS, S->getType());
1730 return LHS;
1731 }
1732
1733 Value *SCEVExpander::visitSMinExpr(const SCEVSMinExpr *S) {
1734 Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
1735 Type *Ty = LHS->getType();
1736 for (int i = S->getNumOperands() - 2; i >= 0; --i) {
1737 // In the case of mixed integer and pointer types, do the
1738 // rest of the comparisons as integer.
1739 Type *OpTy = S->getOperand(i)->getType();
1740 if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
1741 Ty = SE.getEffectiveSCEVType(Ty);
1742 LHS = InsertNoopCastOfTo(LHS, Ty);
1743 }
1744 Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
1745 Value *Sel;
1746 if (Ty->isIntegerTy())
1747 Sel = Builder.CreateIntrinsic(Intrinsic::smin, {Ty}, {LHS, RHS},
1748 /*FMFSource=*/nullptr, "smin");
1749 else {
1750 Value *ICmp = Builder.CreateICmpSLT(LHS, RHS);
1751 Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smin");
1752 }
1753 LHS = Sel;
1754 }
1755 // In the case of mixed integer and pointer types, cast the
1756 // final result back to the pointer type.
1757 if (LHS->getType() != S->getType())
1758 LHS = InsertNoopCastOfTo(LHS, S->getType());
1759 return LHS;
1760 }
1761
1762 Value *SCEVExpander::visitUMinExpr(const SCEVUMinExpr *S) {
1763 Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
1764 Type *Ty = LHS->getType();
1765 for (int i = S->getNumOperands() - 2; i >= 0; --i) {
1766 // In the case of mixed integer and pointer types, do the
1767 // rest of the comparisons as integer.
1768 Type *OpTy = S->getOperand(i)->getType();
1769 if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
1770 Ty = SE.getEffectiveSCEVType(Ty);
1771 LHS = InsertNoopCastOfTo(LHS, Ty);
1772 }
1773 Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
1774 Value *Sel;
1775 if (Ty->isIntegerTy())
1776 Sel = Builder.CreateIntrinsic(Intrinsic::umin, {Ty}, {LHS, RHS},
1777 /*FMFSource=*/nullptr, "umin");
1778 else {
1779 Value *ICmp = Builder.CreateICmpULT(LHS, RHS);
1780 Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umin");
1781 }
1782 LHS = Sel;
1783 }
1784 // In the case of mixed integer and pointer types, cast the
1785 // final result back to the pointer type.
1786 if (LHS->getType() != S->getType())
1787 LHS = InsertNoopCastOfTo(LHS, S->getType());
1788 return LHS;
1789 }
1790
1791 Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty,
1792 Instruction *IP, bool Root) {
1793 setInsertPoint(IP);
1794 Value *V = expandCodeForImpl(SH, Ty, Root);
1795 return V;
1796 }
1797
1798 Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty, bool Root) {
1799 // Expand the code for this SCEV.
1800 Value *V = expand(SH);
1801
1802 if (PreserveLCSSA) {
1803 if (auto *Inst = dyn_cast<Instruction>(V)) {
1804 // Create a temporary instruction at the current insertion point, so we
1805 // can hand it off to the helper to create LCSSA PHIs if required for the
1806 // new use.
1807 // FIXME: Ideally formLCSSAForInstructions (used in fixupLCSSAFormFor)
1808 // would accept an insertion point and return an LCSSA phi for that
1809 // insertion point, so there is no need to insert & remove the temporary
1810 // instruction.
1811 Instruction *Tmp;
1812 if (Inst->getType()->isIntegerTy())
1813 Tmp =
1814 cast<Instruction>(Builder.CreateAdd(Inst, Inst, "tmp.lcssa.user"));
1815 else {
1816 assert(Inst->getType()->isPointerTy());
1817 Tmp = cast<Instruction>(Builder.CreatePtrToInt(
1818 Inst, Type::getInt32Ty(Inst->getContext()), "tmp.lcssa.user"));
1819 }
1820 V = fixupLCSSAFormFor(Tmp, 0);
1821
1822 // Clean up temporary instruction.
1823 InsertedValues.erase(Tmp);
1824 InsertedPostIncValues.erase(Tmp);
1825 Tmp->eraseFromParent();
1826 }
1827 }
1828
1829 InsertedExpressions[std::make_pair(SH, &*Builder.GetInsertPoint())] = V;
1830 if (Ty) {
1831 assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
1832 "non-trivial casts should be done with the SCEVs directly!");
1833 V = InsertNoopCastOfTo(V, Ty);
1834 }
1835 return V;
1836 }
1837
1838 ScalarEvolution::ValueOffsetPair
1839 SCEVExpander::FindValueInExprValueMap(const SCEV *S,
1840 const Instruction *InsertPt) {
1841 auto *Set = SE.getSCEVValues(S);
1842 // If the expansion is not in CanonicalMode and the SCEV contains any
1843 // scAddRecExpr sub-expression, the SCEV must be expanded literally.
1844 if (CanonicalMode || !SE.containsAddRecurrence(S)) {
1845 // If S is scConstant, it may be worse to reuse an existing Value.
1846 if (S->getSCEVType() != scConstant && Set) {
1847 // Choose a Value from the set which dominates the insertPt.
1848 // insertPt should be inside the Value's parent loop so as not to break
1849 // the LCSSA form.
1850 for (auto const &VOPair : *Set) {
1851 Value *V = VOPair.first;
1852 ConstantInt *Offset = VOPair.second;
1853 Instruction *EntInst = nullptr;
1854 if (V && isa<Instruction>(V) && (EntInst = cast<Instruction>(V)) &&
1855 S->getType() == V->getType() &&
1856 EntInst->getFunction() == InsertPt->getFunction() &&
1857 SE.DT.dominates(EntInst, InsertPt) &&
1858 (SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
1859 SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
1860 return {V, Offset};
1861 }
1862 }
1863 }
1864 return {nullptr, nullptr};
1865 }
1866
1867 // The expansion of SCEV will either reuse a previous Value in ExprValueMap,
1868 // or expand the SCEV literally. Specifically, if the expansion is in LSRMode,
1869 // and the SCEV contains any sub scAddRecExpr type SCEV, it will be expanded
1870 // literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
1871 // the expansion will try to reuse Value from ExprValueMap, and only when it
1872 // fails, expand the SCEV literally.
1873 Value *SCEVExpander::expand(const SCEV *S) {
1874 // Compute an insertion point for this SCEV object. Hoist the instructions
1875 // as far out in the loop nest as possible.
1876 Instruction *InsertPt = &*Builder.GetInsertPoint();
1877
1878 // We can move the insertion point only if there are no div or rem operations;
1879 // otherwise we risk moving it past the check for a zero denominator.
1880 auto SafeToHoist = [](const SCEV *S) {
1881 return !SCEVExprContains(S, [](const SCEV *S) {
1882 if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
1883 if (const auto *SC = dyn_cast<SCEVConstant>(D->getRHS()))
1884 // Division by non-zero constants can be hoisted.
1885 return SC->getValue()->isZero();
1886 // All other divisions should not be moved as they may be
1887 // divisions by zero and should be kept within the
1888 // conditions of the surrounding loops that guard their
1889 // execution (see PR35406).
1890 return true;
1891 }
1892 return false;
1893 });
1894 };
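  // For example (illustrative): an expression containing (%a /u %n) is not
  // hoisted, since %n might be zero on paths the original division never
  // reaches, whereas (%a /u 8) divides by a non-zero constant and may be
  // hoisted.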
1895 if (SafeToHoist(S)) {
1896 for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1897 L = L->getParentLoop()) {
1898 if (SE.isLoopInvariant(S, L)) {
1899 if (!L) break;
1900 if (BasicBlock *Preheader = L->getLoopPreheader())
1901 InsertPt = Preheader->getTerminator();
1902 else
1903 // LSR sets the insertion point for AddRec start/step values to the
1904 // block start to simplify value reuse, even though it's an invalid
1905 // position. SCEVExpander must correct for this in all cases.
1906 InsertPt = &*L->getHeader()->getFirstInsertionPt();
1907 } else {
1908 // If the SCEV is computable at this level, insert it into the header
1909 // after the PHIs (and after any other instructions that we've inserted
1910 // there) so that it is guaranteed to dominate any user inside the loop.
1911 if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1912 InsertPt = &*L->getHeader()->getFirstInsertionPt();
1913
1914 while (InsertPt->getIterator() != Builder.GetInsertPoint() &&
1915 (isInsertedInstruction(InsertPt) ||
1916 isa<DbgInfoIntrinsic>(InsertPt))) {
1917 InsertPt = &*std::next(InsertPt->getIterator());
1918 }
1919 break;
1920 }
1921 }
1922 }
1923
1924 // Check to see if we already expanded this here.
1925 auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
1926 if (I != InsertedExpressions.end())
1927 return I->second;
1928
1929 SCEVInsertPointGuard Guard(Builder, this);
1930 Builder.SetInsertPoint(InsertPt);
1931
1932 // Expand the expression into instructions.
1933 ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, InsertPt);
1934 Value *V = VO.first;
1935
1936 if (!V)
1937 V = visit(S);
1938 else if (VO.second) {
1939 if (PointerType *Vty = dyn_cast<PointerType>(V->getType())) {
1940 Type *Ety = Vty->getPointerElementType();
1941 int64_t Offset = VO.second->getSExtValue();
1942 int64_t ESize = SE.getTypeSizeInBits(Ety);
1943 if ((Offset * 8) % ESize == 0) {
1944 ConstantInt *Idx =
1945 ConstantInt::getSigned(VO.second->getType(), -(Offset * 8) / ESize);
1946 V = Builder.CreateGEP(Ety, V, Idx, "scevgep");
1947 } else {
1948 ConstantInt *Idx =
1949 ConstantInt::getSigned(VO.second->getType(), -Offset);
1950 unsigned AS = Vty->getAddressSpace();
1951 V = Builder.CreateBitCast(V, Type::getInt8PtrTy(SE.getContext(), AS));
1952 V = Builder.CreateGEP(Type::getInt8Ty(SE.getContext()), V, Idx,
1953 "uglygep");
1954 V = Builder.CreateBitCast(V, Vty);
1955 }
1956 } else {
1957 V = Builder.CreateSub(V, VO.second);
1958 }
1959 }
1960 // Remember the expanded value for this SCEV at this location.
1961 //
1962 // This is independent of PostIncLoops. The mapped value simply materializes
1963 // the expression at this insertion point. If the mapped value happened to be
1964 // a postinc expansion, it could be reused by a non-postinc user, but only if
1965 // its insertion point was already at the head of the loop.
1966 InsertedExpressions[std::make_pair(S, InsertPt)] = V;
1967 return V;
1968 }
1969
1970 void SCEVExpander::rememberInstruction(Value *I) {
1971 auto DoInsert = [this](Value *V) {
1972 if (!PostIncLoops.empty())
1973 InsertedPostIncValues.insert(V);
1974 else
1975 InsertedValues.insert(V);
1976 };
1977 DoInsert(I);
1978
1979 if (!PreserveLCSSA)
1980 return;
1981
1982 if (auto *Inst = dyn_cast<Instruction>(I)) {
1983 // A new instruction has been added, which might introduce new uses outside
1984 // a defining loop. Fix LCSSA form for each operand of the new instruction,
1985 // if required.
1986 for (unsigned OpIdx = 0, OpEnd = Inst->getNumOperands(); OpIdx != OpEnd;
1987 OpIdx++)
1988 fixupLCSSAFormFor(Inst, OpIdx);
1989 }
1990 }
1991
1992 /// replaceCongruentIVs - Check for congruent phis in this loop header and
1993 /// replace them with their most canonical representative. Return the number of
1994 /// phis eliminated.
1995 ///
1996 /// This does not depend on any SCEVExpander state but should be used in
1997 /// the same context in which SCEVExpander is used.
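///
/// Illustrative example (assumed): if a loop header has two phis that SCEV
/// proves are both {0,+,1}<L>, the less canonical one is replaced by the
/// other (inserting a trunc when the types differ), and the dead phi plus, in
/// the common case, its increment are queued in DeadInsts.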
1998 unsigned
1999 SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
2000 SmallVectorImpl<WeakTrackingVH> &DeadInsts,
2001 const TargetTransformInfo *TTI) {
2002 // Collect the loop-header phis; when TTI is available they are sorted so
2002 // wider integer phis come first and pointer phis go last.
2003 SmallVector<PHINode*, 8> Phis;
2004 for (PHINode &PN : L->getHeader()->phis())
2005 Phis.push_back(&PN);
2006
2007 if (TTI)
2008 llvm::sort(Phis, [](Value *LHS, Value *RHS) {
2009 // Put pointers at the back and make sure pointer < pointer = false.
2010 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
2011 return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
2012 return RHS->getType()->getPrimitiveSizeInBits().getFixedSize() <
2013 LHS->getType()->getPrimitiveSizeInBits().getFixedSize();
2014 });
2015
2016 unsigned NumElim = 0;
2017 DenseMap<const SCEV *, PHINode *> ExprToIVMap;
2018 // Process phis from wide to narrow. Map wide phis to their truncation
2019 // so narrow phis can reuse them.
2020 for (PHINode *Phi : Phis) {
2021 auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
2022 if (Value *V = SimplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
2023 return V;
2024 if (!SE.isSCEVable(PN->getType()))
2025 return nullptr;
2026 auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
2027 if (!Const)
2028 return nullptr;
2029 return Const->getValue();
2030 };
2031
2032 // Fold constant phis. They may be congruent to other constant phis and
2033 // would confuse the logic below that expects proper IVs.
2034 if (Value *V = SimplifyPHINode(Phi)) {
2035 if (V->getType() != Phi->getType())
2036 continue;
2037 Phi->replaceAllUsesWith(V);
2038 DeadInsts.emplace_back(Phi);
2039 ++NumElim;
2040 SCEV_DEBUG_WITH_TYPE(DebugType,
2041 dbgs() << "INDVARS: Eliminated constant iv: " << *Phi
2042 << '\n');
2043 continue;
2044 }
2045
2046 if (!SE.isSCEVable(Phi->getType()))
2047 continue;
2048
2049 PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
2050 if (!OrigPhiRef) {
2051 OrigPhiRef = Phi;
2052 if (Phi->getType()->isIntegerTy() && TTI &&
2053 TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
2054 // This phi can be freely truncated to the narrowest phi type. Map the
2055 // truncated expression to it so it will be reused for narrow types.
2056 const SCEV *TruncExpr =
2057 SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
2058 ExprToIVMap[TruncExpr] = Phi;
2059 }
2060 continue;
2061 }
2062
2063 // Replacing a pointer phi with an integer phi or vice-versa doesn't make
2064 // sense.
2065 if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
2066 continue;
2067
2068 if (BasicBlock *LatchBlock = L->getLoopLatch()) {
2069 Instruction *OrigInc = dyn_cast<Instruction>(
2070 OrigPhiRef->getIncomingValueForBlock(LatchBlock));
2071 Instruction *IsomorphicInc =
2072 dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
2073
2074 if (OrigInc && IsomorphicInc) {
2075 // If this phi has the same width but is more canonical, replace the
2076 // original with it. As part of the "more canonical" determination,
2077 // respect a prior decision to use an IV chain.
2078 if (OrigPhiRef->getType() == Phi->getType() &&
2079 !(ChainedPhis.count(Phi) ||
2080 isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
2081 (ChainedPhis.count(Phi) ||
2082 isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
2083 std::swap(OrigPhiRef, Phi);
2084 std::swap(OrigInc, IsomorphicInc);
2085 }
2086 // Replacing the congruent phi is sufficient because acyclic
2087 // redundancy elimination, CSE/GVN, should handle the
2088 // rest. However, once SCEV proves that a phi is congruent,
2089 // it's often the head of an IV user cycle that is isomorphic
2090 // with the original phi. It's worth eagerly cleaning up the
2091 // common case of a single IV increment so that DeleteDeadPHIs
2092 // can remove cycles that had postinc uses.
2093 const SCEV *TruncExpr =
2094 SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
2095 if (OrigInc != IsomorphicInc &&
2096 TruncExpr == SE.getSCEV(IsomorphicInc) &&
2097 SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) &&
2098 hoistIVInc(OrigInc, IsomorphicInc)) {
2099 SCEV_DEBUG_WITH_TYPE(
2100 DebugType, dbgs() << "INDVARS: Eliminated congruent iv.inc: "
2101 << *IsomorphicInc << '\n');
2102 Value *NewInc = OrigInc;
2103 if (OrigInc->getType() != IsomorphicInc->getType()) {
2104 Instruction *IP = nullptr;
2105 if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
2106 IP = &*PN->getParent()->getFirstInsertionPt();
2107 else
2108 IP = OrigInc->getNextNode();
2109
2110 IRBuilder<> Builder(IP);
2111 Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
2112 NewInc = Builder.CreateTruncOrBitCast(
2113 OrigInc, IsomorphicInc->getType(), IVName);
2114 }
2115 IsomorphicInc->replaceAllUsesWith(NewInc);
2116 DeadInsts.emplace_back(IsomorphicInc);
2117 }
2118 }
2119 }
2120 SCEV_DEBUG_WITH_TYPE(DebugType,
2121 dbgs() << "INDVARS: Eliminated congruent iv: " << *Phi
2122 << '\n');
2123 SCEV_DEBUG_WITH_TYPE(
2124 DebugType, dbgs() << "INDVARS: Original iv: " << *OrigPhiRef << '\n');
2125 ++NumElim;
2126 Value *NewIV = OrigPhiRef;
2127 if (OrigPhiRef->getType() != Phi->getType()) {
2128 IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
2129 Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
2130 NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
2131 }
2132 Phi->replaceAllUsesWith(NewIV);
2133 DeadInsts.emplace_back(Phi);
2134 }
2135 return NumElim;
2136 }
2137
2138 Optional<ScalarEvolution::ValueOffsetPair>
2139 SCEVExpander::getRelatedExistingExpansion(const SCEV *S, const Instruction *At,
2140 Loop *L) {
2141 using namespace llvm::PatternMatch;
2142
2143 SmallVector<BasicBlock *, 4> ExitingBlocks;
2144 L->getExitingBlocks(ExitingBlocks);
2145
2146 // Look for suitable value in simple conditions at the loop exits.
2147 for (BasicBlock *BB : ExitingBlocks) {
2148 ICmpInst::Predicate Pred;
2149 Instruction *LHS, *RHS;
2150
2151 if (!match(BB->getTerminator(),
2152 m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
2153 m_BasicBlock(), m_BasicBlock())))
2154 continue;
2155
2156 if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
2157 return ScalarEvolution::ValueOffsetPair(LHS, nullptr);
2158
2159 if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
2160 return ScalarEvolution::ValueOffsetPair(RHS, nullptr);
2161 }
2162
2163 // Use the same logic expand() uses to reuse a previous Value from
2164 // ExprValueMap.
2165 ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, At);
2166 if (VO.first)
2167 return VO;
2168
2169 // There is potential to make this significantly smarter, but this simple
2170 // heuristic already gets some interesting cases.
2171
2172 // Could not find a suitable value.
2173 return None;
2174 }
2175
2176 template<typename T> static InstructionCost costAndCollectOperands(
2177 const SCEVOperand &WorkItem, const TargetTransformInfo &TTI,
2178 TargetTransformInfo::TargetCostKind CostKind,
2179 SmallVectorImpl<SCEVOperand> &Worklist) {
2180
2181 const T *S = cast<T>(WorkItem.S);
2182 InstructionCost Cost = 0;
2183 // Object to help map SCEV operands to expanded IR instructions.
2184 struct OperationIndices {
2185 OperationIndices(unsigned Opc, size_t min, size_t max) :
2186 Opcode(Opc), MinIdx(min), MaxIdx(max) { }
2187 unsigned Opcode;
2188 size_t MinIdx;
2189 size_t MaxIdx;
2190 };
2191
2192 // Collect the operations of all the instructions that will be needed to
2193 // expand the SCEVExpr. This is so that when we come to cost the operands,
2194 // we know what the generated user(s) will be.
2195 SmallVector<OperationIndices, 2> Operations;
2196
2197 auto CastCost = [&](unsigned Opcode) -> InstructionCost {
2198 Operations.emplace_back(Opcode, 0, 0);
2199 return TTI.getCastInstrCost(Opcode, S->getType(),
2200 S->getOperand(0)->getType(),
2201 TTI::CastContextHint::None, CostKind);
2202 };
2203
2204 auto ArithCost = [&](unsigned Opcode, unsigned NumRequired,
2205 unsigned MinIdx = 0,
2206 unsigned MaxIdx = 1) -> InstructionCost {
2207 Operations.emplace_back(Opcode, MinIdx, MaxIdx);
2208 return NumRequired *
2209 TTI.getArithmeticInstrCost(Opcode, S->getType(), CostKind);
2210 };
2211
2212 auto CmpSelCost = [&](unsigned Opcode, unsigned NumRequired, unsigned MinIdx,
2213 unsigned MaxIdx) -> InstructionCost {
2214 Operations.emplace_back(Opcode, MinIdx, MaxIdx);
2215 Type *OpType = S->getOperand(0)->getType();
2216 return NumRequired * TTI.getCmpSelInstrCost(
2217 Opcode, OpType, CmpInst::makeCmpResultType(OpType),
2218 CmpInst::BAD_ICMP_PREDICATE, CostKind);
2219 };
2220
2221 switch (S->getSCEVType()) {
2222 case scCouldNotCompute:
2223 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
2224 case scUnknown:
2225 case scConstant:
2226 return 0;
2227 case scPtrToInt:
2228 Cost = CastCost(Instruction::PtrToInt);
2229 break;
2230 case scTruncate:
2231 Cost = CastCost(Instruction::Trunc);
2232 break;
2233 case scZeroExtend:
2234 Cost = CastCost(Instruction::ZExt);
2235 break;
2236 case scSignExtend:
2237 Cost = CastCost(Instruction::SExt);
2238 break;
2239 case scUDivExpr: {
2240 unsigned Opcode = Instruction::UDiv;
2241 if (auto *SC = dyn_cast<SCEVConstant>(S->getOperand(1)))
2242 if (SC->getAPInt().isPowerOf2())
2243 Opcode = Instruction::LShr;
2244 Cost = ArithCost(Opcode, 1);
2245 break;
2246 }
2247 case scAddExpr:
2248 Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
2249 break;
2250 case scMulExpr:
2251 // TODO: this is a very pessimistic cost model for Mul, because of the
2252 // binary powering (Bin Pow) algorithm actually used by the expander;
2253 // see SCEVExpander::visitMulExpr(), ExpandOpBinPowN().
2254 Cost = ArithCost(Instruction::Mul, S->getNumOperands() - 1);
2255 break;
2256 case scSMaxExpr:
2257 case scUMaxExpr:
2258 case scSMinExpr:
2259 case scUMinExpr: {
2260 // FIXME: should this ask for the cost of the intrinsics instead?
2261 Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
2262 Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
2263 break;
2264 }
2265 case scAddRecExpr: {
2266 // In this polynomial, we may have some zero operands, and we shouldn't
2267 // really charge for those. So how many non-zero coefficients are there?
2268 int NumTerms = llvm::count_if(S->operands(), [](const SCEV *Op) {
2269 return !Op->isZero();
2270 });
2271
2272 assert(NumTerms >= 1 && "Polynomial should have at least one term.");
2273 assert(!(*std::prev(S->operands().end()))->isZero() &&
2274 "Last operand should not be zero");
2275
2276 // Ignoring the constant term (operand 0), how many of the coefficients are u> 1?
2277 int NumNonZeroDegreeNonOneTerms =
2278 llvm::count_if(S->operands(), [](const SCEV *Op) {
2279 auto *SConst = dyn_cast<SCEVConstant>(Op);
2280 return !SConst || SConst->getAPInt().ugt(1);
2281 });
2282
2283 // Much like with a normal add expr, the polynomial will require
2284 // one less addition than the number of its terms.
2285 InstructionCost AddCost = ArithCost(Instruction::Add, NumTerms - 1,
2286 /*MinIdx*/ 1, /*MaxIdx*/ 1);
2287 // Here, *each* one of those will require a multiplication.
2288 InstructionCost MulCost =
2289 ArithCost(Instruction::Mul, NumNonZeroDegreeNonOneTerms);
2290 Cost = AddCost + MulCost;
2291
2292 // What is the degree of this polynomial?
2293 int PolyDegree = S->getNumOperands() - 1;
2294 assert(PolyDegree >= 1 && "Should be at least affine.");
2295
2296 // The final term will be:
2297 // Op_{PolyDegree} * x ^ {PolyDegree}
2298 // Where x ^ {PolyDegree} will again require PolyDegree-1 mul operations.
2299 // Note that x ^ {PolyDegree} = x * x ^ {PolyDegree-1} so charging for
2300 // x ^ {PolyDegree} will give us x ^ {2} .. x ^ {PolyDegree-1} for free.
2301 // FIXME: this is conservatively correct, but might be overly pessimistic.
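  // Illustrative tally under this model (assumed example): for {0,+,2,+,3} we
  // charge one add for the two non-zero terms, two muls for the non-one
  // coefficients, and one extra round of muls for x^2, i.e. roughly
  // 1 add + 4 muls before per-target costs are applied.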
2302 Cost += MulCost * (PolyDegree - 1);
2303 break;
2304 }
2305 }
2306
2307 for (auto &CostOp : Operations) {
2308 for (auto SCEVOp : enumerate(S->operands())) {
2309 // Clamp the index to account for multiple IR operations being chained.
2310 size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
2311 size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
2312 Worklist.emplace_back(CostOp.Opcode, OpIdx, SCEVOp.value());
2313 }
2314 }
2315 return Cost;
2316 }
2317
2318 bool SCEVExpander::isHighCostExpansionHelper(
2319 const SCEVOperand &WorkItem, Loop *L, const Instruction &At,
2320 InstructionCost &Cost, unsigned Budget, const TargetTransformInfo &TTI,
2321 SmallPtrSetImpl<const SCEV *> &Processed,
2322 SmallVectorImpl<SCEVOperand> &Worklist) {
2323 if (Cost > Budget)
2324 return true; // Already run out of budget, give up.
2325
2326 const SCEV *S = WorkItem.S;
2327 // Was the cost of expansion of this expression already accounted for?
2328 if (!isa<SCEVConstant>(S) && !Processed.insert(S).second)
2329 return false; // We have already accounted for this expression.
2330
2331 // If we can find an existing value for this scev available at the point "At"
2332 // then consider the expression cheap.
2333 if (getRelatedExistingExpansion(S, &At, L))
2334 return false; // Consider the expression to be free.
2335
2336 TargetTransformInfo::TargetCostKind CostKind =
2337 L->getHeader()->getParent()->hasMinSize()
2338 ? TargetTransformInfo::TCK_CodeSize
2339 : TargetTransformInfo::TCK_RecipThroughput;
2340
2341 switch (S->getSCEVType()) {
2342 case scCouldNotCompute:
2343 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
2344 case scUnknown:
2345 // Assume to be zero-cost.
2346 return false;
2347 case scConstant: {
2348 // Only evaluate the costs of constants when optimizing for size.
2349 if (CostKind != TargetTransformInfo::TCK_CodeSize)
2350 return false;
2351 const APInt &Imm = cast<SCEVConstant>(S)->getAPInt();
2352 Type *Ty = S->getType();
2353 Cost += TTI.getIntImmCostInst(
2354 WorkItem.ParentOpcode, WorkItem.OperandIdx, Imm, Ty, CostKind);
2355 return Cost > Budget;
2356 }
2357 case scTruncate:
2358 case scPtrToInt:
2359 case scZeroExtend:
2360 case scSignExtend: {
2361 Cost +=
2362 costAndCollectOperands<SCEVCastExpr>(WorkItem, TTI, CostKind, Worklist);
2363 return false; // Will answer upon next entry into this function.
2364 }
2365 case scUDivExpr: {
2366 // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
2367 // HowManyLessThans produced to compute a precise expression, rather than a
2368 // UDiv from the user's code. If we can't find a UDiv in the code with some
2369 // simple searching, we need to account for its cost.
2370
2371 // At the beginning of this function we already tried to find existing
2372 // value for plain 'S'. Now try to look up 'S + 1' since it is a common
2373 // pattern involving division. This is just a simple search heuristic.
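  // E.g. (illustrative): when the backedge-taken count is (%n /u 4), the loop
  // often already computes the trip count (%n /u 4) + 1, so looking up S + 1
  // can find an existing value even though S itself has no IR counterpart.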
2374 if (getRelatedExistingExpansion(
2375 SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), &At, L))
2376 return false; // Consider it to be free.
2377
2378 Cost +=
2379 costAndCollectOperands<SCEVUDivExpr>(WorkItem, TTI, CostKind, Worklist);
2380 return false; // Will answer upon next entry into this function.
2381 }
2382 case scAddExpr:
2383 case scMulExpr:
2384 case scUMaxExpr:
2385 case scSMaxExpr:
2386 case scUMinExpr:
2387 case scSMinExpr: {
2388 assert(cast<SCEVNAryExpr>(S)->getNumOperands() > 1 &&
2389 "Nary expr should have more than 1 operand.");
2390 // The simple nary expr will require one less op (or pair of ops)
2391 // than the number of its terms.
2392 Cost +=
2393 costAndCollectOperands<SCEVNAryExpr>(WorkItem, TTI, CostKind, Worklist);
2394 return Cost > Budget;
2395 }
2396 case scAddRecExpr: {
2397 assert(cast<SCEVAddRecExpr>(S)->getNumOperands() >= 2 &&
2398 "Polynomial should be at least linear");
2399 Cost += costAndCollectOperands<SCEVAddRecExpr>(
2400 WorkItem, TTI, CostKind, Worklist);
2401 return Cost > Budget;
2402 }
2403 }
2404 llvm_unreachable("Unknown SCEV kind!");
2405 }
2406
2407 Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
2408 Instruction *IP) {
2409 assert(IP);
2410 switch (Pred->getKind()) {
2411 case SCEVPredicate::P_Union:
2412 return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
2413 case SCEVPredicate::P_Equal:
2414 return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
2415 case SCEVPredicate::P_Wrap: {
2416 auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
2417 return expandWrapPredicate(AddRecPred, IP);
2418 }
2419 }
2420 llvm_unreachable("Unknown SCEV predicate type");
2421 }
2422
2423 Value *SCEVExpander::expandEqualPredicate(const SCEVEqualPredicate *Pred,
2424 Instruction *IP) {
2425 Value *Expr0 =
2426 expandCodeForImpl(Pred->getLHS(), Pred->getLHS()->getType(), IP, false);
2427 Value *Expr1 =
2428 expandCodeForImpl(Pred->getRHS(), Pred->getRHS()->getType(), IP, false);
2429
2430 Builder.SetInsertPoint(IP);
2431 auto *I = Builder.CreateICmpNE(Expr0, Expr1, "ident.check");
2432 return I;
2433 }
2434
2435 Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
2436 Instruction *Loc, bool Signed) {
2437 assert(AR->isAffine() && "Cannot generate RT check for "
2438 "non-affine expression");
2439
2440 SCEVUnionPredicate Pred;
2441 const SCEV *ExitCount =
2442 SE.getPredicatedBackedgeTakenCount(AR->getLoop(), Pred);
2443
2444 assert(!isa<SCEVCouldNotCompute>(ExitCount) && "Invalid loop count");
2445
2446 const SCEV *Step = AR->getStepRecurrence(SE);
2447 const SCEV *Start = AR->getStart();
2448
2449 Type *ARTy = AR->getType();
2450 unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
2451 unsigned DstBits = SE.getTypeSizeInBits(ARTy);
2452
2453 // The expression {Start,+,Step} has nusw/nssw if
2454 // Step < 0, Start - |Step| * Backedge <= Start
2455 // Step >= 0, Start + |Step| * Backedge > Start
2456 // and |Step| * Backedge doesn't unsigned overflow.
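// The emitted check has roughly this shape (illustrative, value names are
// assumed):
//   %mul  = call {ty, i1} @llvm.umul.with.overflow(ty |Step|, ty Backedge)
//   %add  = Start + %mul ; %sub = Start - %mul  (GEPs for pointer-typed ARs)
//   %wrap = select (Step < 0), (%sub > Start), (%add < Start)
//   check = %wrap | %mul.overflow [ | backedge-count-truncation check ]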
2457
2458 IntegerType *CountTy = IntegerType::get(Loc->getContext(), SrcBits);
2459 Builder.SetInsertPoint(Loc);
2460 Value *TripCountVal = expandCodeForImpl(ExitCount, CountTy, Loc, false);
2461
2462 IntegerType *Ty =
2463 IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(ARTy));
2464
2465 Value *StepValue = expandCodeForImpl(Step, Ty, Loc, false);
2466 Value *NegStepValue =
2467 expandCodeForImpl(SE.getNegativeSCEV(Step), Ty, Loc, false);
2468 Value *StartValue = expandCodeForImpl(Start, ARTy, Loc, false);
2469
2470 ConstantInt *Zero =
2471 ConstantInt::get(Loc->getContext(), APInt::getZero(DstBits));
2472
2473 Builder.SetInsertPoint(Loc);
2474 // Compute |Step|
2475 Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
2476 Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2477
2478 // Get the backedge taken count and truncate or extend it to the AR type.
2479 Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
2480 auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
2481 Intrinsic::umul_with_overflow, Ty);
2482
2483 // Compute |Step| * Backedge
2484 CallInst *Mul = Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
2485 Value *MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
2486 Value *OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");
2487
2488 // Compute:
2489 // Start + |Step| * Backedge < Start
2490 // Start - |Step| * Backedge > Start
2491 Value *Add = nullptr, *Sub = nullptr;
2492 if (PointerType *ARPtrTy = dyn_cast<PointerType>(ARTy)) {
2493 StartValue = InsertNoopCastOfTo(
2494 StartValue, Builder.getInt8PtrTy(ARPtrTy->getAddressSpace()));
2495 Value *NegMulV = Builder.CreateNeg(MulV);
2496 Add = Builder.CreateGEP(Builder.getInt8Ty(), StartValue, MulV);
2497 Sub = Builder.CreateGEP(Builder.getInt8Ty(), StartValue, NegMulV);
2498 } else {
2499 Add = Builder.CreateAdd(StartValue, MulV);
2500 Sub = Builder.CreateSub(StartValue, MulV);
2501 }
2502
2503 Value *EndCompareGT = Builder.CreateICmp(
2504 Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);
2505
2506 Value *EndCompareLT = Builder.CreateICmp(
2507 Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);
2508
2509 // Select the answer based on the sign of Step.
2510 Value *EndCheck =
2511 Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2512
2513 // If the backedge taken count type is larger than the AR type,
2514 // check that we don't drop any bits by truncating it. If we are
2515 // dropping bits, then we have overflow (unless the step is zero).
2516 if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
2517 auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
2518 auto *BackedgeCheck =
2519 Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
2520 ConstantInt::get(Loc->getContext(), MaxVal));
2521 BackedgeCheck = Builder.CreateAnd(
2522 BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));
2523
2524 EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2525 }
2526
2527 return Builder.CreateOr(EndCheck, OfMul);
2528 }
2529
2530 Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
2531 Instruction *IP) {
2532 const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
2533 Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;
2534
2535 // Add a check for NUSW
2536 if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
2537 NUSWCheck = generateOverflowCheck(A, IP, false);
2538
2539 // Add a check for NSSW
2540 if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
2541 NSSWCheck = generateOverflowCheck(A, IP, true);
2542
2543 if (NUSWCheck && NSSWCheck)
2544 return Builder.CreateOr(NUSWCheck, NSSWCheck);
2545
2546 if (NUSWCheck)
2547 return NUSWCheck;
2548
2549 if (NSSWCheck)
2550 return NSSWCheck;
2551
2552 return ConstantInt::getFalse(IP->getContext());
2553 }
2554
2555 Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
2556 Instruction *IP) {
2557 auto *BoolType = IntegerType::get(IP->getContext(), 1);
2558 Value *Check = ConstantInt::getNullValue(BoolType);
2559
2560 // Loop over all checks in this set.
2561 for (auto Pred : Union->getPredicates()) {
2562 auto *NextCheck = expandCodeForPredicate(Pred, IP);
2563 Builder.SetInsertPoint(IP);
2564 Check = Builder.CreateOr(Check, NextCheck);
2565 }
2566
2567 return Check;
2568 }
2569
2570 Value *SCEVExpander::fixupLCSSAFormFor(Instruction *User, unsigned OpIdx) {
2571 assert(PreserveLCSSA);
2572 SmallVector<Instruction *, 1> ToUpdate;
2573
2574 auto *OpV = User->getOperand(OpIdx);
2575 auto *OpI = dyn_cast<Instruction>(OpV);
2576 if (!OpI)
2577 return OpV;
2578
2579 Loop *DefLoop = SE.LI.getLoopFor(OpI->getParent());
2580 Loop *UseLoop = SE.LI.getLoopFor(User->getParent());
2581 if (!DefLoop || UseLoop == DefLoop || DefLoop->contains(UseLoop))
2582 return OpV;
2583
2584 ToUpdate.push_back(OpI);
2585 SmallVector<PHINode *, 16> PHIsToRemove;
2586 formLCSSAForInstructions(ToUpdate, SE.DT, SE.LI, &SE, Builder, &PHIsToRemove);
2587 for (PHINode *PN : PHIsToRemove) {
2588 if (!PN->use_empty())
2589 continue;
2590 InsertedValues.erase(PN);
2591 InsertedPostIncValues.erase(PN);
2592 PN->eraseFromParent();
2593 }
2594
2595 return User->getOperand(OpIdx);
2596 }
2597
2598 namespace {
2599 // Search for a SCEV subexpression that is not safe to expand. Any expression
2600 // that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
2601 // UDiv expressions. We don't know if the UDiv is derived from an IR divide
2602 // instruction, but the important thing is that we prove the denominator is
2603 // nonzero before expansion.
2604 //
2605 // IVUsers already checks that IV-derived expressions are safe. So this check is
2606 // only needed when the expression includes some subexpression that is not IV
2607 // derived.
2608 //
2609 // Currently, we only allow division by a nonzero constant here. If this is
2610 // inadequate, we could easily allow division by SCEVUnknown by using
2611 // ValueTracking to check isKnownNonZero().
2612 //
2613 // We cannot generally expand recurrences unless the step dominates the loop
2614 // header. The expander handles the special case of affine recurrences by
2615 // scaling the recurrence outside the loop, but this technique isn't generally
2616 // applicable. Expanding a nested recurrence outside a loop requires computing
2617 // binomial coefficients. This could be done, but the recurrence has to be in a
2618 // perfectly reduced form, which can't be guaranteed.
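//
// Illustrative examples of what follow() rejects (assumed, not exhaustive):
//   (%a /u %b)        - unsafe: %b may be zero at the expansion point.
//   (%a /u 4)         - safe: non-zero constant divisor.
//   {0,+,%s,+,1}<L>   - unsafe when %s does not dominate L's header, since a
//                       non-affine step cannot be hoisted out of the loop.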
2619 struct SCEVFindUnsafe {
2620 ScalarEvolution &SE;
2621 bool IsUnsafe;
2622
2623 SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}
2624
2625 bool follow(const SCEV *S) {
2626 if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
2627 const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
2628 if (!SC || SC->getValue()->isZero()) {
2629 IsUnsafe = true;
2630 return false;
2631 }
2632 }
2633 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
2634 const SCEV *Step = AR->getStepRecurrence(SE);
2635 if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
2636 IsUnsafe = true;
2637 return false;
2638 }
2639 }
2640 return true;
2641 }
2642 bool isDone() const { return IsUnsafe; }
2643 };
2644 }
2645
2646 namespace llvm {
2647 bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
2648 SCEVFindUnsafe Search(SE);
2649 visitAll(S, Search);
2650 return !Search.IsUnsafe;
2651 }
2652
2653 bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint,
2654 ScalarEvolution &SE) {
2655 if (!isSafeToExpand(S, SE))
2656 return false;
2657 // We have to prove that the expanded site of S dominates InsertionPoint.
2658 // This is easy when not in the same block, but hard when S is an instruction
2659 // to be expanded somewhere inside the same block as our insertion point.
2660 // What we really need here is something analogous to an OrderedBasicBlock,
2661 // but for the moment, we paper over the problem by handling two common and
2662 // cheap to check cases.
2663 if (SE.properlyDominates(S, InsertionPoint->getParent()))
2664 return true;
2665 if (SE.dominates(S, InsertionPoint->getParent())) {
2666 if (InsertionPoint->getParent()->getTerminator() == InsertionPoint)
2667 return true;
2668 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
2669 if (llvm::is_contained(InsertionPoint->operand_values(), U->getValue()))
2670 return true;
2671 }
2672 return false;
2673 }
2674
2675 void SCEVExpanderCleaner::cleanup() {
2676 // Result is used, nothing to remove.
2677 if (ResultUsed)
2678 return;
2679
2680 auto InsertedInstructions = Expander.getAllInsertedInstructions();
2681 #ifndef NDEBUG
2682 SmallPtrSet<Instruction *, 8> InsertedSet(InsertedInstructions.begin(),
2683 InsertedInstructions.end());
2684 (void)InsertedSet;
2685 #endif
2686 // Remove sets with value handles.
2687 Expander.clear();
2688
2689 // Sort so that earlier instructions do not dominate later instructions.
2690 stable_sort(InsertedInstructions, [this](Instruction *A, Instruction *B) {
2691 return DT.dominates(B, A);
2692 });
2693 // Remove all inserted instructions.
2694 for (Instruction *I : InsertedInstructions) {
2695
2696 #ifndef NDEBUG
2697 assert(all_of(I->users(),
2698 [&InsertedSet](Value *U) {
2699 return InsertedSet.contains(cast<Instruction>(U));
2700 }) &&
2701 "removed instruction should only be used by instructions inserted "
2702 "during expansion");
2703 #endif
2704 assert(!I->getType()->isVoidTy() &&
2705 "inserted instruction should have non-void types");
2706 I->replaceAllUsesWith(UndefValue::get(I->getType()));
2707 I->eraseFromParent();
2708 }
2709 }
2710 }
2711