1 //===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the implementation of the scalar evolution expander,
10 // which is used to generate the code corresponding to a given scalar evolution
11 // expression.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/SmallSet.h"
18 #include "llvm/Analysis/InstructionSimplify.h"
19 #include "llvm/Analysis/LoopInfo.h"
20 #include "llvm/Analysis/TargetTransformInfo.h"
21 #include "llvm/IR/DataLayout.h"
22 #include "llvm/IR/Dominators.h"
23 #include "llvm/IR/IntrinsicInst.h"
24 #include "llvm/IR/LLVMContext.h"
25 #include "llvm/IR/Module.h"
26 #include "llvm/IR/PatternMatch.h"
27 #include "llvm/Support/CommandLine.h"
28 #include "llvm/Support/Debug.h"
29 #include "llvm/Support/raw_ostream.h"
30 #include "llvm/Transforms/Utils/LoopUtils.h"
31
32 using namespace llvm;
33
34 cl::opt<unsigned> llvm::SCEVCheapExpansionBudget(
35 "scev-cheap-expansion-budget", cl::Hidden, cl::init(4),
36 cl::desc("When performing SCEV expansion only if it is cheap to do, this "
37 "controls the budget that is considered cheap (default = 4)"));
38
39 using namespace PatternMatch;
40
41 /// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
42 /// reusing an existing cast if a suitable one (= dominating IP) exists, or
43 /// creating a new one.
44 Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
45 Instruction::CastOps Op,
46 BasicBlock::iterator IP) {
47 // This function must be called with the builder having a valid insertion
48 // point. It doesn't need to be the actual IP where the uses of the returned
49 // cast will be added, but it must dominate such IP.
50 // We use this precondition to produce a cast that will dominate all its
51 // uses. In particular, this is crucial for the case where the builder's
52 // insertion point *is* the point where we were asked to put the cast.
53 // Since we don't know the builder's insertion point is actually
54 // where the uses will be added (only that it dominates it), we are
55 // not allowed to move it.
56 BasicBlock::iterator BIP = Builder.GetInsertPoint();
57
58 Instruction *Ret = nullptr;
59
60 // Check to see if there is already a cast!
61 for (User *U : V->users()) {
62 if (U->getType() != Ty)
63 continue;
64 CastInst *CI = dyn_cast<CastInst>(U);
65 if (!CI || CI->getOpcode() != Op)
66 continue;
67
68 // Found a suitable cast that is at IP or comes before IP. Use it. Note that
69 // the cast must also properly dominate the Builder's insertion point.
70 if (IP->getParent() == CI->getParent() && &*BIP != CI &&
71 (&*IP == CI || CI->comesBefore(&*IP))) {
72 Ret = CI;
73 break;
74 }
75 }
76
77 // Create a new cast.
78 if (!Ret) {
79 Ret = CastInst::Create(Op, V, Ty, V->getName(), &*IP);
80 rememberInstruction(Ret);
81 }
82
83 // We assert at the end of the function since IP might point to an
84 // instruction with different dominance properties than a cast
85 // (an invoke for example) and not dominate BIP (but the cast does).
86 assert(SE.DT.dominates(Ret, &*BIP));
87
88 return Ret;
89 }
90
91 BasicBlock::iterator
92 SCEVExpander::findInsertPointAfter(Instruction *I, Instruction *MustDominate) {
93 BasicBlock::iterator IP = ++I->getIterator();
94 if (auto *II = dyn_cast<InvokeInst>(I))
95 IP = II->getNormalDest()->begin();
96
97 while (isa<PHINode>(IP))
98 ++IP;
99
100 if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
101 ++IP;
102 } else if (isa<CatchSwitchInst>(IP)) {
103 IP = MustDominate->getParent()->getFirstInsertionPt();
104 } else {
105 assert(!IP->isEHPad() && "unexpected eh pad!");
106 }
107
108 // Adjust insert point to be after instructions inserted by the expander, so
109 // we can re-use already inserted instructions. Avoid skipping past the
110 // original \p MustDominate, in case it is an inserted instruction.
111 while (isInsertedInstruction(&*IP) && &*IP != MustDominate)
112 ++IP;
113
114 return IP;
115 }
116
117 /// InsertNoopCastOfTo - Insert a cast of V to the specified type,
118 /// which must be possible with a noop cast, doing what we can to share
119 /// the casts.
120 Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
121 Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
122 assert((Op == Instruction::BitCast ||
123 Op == Instruction::PtrToInt ||
124 Op == Instruction::IntToPtr) &&
125 "InsertNoopCastOfTo cannot perform non-noop casts!");
126 assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
127 "InsertNoopCastOfTo cannot change sizes!");
128
129 // inttoptr only works for integral pointers. For non-integral pointers, we
130 // can create a GEP on i8* null with the integral value as index. Note that
131 // it is safe to use GEP of null instead of inttoptr here, because only
132 // expressions already based on a GEP of null should be converted to pointers
133 // during expansion.
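  // Illustrative shape of the IR emitted below for a non-integral pointer
  // (hypothetical names and index type, address space N):
  //   %uglygep = getelementptr i8, i8 addrspace(N)* null, i64 %V
  //   %cast    = bitcast i8 addrspace(N)* %uglygep to <Ty>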
134 if (Op == Instruction::IntToPtr) {
135 auto *PtrTy = cast<PointerType>(Ty);
136 if (DL.isNonIntegralPointerType(PtrTy)) {
137 auto *Int8PtrTy = Builder.getInt8PtrTy(PtrTy->getAddressSpace());
138 assert(DL.getTypeAllocSize(Int8PtrTy->getElementType()) == 1 &&
139 "alloc size of i8 must by 1 byte for the GEP to be correct");
140 auto *GEP = Builder.CreateGEP(
141 Builder.getInt8Ty(), Constant::getNullValue(Int8PtrTy), V, "uglygep");
142 return Builder.CreateBitCast(GEP, Ty);
143 }
144 }
145 // Short-circuit unnecessary bitcasts.
146 if (Op == Instruction::BitCast) {
147 if (V->getType() == Ty)
148 return V;
149 if (CastInst *CI = dyn_cast<CastInst>(V)) {
150 if (CI->getOperand(0)->getType() == Ty)
151 return CI->getOperand(0);
152 }
153 }
154 // Short-circuit unnecessary inttoptr<->ptrtoint casts.
155 if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
156 SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
157 if (CastInst *CI = dyn_cast<CastInst>(V))
158 if ((CI->getOpcode() == Instruction::PtrToInt ||
159 CI->getOpcode() == Instruction::IntToPtr) &&
160 SE.getTypeSizeInBits(CI->getType()) ==
161 SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
162 return CI->getOperand(0);
163 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
164 if ((CE->getOpcode() == Instruction::PtrToInt ||
165 CE->getOpcode() == Instruction::IntToPtr) &&
166 SE.getTypeSizeInBits(CE->getType()) ==
167 SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
168 return CE->getOperand(0);
169 }
170
171 // Fold a cast of a constant.
172 if (Constant *C = dyn_cast<Constant>(V))
173 return ConstantExpr::getCast(Op, C, Ty);
174
175 // Cast the argument at the beginning of the entry block, after
176 // any bitcasts of other arguments.
177 if (Argument *A = dyn_cast<Argument>(V)) {
178 BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
179 while ((isa<BitCastInst>(IP) &&
180 isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
181 cast<BitCastInst>(IP)->getOperand(0) != A) ||
182 isa<DbgInfoIntrinsic>(IP))
183 ++IP;
184 return ReuseOrCreateCast(A, Ty, Op, IP);
185 }
186
187 // Cast the instruction immediately after the instruction.
188 Instruction *I = cast<Instruction>(V);
189 BasicBlock::iterator IP = findInsertPointAfter(I, &*Builder.GetInsertPoint());
190 return ReuseOrCreateCast(I, Ty, Op, IP);
191 }
192
193 /// InsertBinop - Insert the specified binary operator, doing a small amount
194 /// of work to avoid inserting an obviously redundant operation, and hoisting
195 /// to an outer loop when the opportunity is there and it is safe.
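/// For illustration (not from the original source): when asked for `%a + %b`
/// with NUW, an existing `add nuw %a, %b` found within the short scan window
/// before the insertion point is returned instead of a duplicate; otherwise,
/// when IsSafeToHoist is set and both operands are loop-invariant, the new
/// instruction is emitted in the outermost possible loop preheader.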
196 Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
197 Value *LHS, Value *RHS,
198 SCEV::NoWrapFlags Flags, bool IsSafeToHoist) {
199 // Fold a binop with constant operands.
200 if (Constant *CLHS = dyn_cast<Constant>(LHS))
201 if (Constant *CRHS = dyn_cast<Constant>(RHS))
202 return ConstantExpr::get(Opcode, CLHS, CRHS);
203
204 // Do a quick scan to see if we have this binop nearby. If so, reuse it.
205 unsigned ScanLimit = 6;
206 BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
207 // Scanning starts from the last instruction before the insertion point.
208 BasicBlock::iterator IP = Builder.GetInsertPoint();
209 if (IP != BlockBegin) {
210 --IP;
211 for (; ScanLimit; --IP, --ScanLimit) {
212 // Don't count dbg.value against the ScanLimit, to avoid perturbing the
213 // generated code.
214 if (isa<DbgInfoIntrinsic>(IP))
215 ScanLimit++;
216
217 auto canGenerateIncompatiblePoison = [&Flags](Instruction *I) {
218 // Ensure that no-wrap flags match.
219 if (isa<OverflowingBinaryOperator>(I)) {
220 if (I->hasNoSignedWrap() != (Flags & SCEV::FlagNSW))
221 return true;
222 if (I->hasNoUnsignedWrap() != (Flags & SCEV::FlagNUW))
223 return true;
224 }
225 // Conservatively, do not reuse any instruction which has the exact
226 // flag set.
227 if (isa<PossiblyExactOperator>(I) && I->isExact())
228 return true;
229 return false;
230 };
231 if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
232 IP->getOperand(1) == RHS && !canGenerateIncompatiblePoison(&*IP))
233 return &*IP;
234 if (IP == BlockBegin) break;
235 }
236 }
237
238 // Save the original insertion point so we can restore it when we're done.
239 DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
240 SCEVInsertPointGuard Guard(Builder, this);
241
242 if (IsSafeToHoist) {
243 // Move the insertion point out of as many loops as we can.
244 while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
245 if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
246 BasicBlock *Preheader = L->getLoopPreheader();
247 if (!Preheader) break;
248
249 // Ok, move up a level.
250 Builder.SetInsertPoint(Preheader->getTerminator());
251 }
252 }
253
254 // If we haven't found this binop, insert it.
255 Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
256 BO->setDebugLoc(Loc);
257 if (Flags & SCEV::FlagNUW)
258 BO->setHasNoUnsignedWrap();
259 if (Flags & SCEV::FlagNSW)
260 BO->setHasNoSignedWrap();
261
262 return BO;
263 }
264
265 /// FactorOutConstant - Test if S is divisible by Factor, using signed
266 /// division. If so, update S with Factor divided out and return true.
267 /// S need not be evenly divisible if a reasonable remainder can be
268 /// computed.
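/// For illustration (not from the original source): with Factor = 4, the
/// constant 10 becomes S = 2 with Remainder increased by 2, and the addrec
/// {8,+,12} becomes {2,+,3} with nothing added to Remainder.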
269 static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
270 const SCEV *Factor, ScalarEvolution &SE,
271 const DataLayout &DL) {
272 // Everything is divisible by one.
273 if (Factor->isOne())
274 return true;
275
276 // x/x == 1.
277 if (S == Factor) {
278 S = SE.getConstant(S->getType(), 1);
279 return true;
280 }
281
282 // For a Constant, check for a multiple of the given factor.
283 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
284 // 0/x == 0.
285 if (C->isZero())
286 return true;
287 // Check for divisibility.
288 if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
289 ConstantInt *CI =
290 ConstantInt::get(SE.getContext(), C->getAPInt().sdiv(FC->getAPInt()));
291 // If the quotient is zero and the remainder is non-zero, reject
292 // the value at this scale. It will be considered for subsequent
293 // smaller scales.
294 if (!CI->isZero()) {
295 const SCEV *Div = SE.getConstant(CI);
296 S = Div;
297 Remainder = SE.getAddExpr(
298 Remainder, SE.getConstant(C->getAPInt().srem(FC->getAPInt())));
299 return true;
300 }
301 }
302 }
303
304 // In a Mul, check if there is a constant operand which is a multiple
305 // of the given factor.
306 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
307 // Size is known, check if there is a constant operand which is a multiple
308 // of the given factor. If so, we can factor it.
309 if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor))
310 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
311 if (!C->getAPInt().srem(FC->getAPInt())) {
312 SmallVector<const SCEV *, 4> NewMulOps(M->operands());
313 NewMulOps[0] = SE.getConstant(C->getAPInt().sdiv(FC->getAPInt()));
314 S = SE.getMulExpr(NewMulOps);
315 return true;
316 }
317 }
318
319 // In an AddRec, check if both start and step are divisible.
320 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
321 const SCEV *Step = A->getStepRecurrence(SE);
322 const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
323 if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
324 return false;
325 if (!StepRem->isZero())
326 return false;
327 const SCEV *Start = A->getStart();
328 if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
329 return false;
330 S = SE.getAddRecExpr(Start, Step, A->getLoop(),
331 A->getNoWrapFlags(SCEV::FlagNW));
332 return true;
333 }
334
335 return false;
336 }
337
338 /// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
339 /// is the number of SCEVAddRecExprs present, which are kept at the end of
340 /// the list.
341 ///
342 static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
343 Type *Ty,
344 ScalarEvolution &SE) {
345 unsigned NumAddRecs = 0;
346 for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
347 ++NumAddRecs;
348 // Group Ops into non-addrecs and addrecs.
349 SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
350 SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
351 // Let ScalarEvolution sort and simplify the non-addrecs list.
352 const SCEV *Sum = NoAddRecs.empty() ?
353 SE.getConstant(Ty, 0) :
354 SE.getAddExpr(NoAddRecs);
355 // If it returned an add, use the operands. Otherwise it simplified
356 // the sum into a single value, so just use that.
357 Ops.clear();
358 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
359 Ops.append(Add->op_begin(), Add->op_end());
360 else if (!Sum->isZero())
361 Ops.push_back(Sum);
362 // Then append the addrecs.
363 Ops.append(AddRecs.begin(), AddRecs.end());
364 }
365
366 /// SplitAddRecs - Flatten a list of add operands, moving addrec start values
367 /// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
368 /// This helps expose more opportunities for folding parts of the expressions
369 /// into GEP indices.
370 ///
371 static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
372 Type *Ty,
373 ScalarEvolution &SE) {
374 // Find the addrecs.
375 SmallVector<const SCEV *, 8> AddRecs;
376 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
377 while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
378 const SCEV *Start = A->getStart();
379 if (Start->isZero()) break;
380 const SCEV *Zero = SE.getConstant(Ty, 0);
381 AddRecs.push_back(SE.getAddRecExpr(Zero,
382 A->getStepRecurrence(SE),
383 A->getLoop(),
384 A->getNoWrapFlags(SCEV::FlagNW)));
385 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
386 Ops[i] = Zero;
387 Ops.append(Add->op_begin(), Add->op_end());
388 e += Add->getNumOperands();
389 } else {
390 Ops[i] = Start;
391 }
392 }
393 if (!AddRecs.empty()) {
394 // Add the addrecs onto the end of the list.
395 Ops.append(AddRecs.begin(), AddRecs.end());
396 // Resort the operand list, moving any constants to the front.
397 SimplifyAddOperands(Ops, Ty, SE);
398 }
399 }
400
401 /// expandAddToGEP - Expand an addition expression with a pointer type into
402 /// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
403 /// BasicAliasAnalysis and other passes analyze the result. See the rules
404 /// for getelementptr vs. inttoptr in
405 /// http://llvm.org/docs/LangRef.html#pointeraliasing
406 /// for details.
407 ///
408 /// Design note: The correctness of using getelementptr here depends on
409 /// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
410 /// they may introduce pointer arithmetic which may not be safely converted
411 /// into getelementptr.
412 ///
413 /// Design note: It might seem desirable for this function to be more
414 /// loop-aware. If some of the indices are loop-invariant while others
415 /// aren't, it might seem desirable to emit multiple GEPs, keeping the
416 /// loop-invariant portions of the overall computation outside the loop.
417 /// However, there are a few reasons this is not done here. Hoisting simple
418 /// arithmetic is a low-level optimization that often isn't very
419 /// important until late in the optimization process. In fact, passes
420 /// like InstructionCombining will combine GEPs, even if it means
421 /// pushing loop-invariant computation down into loops, so even if the
422 /// GEPs were split here, the work would quickly be undone. The
423 /// LoopStrengthReduction pass, which is usually run quite late (and
424 /// after the last InstructionCombining pass), takes care of hoisting
425 /// loop-invariant portions of expressions, after considering what
426 /// can be folded using target addressing modes.
427 ///
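/// Illustration (hypothetical IR, not from the original source): expanding
/// %base + 4 * %i with an i32* base yields
///   %scevgep = getelementptr i32, i32* %base, i64 %i
/// instead of a ptrtoint + add + inttoptr sequence.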
428 Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
429 const SCEV *const *op_end,
430 PointerType *PTy,
431 Type *Ty,
432 Value *V) {
433 Type *OriginalElTy = PTy->getElementType();
434 Type *ElTy = OriginalElTy;
435 SmallVector<Value *, 4> GepIndices;
436 SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
437 bool AnyNonZeroIndices = false;
438
439 // Split AddRecs up into parts as either of the parts may be usable
440 // without the other.
441 SplitAddRecs(Ops, Ty, SE);
442
443 Type *IntIdxTy = DL.getIndexType(PTy);
444
445 // Descend down the pointer's type and attempt to convert the other
446 // operands into GEP indices, at each level. The first index in a GEP
447 // indexes into the array implied by the pointer operand; the rest of
448 // the indices index into the element or field type selected by the
449 // preceding index.
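  // For illustration (hypothetical types, not from the original source):
  // with PTy = {i32, [8 x i16]}* and a constant byte offset of 10, the loop
  // below produces
  //   getelementptr {i32, [8 x i16]}, {i32, [8 x i16]}* %V, i64 0, i32 1, i64 3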
450 for (;;) {
451 // If the scale size is not 0, attempt to factor out a scale for
452 // array indexing.
453 SmallVector<const SCEV *, 8> ScaledOps;
454 if (ElTy->isSized()) {
455 const SCEV *ElSize = SE.getSizeOfExpr(IntIdxTy, ElTy);
456 if (!ElSize->isZero()) {
457 SmallVector<const SCEV *, 8> NewOps;
458 for (const SCEV *Op : Ops) {
459 const SCEV *Remainder = SE.getConstant(Ty, 0);
460 if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
461 // Op now has ElSize factored out.
462 ScaledOps.push_back(Op);
463 if (!Remainder->isZero())
464 NewOps.push_back(Remainder);
465 AnyNonZeroIndices = true;
466 } else {
467 // The operand was not divisible, so add it to the list of operands
468 // we'll scan next iteration.
469 NewOps.push_back(Op);
470 }
471 }
472 // If we made any changes, update Ops.
473 if (!ScaledOps.empty()) {
474 Ops = NewOps;
475 SimplifyAddOperands(Ops, Ty, SE);
476 }
477 }
478 }
479
480 // Record the scaled array index for this level of the type. If
481 // we didn't find any operands that could be factored, tentatively
482 // assume that element zero was selected (since the zero offset
483 // would obviously be folded away).
484 Value *Scaled =
485 ScaledOps.empty()
486 ? Constant::getNullValue(Ty)
487 : expandCodeForImpl(SE.getAddExpr(ScaledOps), Ty, false);
488 GepIndices.push_back(Scaled);
489
490 // Collect struct field index operands.
491 while (StructType *STy = dyn_cast<StructType>(ElTy)) {
492 bool FoundFieldNo = false;
493 // An empty struct has no fields.
494 if (STy->getNumElements() == 0) break;
495 // Field offsets are known. See if a constant offset falls within any of
496 // the struct fields.
497 if (Ops.empty())
498 break;
499 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
500 if (SE.getTypeSizeInBits(C->getType()) <= 64) {
501 const StructLayout &SL = *DL.getStructLayout(STy);
502 uint64_t FullOffset = C->getValue()->getZExtValue();
503 if (FullOffset < SL.getSizeInBytes()) {
504 unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
505 GepIndices.push_back(
506 ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
507 ElTy = STy->getTypeAtIndex(ElIdx);
508 Ops[0] =
509 SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
510 AnyNonZeroIndices = true;
511 FoundFieldNo = true;
512 }
513 }
514 // If no struct field offsets were found, tentatively assume that
515 // field zero was selected (since the zero offset would obviously
516 // be folded away).
517 if (!FoundFieldNo) {
518 ElTy = STy->getTypeAtIndex(0u);
519 GepIndices.push_back(
520 Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
521 }
522 }
523
524 if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
525 ElTy = ATy->getElementType();
526 else
527 // FIXME: Handle VectorType.
528 // E.g., if ElTy is a scalable vector, then ElSize is not a compile-time
529 // constant and therefore cannot be factored out. The generated IR is less
530 // ideal: the base 'V' is cast to i8* and an ugly getelementptr is done over that.
531 break;
532 }
533
534 // If none of the operands were convertible to proper GEP indices, cast
535 // the base to i8* and do an ugly getelementptr with that. It's still
536 // better than ptrtoint+arithmetic+inttoptr at least.
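  // E.g. (illustrative, hypothetical names), the fallback emitted below is
  //   %uglygep = getelementptr i8, i8* %V, i64 %offset
  // where %offset is the expansion of the remaining add expression.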
537 if (!AnyNonZeroIndices) {
538 // Cast the base to i8*.
539 V = InsertNoopCastOfTo(V,
540 Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));
541
542 assert(!isa<Instruction>(V) ||
543 SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));
544
545 // Expand the operands for a plain byte offset.
546 Value *Idx = expandCodeForImpl(SE.getAddExpr(Ops), Ty, false);
547
548 // Fold a GEP with constant operands.
549 if (Constant *CLHS = dyn_cast<Constant>(V))
550 if (Constant *CRHS = dyn_cast<Constant>(Idx))
551 return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
552 CLHS, CRHS);
553
554 // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
555 unsigned ScanLimit = 6;
556 BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
557 // Scanning starts from the last instruction before the insertion point.
558 BasicBlock::iterator IP = Builder.GetInsertPoint();
559 if (IP != BlockBegin) {
560 --IP;
561 for (; ScanLimit; --IP, --ScanLimit) {
562 // Don't count dbg.value against the ScanLimit, to avoid perturbing the
563 // generated code.
564 if (isa<DbgInfoIntrinsic>(IP))
565 ScanLimit++;
566 if (IP->getOpcode() == Instruction::GetElementPtr &&
567 IP->getOperand(0) == V && IP->getOperand(1) == Idx)
568 return &*IP;
569 if (IP == BlockBegin) break;
570 }
571 }
572
573 // Save the original insertion point so we can restore it when we're done.
574 SCEVInsertPointGuard Guard(Builder, this);
575
576 // Move the insertion point out of as many loops as we can.
577 while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
578 if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
579 BasicBlock *Preheader = L->getLoopPreheader();
580 if (!Preheader) break;
581
582 // Ok, move up a level.
583 Builder.SetInsertPoint(Preheader->getTerminator());
584 }
585
586 // Emit a GEP.
587 return Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
588 }
589
590 {
591 SCEVInsertPointGuard Guard(Builder, this);
592
593 // Move the insertion point out of as many loops as we can.
594 while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
595 if (!L->isLoopInvariant(V)) break;
596
597 bool AnyIndexNotLoopInvariant = any_of(
598 GepIndices, [L](Value *Op) { return !L->isLoopInvariant(Op); });
599
600 if (AnyIndexNotLoopInvariant)
601 break;
602
603 BasicBlock *Preheader = L->getLoopPreheader();
604 if (!Preheader) break;
605
606 // Ok, move up a level.
607 Builder.SetInsertPoint(Preheader->getTerminator());
608 }
609
610 // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
611 // because ScalarEvolution may have changed the address arithmetic to
612 // compute a value which is beyond the end of the allocated object.
613 Value *Casted = V;
614 if (V->getType() != PTy)
615 Casted = InsertNoopCastOfTo(Casted, PTy);
616 Value *GEP = Builder.CreateGEP(OriginalElTy, Casted, GepIndices, "scevgep");
617 Ops.push_back(SE.getUnknown(GEP));
618 }
619
620 return expand(SE.getAddExpr(Ops));
621 }
622
623 Value *SCEVExpander::expandAddToGEP(const SCEV *Op, PointerType *PTy, Type *Ty,
624 Value *V) {
625 const SCEV *const Ops[1] = {Op};
626 return expandAddToGEP(Ops, Ops + 1, PTy, Ty, V);
627 }
628
629 /// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
630 /// SCEV expansion. If they are nested, this is the most nested. If they are
631 /// neighboring, pick the later.
632 static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
633 DominatorTree &DT) {
634 if (!A) return B;
635 if (!B) return A;
636 if (A->contains(B)) return B;
637 if (B->contains(A)) return A;
638 if (DT.dominates(A->getHeader(), B->getHeader())) return B;
639 if (DT.dominates(B->getHeader(), A->getHeader())) return A;
640 return A; // Arbitrarily break the tie.
641 }
642
643 /// getRelevantLoop - Get the most relevant loop associated with the given
644 /// expression, according to PickMostRelevantLoop.
645 const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
646 // Test whether we've already computed the most relevant loop for this SCEV.
647 auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
648 if (!Pair.second)
649 return Pair.first->second;
650
651 if (isa<SCEVConstant>(S))
652 // A constant has no relevant loops.
653 return nullptr;
654 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
655 if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
656 return Pair.first->second = SE.LI.getLoopFor(I->getParent());
657 // A non-instruction has no relevant loops.
658 return nullptr;
659 }
660 if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
661 const Loop *L = nullptr;
662 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
663 L = AR->getLoop();
664 for (const SCEV *Op : N->operands())
665 L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
666 return RelevantLoops[N] = L;
667 }
668 if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
669 const Loop *Result = getRelevantLoop(C->getOperand());
670 return RelevantLoops[C] = Result;
671 }
672 if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
673 const Loop *Result = PickMostRelevantLoop(
674 getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
675 return RelevantLoops[D] = Result;
676 }
677 llvm_unreachable("Unexpected SCEV type!");
678 }
679
680 namespace {
681
682 /// LoopCompare - Compare loops by PickMostRelevantLoop.
683 class LoopCompare {
684 DominatorTree &DT;
685 public:
686 explicit LoopCompare(DominatorTree &dt) : DT(dt) {}
687
688 bool operator()(std::pair<const Loop *, const SCEV *> LHS,
689 std::pair<const Loop *, const SCEV *> RHS) const {
690 // Keep pointer operands sorted at the end.
691 if (LHS.second->getType()->isPointerTy() !=
692 RHS.second->getType()->isPointerTy())
693 return LHS.second->getType()->isPointerTy();
694
695 // Compare loops with PickMostRelevantLoop.
696 if (LHS.first != RHS.first)
697 return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;
698
699 // If one operand is a non-constant negative and the other is not,
700 // put the non-constant negative on the right so that a sub can
701 // be used instead of a negate and add.
702 if (LHS.second->isNonConstantNegative()) {
703 if (!RHS.second->isNonConstantNegative())
704 return false;
705 } else if (RHS.second->isNonConstantNegative())
706 return true;
707
708 // Otherwise they are equivalent according to this comparison.
709 return false;
710 }
711 };
712
713 }
714
715 Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
716 Type *Ty = SE.getEffectiveSCEVType(S->getType());
717
718 // Collect all the add operands in a loop, along with their associated loops.
719 // Iterate in reverse so that constants are emitted last, all else equal, and
720 // so that pointer operands are inserted first, which the code below relies on
721 // to form more involved GEPs.
722 SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
723 for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
724 E(S->op_begin()); I != E; ++I)
725 OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));
726
727 // Sort by loop. Use a stable sort so that constants follow non-constants and
728 // pointer operands precede non-pointer operands.
729 llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));
730
731 // Emit instructions to add all the operands. Hoist as much as possible
732 // out of loops, and form meaningful getelementptrs where possible.
733 Value *Sum = nullptr;
734 for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
735 const Loop *CurLoop = I->first;
736 const SCEV *Op = I->second;
737 if (!Sum) {
738 // This is the first operand. Just expand it.
739 Sum = expand(Op);
740 ++I;
741 } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
742 // The running sum expression is a pointer. Try to form a getelementptr
743 // at this level with that as the base.
744 SmallVector<const SCEV *, 4> NewOps;
745 for (; I != E && I->first == CurLoop; ++I) {
746 // If the operand is a SCEVUnknown that is not an instruction, peek through
747 // it, to enable more of it to be folded into the GEP.
748 const SCEV *X = I->second;
749 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
750 if (!isa<Instruction>(U->getValue()))
751 X = SE.getSCEV(U->getValue());
752 NewOps.push_back(X);
753 }
754 Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
755 } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
756 // The running sum is an integer, and there's a pointer at this level.
757 // Try to form a getelementptr. If the running sum is instructions,
758 // use a SCEVUnknown to avoid re-analyzing them.
759 SmallVector<const SCEV *, 4> NewOps;
760 NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
761 SE.getSCEV(Sum));
762 for (++I; I != E && I->first == CurLoop; ++I)
763 NewOps.push_back(I->second);
764 Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
765 } else if (Op->isNonConstantNegative()) {
766 // Instead of doing a negate and add, just do a subtract.
767 Value *W = expandCodeForImpl(SE.getNegativeSCEV(Op), Ty, false);
768 Sum = InsertNoopCastOfTo(Sum, Ty);
769 Sum = InsertBinop(Instruction::Sub, Sum, W, SCEV::FlagAnyWrap,
770 /*IsSafeToHoist*/ true);
771 ++I;
772 } else {
773 // A simple add.
774 Value *W = expandCodeForImpl(Op, Ty, false);
775 Sum = InsertNoopCastOfTo(Sum, Ty);
776 // Canonicalize a constant to the RHS.
777 if (isa<Constant>(Sum)) std::swap(Sum, W);
778 Sum = InsertBinop(Instruction::Add, Sum, W, S->getNoWrapFlags(),
779 /*IsSafeToHoist*/ true);
780 ++I;
781 }
782 }
783
784 return Sum;
785 }
786
787 Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
788 Type *Ty = SE.getEffectiveSCEVType(S->getType());
789
790 // Collect all the mul operands in a loop, along with their associated loops.
791 // Iterate in reverse so that constants are emitted last, all else equal.
792 SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
793 for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
794 E(S->op_begin()); I != E; ++I)
795 OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));
796
797 // Sort by loop. Use a stable sort so that constants follow non-constants.
798 llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));
799
800 // Emit instructions to mul all the operands. Hoist as much as possible
801 // out of loops.
802 Value *Prod = nullptr;
803 auto I = OpsAndLoops.begin();
804
805 // Expand the calculation of X pow N in the following manner:
806 // Let N = P1 + P2 + ... + PK, where all P are powers of 2. Then:
807 // X pow N = (X pow P1) * (X pow P2) * ... * (X pow PK).
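  // For instance (illustrative, not from the original source): for N = 13 =
  // 1 + 4 + 8, the lambda below computes X, X^2, X^4 and X^8 by repeated
  // squaring and multiplies X, X^4 and X^8 into the result.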
808 const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops, &Ty]() {
809 auto E = I;
810 // Calculate how many times the same operand from the same loop is included
811 // into this power.
812 uint64_t Exponent = 0;
813 const uint64_t MaxExponent = UINT64_MAX >> 1;
814 // No one sane will ever try to calculate such huge exponents, but if we
815 // need this, we stop on UINT64_MAX / 2 because we need to exit the loop
816 // below when the power of 2 exceeds our Exponent, and we want it to be
817 // 1u << 31 at most to not deal with unsigned overflow.
818 while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
819 ++Exponent;
820 ++E;
821 }
822 assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");
823
824 // Calculate powers with exponents 1, 2, 4, 8, etc. and include those
825 // that are needed in the result.
826 Value *P = expandCodeForImpl(I->second, Ty, false);
827 Value *Result = nullptr;
828 if (Exponent & 1)
829 Result = P;
830 for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
831 P = InsertBinop(Instruction::Mul, P, P, SCEV::FlagAnyWrap,
832 /*IsSafeToHoist*/ true);
833 if (Exponent & BinExp)
834 Result = Result ? InsertBinop(Instruction::Mul, Result, P,
835 SCEV::FlagAnyWrap,
836 /*IsSafeToHoist*/ true)
837 : P;
838 }
839
840 I = E;
841 assert(Result && "Nothing was expanded?");
842 return Result;
843 };
844
845 while (I != OpsAndLoops.end()) {
846 if (!Prod) {
847 // This is the first operand. Just expand it.
848 Prod = ExpandOpBinPowN();
849 } else if (I->second->isAllOnesValue()) {
850 // Instead of doing a multiply by negative one, just do a negate.
851 Prod = InsertNoopCastOfTo(Prod, Ty);
852 Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod,
853 SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
854 ++I;
855 } else {
856 // A simple mul.
857 Value *W = ExpandOpBinPowN();
858 Prod = InsertNoopCastOfTo(Prod, Ty);
859 // Canonicalize a constant to the RHS.
860 if (isa<Constant>(Prod)) std::swap(Prod, W);
861 const APInt *RHS;
862 if (match(W, m_Power2(RHS))) {
863 // Canonicalize Prod*(1<<C) to Prod<<C.
864 assert(!Ty->isVectorTy() && "vector types are not SCEVable");
865 auto NWFlags = S->getNoWrapFlags();
866 // Clear the nsw flag if the shl will produce a poison value.
867 if (RHS->logBase2() == RHS->getBitWidth() - 1)
868 NWFlags = ScalarEvolution::clearFlags(NWFlags, SCEV::FlagNSW);
869 Prod = InsertBinop(Instruction::Shl, Prod,
870 ConstantInt::get(Ty, RHS->logBase2()), NWFlags,
871 /*IsSafeToHoist*/ true);
872 } else {
873 Prod = InsertBinop(Instruction::Mul, Prod, W, S->getNoWrapFlags(),
874 /*IsSafeToHoist*/ true);
875 }
876 }
877 }
878
879 return Prod;
880 }
881
882 Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
883 Type *Ty = SE.getEffectiveSCEVType(S->getType());
884
885 Value *LHS = expandCodeForImpl(S->getLHS(), Ty, false);
886 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
887 const APInt &RHS = SC->getAPInt();
888 if (RHS.isPowerOf2())
889 return InsertBinop(Instruction::LShr, LHS,
890 ConstantInt::get(Ty, RHS.logBase2()),
891 SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
892 }
893
894 Value *RHS = expandCodeForImpl(S->getRHS(), Ty, false);
895 return InsertBinop(Instruction::UDiv, LHS, RHS, SCEV::FlagAnyWrap,
896 /*IsSafeToHoist*/ SE.isKnownNonZero(S->getRHS()));
897 }
898
899 /// Move parts of Base into Rest to leave Base with the minimal
900 /// expression that provides a pointer operand suitable for a
901 /// GEP expansion.
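/// For illustration (not from the original source): with Base = {(8 + %p),+,4}
/// and Rest = 0, this leaves Base = %p and Rest = 8 + {0,+,4}.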
902 static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
903 ScalarEvolution &SE) {
904 while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
905 Base = A->getStart();
906 Rest = SE.getAddExpr(Rest,
907 SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
908 A->getStepRecurrence(SE),
909 A->getLoop(),
910 A->getNoWrapFlags(SCEV::FlagNW)));
911 }
912 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
913 Base = A->getOperand(A->getNumOperands()-1);
914 SmallVector<const SCEV *, 8> NewAddOps(A->operands());
915 NewAddOps.back() = Rest;
916 Rest = SE.getAddExpr(NewAddOps);
917 ExposePointerBase(Base, Rest, SE);
918 }
919 }
920
921 /// Determine if this is a well-behaved chain of instructions leading back to
922 /// the PHI. If so, it may be reused by expanded expressions.
923 bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
924 const Loop *L) {
925 if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
926 (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
927 return false;
928 // If any of the operands don't dominate the insert position, bail.
929 // Addrec operands are always loop-invariant, so this can only happen
930 // if there are instructions which haven't been hoisted.
931 if (L == IVIncInsertLoop) {
932 for (User::op_iterator OI = IncV->op_begin()+1,
933 OE = IncV->op_end(); OI != OE; ++OI)
934 if (Instruction *OInst = dyn_cast<Instruction>(OI))
935 if (!SE.DT.dominates(OInst, IVIncInsertPos))
936 return false;
937 }
938 // Advance to the next instruction.
939 IncV = dyn_cast<Instruction>(IncV->getOperand(0));
940 if (!IncV)
941 return false;
942
943 if (IncV->mayHaveSideEffects())
944 return false;
945
946 if (IncV == PN)
947 return true;
948
949 return isNormalAddRecExprPHI(PN, IncV, L);
950 }
951
952 /// getIVIncOperand returns an induction variable increment's induction
953 /// variable operand.
954 ///
955 /// If allowScale is set, any type of GEP is allowed as long as the nonIV
956 /// operands dominate InsertPos.
957 ///
958 /// If allowScale is not set, ensure that a GEP increment conforms to one of the
959 /// simple patterns generated by getAddRecExprPHILiterally and
960 /// expandAddtoGEP. If the pattern isn't recognized, return NULL.
961 Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
962 Instruction *InsertPos,
963 bool allowScale) {
964 if (IncV == InsertPos)
965 return nullptr;
966
967 switch (IncV->getOpcode()) {
968 default:
969 return nullptr;
970 // Check for a simple Add/Sub or GEP of a loop invariant step.
971 case Instruction::Add:
972 case Instruction::Sub: {
973 Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
974 if (!OInst || SE.DT.dominates(OInst, InsertPos))
975 return dyn_cast<Instruction>(IncV->getOperand(0));
976 return nullptr;
977 }
978 case Instruction::BitCast:
979 return dyn_cast<Instruction>(IncV->getOperand(0));
980 case Instruction::GetElementPtr:
981 for (auto I = IncV->op_begin() + 1, E = IncV->op_end(); I != E; ++I) {
982 if (isa<Constant>(*I))
983 continue;
984 if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
985 if (!SE.DT.dominates(OInst, InsertPos))
986 return nullptr;
987 }
988 if (allowScale) {
989 // allow any kind of GEP as long as it can be hoisted.
990 continue;
991 }
992 // This must be a pointer addition of constants (pretty), which is already
993 // handled, or some number of address-size elements (ugly). Ugly geps
994 // have 2 operands. i1* is used by the expander to represent an
995 // address-size element.
996 if (IncV->getNumOperands() != 2)
997 return nullptr;
998 unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
999 if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
1000 && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
1001 return nullptr;
1002 break;
1003 }
1004 return dyn_cast<Instruction>(IncV->getOperand(0));
1005 }
1006 }
1007
1008 /// If the insert point of the current builder or any of the builders on the
1009 /// stack of saved builders has 'I' as its insert point, update it to point to
1010 /// the instruction after 'I'. This is intended to be used when the instruction
1011 /// 'I' is being moved. If this fixup is not done and 'I' is moved to a
1012 /// different block, the inconsistent insert point (with a mismatched
1013 /// Instruction and Block) can lead to an instruction being inserted in a block
1014 /// other than its parent.
1015 void SCEVExpander::fixupInsertPoints(Instruction *I) {
1016 BasicBlock::iterator It(*I);
1017 BasicBlock::iterator NewInsertPt = std::next(It);
1018 if (Builder.GetInsertPoint() == It)
1019 Builder.SetInsertPoint(&*NewInsertPt);
1020 for (auto *InsertPtGuard : InsertPointGuards)
1021 if (InsertPtGuard->GetInsertPoint() == It)
1022 InsertPtGuard->SetInsertPoint(NewInsertPt);
1023 }
1024
1025 /// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
1026 /// it available to other uses in this loop. Recursively hoist any operands,
1027 /// until we reach a value that dominates InsertPos.
1028 bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
1029 if (SE.DT.dominates(IncV, InsertPos))
1030 return true;
1031
1032 // InsertPos must itself dominate IncV so that IncV's new position satisfies
1033 // its existing users.
1034 if (isa<PHINode>(InsertPos) ||
1035 !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
1036 return false;
1037
1038 if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
1039 return false;
1040
1041 // Check that the chain of IV operands leading back to Phi can be hoisted.
1042 SmallVector<Instruction*, 4> IVIncs;
1043 for(;;) {
1044 Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
1045 if (!Oper)
1046 return false;
1047 // IncV is safe to hoist.
1048 IVIncs.push_back(IncV);
1049 IncV = Oper;
1050 if (SE.DT.dominates(IncV, InsertPos))
1051 break;
1052 }
1053 for (auto I = IVIncs.rbegin(), E = IVIncs.rend(); I != E; ++I) {
1054 fixupInsertPoints(*I);
1055 (*I)->moveBefore(InsertPos);
1056 }
1057 return true;
1058 }
1059
1060 /// Determine if this cyclic phi is in a form that would have been generated by
1061 /// LSR. We don't care if the phi was actually expanded in this pass, as long
1062 /// as it is in a low-cost form, for example, no implied multiplication. This
1063 /// should match any patterns generated by getAddRecExprPHILiterally and
1064 /// expandAddtoGEP.
1065 bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
1066 const Loop *L) {
1067 for(Instruction *IVOper = IncV;
1068 (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
1069 /*allowScale=*/false));) {
1070 if (IVOper == PN)
1071 return true;
1072 }
1073 return false;
1074 }
1075
1076 /// expandIVInc - Expand an IV increment at Builder's current InsertPos.
1077 /// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
1078 /// need to materialize IV increments elsewhere to handle difficult situations.
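/// A minimal sketch of the two shapes produced (hypothetical names): for a
/// pointer IV the increment is a GEP of PN by the step, otherwise it is
///   %name.iv.next = add %name.iv, %step
/// (or a sub when useSubtract is set).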
1079 Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
1080 Type *ExpandTy, Type *IntTy,
1081 bool useSubtract) {
1082 Value *IncV;
1083 // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
1084 if (ExpandTy->isPointerTy()) {
1085 PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
1086 // If the step isn't constant, don't use an implicitly scaled GEP, because
1087 // that would require a multiply inside the loop.
1088 if (!isa<ConstantInt>(StepV))
1089 GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
1090 GEPPtrTy->getAddressSpace());
1091 IncV = expandAddToGEP(SE.getSCEV(StepV), GEPPtrTy, IntTy, PN);
1092 if (IncV->getType() != PN->getType())
1093 IncV = Builder.CreateBitCast(IncV, PN->getType());
1094 } else {
1095 IncV = useSubtract ?
1096 Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
1097 Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
1098 }
1099 return IncV;
1100 }
1101
1102 /// Hoist the addrec instruction chain rooted in the loop phi above the
1103 /// position. This routine assumes that this is possible (has been checked).
1104 void SCEVExpander::hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
1105 Instruction *Pos, PHINode *LoopPhi) {
1106 do {
1107 if (DT->dominates(InstToHoist, Pos))
1108 break;
1109 // Make sure the increment is where we want it. But don't move it
1110 // down past a potential existing post-inc user.
1111 fixupInsertPoints(InstToHoist);
1112 InstToHoist->moveBefore(Pos);
1113 Pos = InstToHoist;
1114 InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
1115 } while (InstToHoist != LoopPhi);
1116 }
1117
1118 /// Check whether we can cheaply express the requested SCEV in terms of
1119 /// the available PHI SCEV by truncation and/or inversion of the step.
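/// E.g. (illustrative): a requested i32 {0,+,1} can reuse an existing i64
/// {0,+,1} phi by truncation, and a requested {n,+,-1} can be rewritten as
/// n - {0,+,1} with InvertStep set.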
1120 static bool canBeCheaplyTransformed(ScalarEvolution &SE,
1121 const SCEVAddRecExpr *Phi,
1122 const SCEVAddRecExpr *Requested,
1123 bool &InvertStep) {
1124 Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
1125 Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());
1126
1127 if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
1128 return false;
1129
1130 // Try truncating it if necessary.
1131 Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
1132 if (!Phi)
1133 return false;
1134
1135 // Check whether truncation will help.
1136 if (Phi == Requested) {
1137 InvertStep = false;
1138 return true;
1139 }
1140
1141 // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
1142 if (SE.getAddExpr(Requested->getStart(),
1143 SE.getNegativeSCEV(Requested)) == Phi) {
1144 InvertStep = true;
1145 return true;
1146 }
1147
1148 return false;
1149 }
1150
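/// Illustrative restatement (not from the original source): the increment of
/// {Start,+,Step} is considered nsw only if, in a type twice as wide,
/// sext(AR) + sext(Step) equals sext(AR + Step); IsIncrementNUW below checks
/// the same property with zero-extension.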
1151 static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
1152 if (!isa<IntegerType>(AR->getType()))
1153 return false;
1154
1155 unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
1156 Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
1157 const SCEV *Step = AR->getStepRecurrence(SE);
1158 const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
1159 SE.getSignExtendExpr(AR, WideTy));
1160 const SCEV *ExtendAfterOp =
1161 SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
1162 return ExtendAfterOp == OpAfterExtend;
1163 }
1164
1165 static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
1166 if (!isa<IntegerType>(AR->getType()))
1167 return false;
1168
1169 unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
1170 Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
1171 const SCEV *Step = AR->getStepRecurrence(SE);
1172 const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
1173 SE.getZeroExtendExpr(AR, WideTy));
1174 const SCEV *ExtendAfterOp =
1175 SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
1176 return ExtendAfterOp == OpAfterExtend;
1177 }
1178
1179 /// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
1180 /// the base addrec, which is the addrec without any non-loop-dominating
1181 /// values, and return the PHI.
1182 PHINode *
1183 SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
1184 const Loop *L,
1185 Type *ExpandTy,
1186 Type *IntTy,
1187 Type *&TruncTy,
1188 bool &InvertStep) {
1189 assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");
1190
1191 // Reuse a previously-inserted PHI, if present.
1192 BasicBlock *LatchBlock = L->getLoopLatch();
1193 if (LatchBlock) {
1194 PHINode *AddRecPhiMatch = nullptr;
1195 Instruction *IncV = nullptr;
1196 TruncTy = nullptr;
1197 InvertStep = false;
1198
1199 // Only try partially matching scevs that need truncation and/or
1200 // step-inversion if we know this loop is outside the current loop.
1201 bool TryNonMatchingSCEV =
1202 IVIncInsertLoop &&
1203 SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
1204
1205 for (PHINode &PN : L->getHeader()->phis()) {
1206 if (!SE.isSCEVable(PN.getType()))
1207 continue;
1208
1209 // We should not look for an incomplete PHI. Getting SCEV for an incomplete
1210 // PHI has no meaning at all.
1211 if (!PN.isComplete()) {
1212 DEBUG_WITH_TYPE(
1213 DebugType, dbgs() << "One incomplete PHI is found: " << PN << "\n");
1214 continue;
1215 }
1216
1217 const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
1218 if (!PhiSCEV)
1219 continue;
1220
1221 bool IsMatchingSCEV = PhiSCEV == Normalized;
1222 // We only handle truncation and inversion of phi recurrences for the
1223 // expanded expression if the expanded expression's loop dominates the
1224 // loop we insert to. Check now, so we can bail out early.
1225 if (!IsMatchingSCEV && !TryNonMatchingSCEV)
1226 continue;
1227
1228 // TODO: this possibly can be reworked to avoid this cast at all.
1229 Instruction *TempIncV =
1230 dyn_cast<Instruction>(PN.getIncomingValueForBlock(LatchBlock));
1231 if (!TempIncV)
1232 continue;
1233
1234 // Check whether we can reuse this PHI node.
1235 if (LSRMode) {
1236 if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
1237 continue;
1238 if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
1239 continue;
1240 } else {
1241 if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
1242 continue;
1243 }
1244
1245 // Stop if we have found an exact match SCEV.
1246 if (IsMatchingSCEV) {
1247 IncV = TempIncV;
1248 TruncTy = nullptr;
1249 InvertStep = false;
1250 AddRecPhiMatch = &PN;
1251 break;
1252 }
1253
1254 // Try whether the phi can be translated into the requested form
1255 // (truncated and/or offset by a constant).
1256 if ((!TruncTy || InvertStep) &&
1257 canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
1258 // Record the phi node. But don't stop; we might find an exact match
1259 // later.
1260 AddRecPhiMatch = &PN;
1261 IncV = TempIncV;
1262 TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
1263 }
1264 }
1265
1266 if (AddRecPhiMatch) {
1267 // Potentially, move the increment. We have made sure in
1268 // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
1269 if (L == IVIncInsertLoop)
1270 hoistBeforePos(&SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);
1271
1272 // Ok, the add recurrence looks usable.
1273 // Remember this PHI, even in post-inc mode.
1274 InsertedValues.insert(AddRecPhiMatch);
1275 // Remember the increment.
1276 rememberInstruction(IncV);
1277 // Those values were not actually inserted but re-used.
1278 ReusedValues.insert(AddRecPhiMatch);
1279 ReusedValues.insert(IncV);
1280 return AddRecPhiMatch;
1281 }
1282 }
1283
1284 // Save the original insertion point so we can restore it when we're done.
1285 SCEVInsertPointGuard Guard(Builder, this);
1286
1287 // Another AddRec may need to be recursively expanded below. For example, if
1288 // this AddRec is quadratic, the StepV may itself be an AddRec in this
1289 // loop. Remove this loop from the PostIncLoops set before expanding such
1290 // AddRecs. Otherwise, we cannot find a valid position for the step
1291 // (i.e. StepV can never dominate its loop header). Ideally, we could do
1292 // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
1293 // so it's not worth implementing SmallPtrSet::swap.
1294 PostIncLoopSet SavedPostIncLoops = PostIncLoops;
1295 PostIncLoops.clear();
1296
1297 // Expand code for the start value into the loop preheader.
1298 assert(L->getLoopPreheader() &&
1299 "Can't expand add recurrences without a loop preheader!");
1300 Value *StartV =
1301 expandCodeForImpl(Normalized->getStart(), ExpandTy,
1302 L->getLoopPreheader()->getTerminator(), false);
1303
1304 // StartV must have been inserted into L's preheader to dominate the new
1305 // phi.
1306 assert(!isa<Instruction>(StartV) ||
1307 SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
1308 L->getHeader()));
1309
1310 // Expand code for the step value. Do this before creating the PHI so that PHI
1311 // reuse code doesn't see an incomplete PHI.
1312 const SCEV *Step = Normalized->getStepRecurrence(SE);
1313 // If the stride is negative, insert a sub instead of an add for the increment
1314 // (unless it's a constant, because subtracts of constants are canonicalized
1315 // to adds).
1316 bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
1317 if (useSubtract)
1318 Step = SE.getNegativeSCEV(Step);
1319 // Expand the step somewhere that dominates the loop header.
1320 Value *StepV = expandCodeForImpl(
1321 Step, IntTy, &*L->getHeader()->getFirstInsertionPt(), false);
1322
1323 // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
1324 // we actually do emit an addition. It does not apply if we emit a
1325 // subtraction.
1326 bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
1327 bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);
1328
1329 // Create the PHI.
1330 BasicBlock *Header = L->getHeader();
1331 Builder.SetInsertPoint(Header, Header->begin());
1332 pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1333 PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
1334 Twine(IVName) + ".iv");
1335
1336 // Create the step instructions and populate the PHI.
1337 for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1338 BasicBlock *Pred = *HPI;
1339
1340 // Add a start value.
1341 if (!L->contains(Pred)) {
1342 PN->addIncoming(StartV, Pred);
1343 continue;
1344 }
1345
1346 // Create a step value and add it to the PHI.
1347 // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
1348 // instructions at IVIncInsertPos.
1349 Instruction *InsertPos = L == IVIncInsertLoop ?
1350 IVIncInsertPos : Pred->getTerminator();
1351 Builder.SetInsertPoint(InsertPos);
1352 Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1353
1354 if (isa<OverflowingBinaryOperator>(IncV)) {
1355 if (IncrementIsNUW)
1356 cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
1357 if (IncrementIsNSW)
1358 cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
1359 }
1360 PN->addIncoming(IncV, Pred);
1361 }
1362
1363 // After expanding subexpressions, restore the PostIncLoops set so the caller
1364 // can ensure that IVIncrement dominates the current uses.
1365 PostIncLoops = SavedPostIncLoops;
1366
1367 // Remember this PHI, even in post-inc mode.
1368 InsertedValues.insert(PN);
1369
1370 return PN;
1371 }
1372
1373 Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
1374 Type *STy = S->getType();
1375 Type *IntTy = SE.getEffectiveSCEVType(STy);
1376 const Loop *L = S->getLoop();
1377
1378 // Determine a normalized form of this expression, which is the expression
1379 // before any post-inc adjustment is made.
1380 const SCEVAddRecExpr *Normalized = S;
1381 if (PostIncLoops.count(L)) {
1382 PostIncLoopSet Loops;
1383 Loops.insert(L);
1384 Normalized = cast<SCEVAddRecExpr>(normalizeForPostIncUse(S, Loops, SE));
1385 }
1386
1387 // Strip off any non-loop-dominating component from the addrec start.
1388 const SCEV *Start = Normalized->getStart();
1389 const SCEV *PostLoopOffset = nullptr;
1390 if (!SE.properlyDominates(Start, L->getHeader())) {
1391 PostLoopOffset = Start;
1392 Start = SE.getConstant(Normalized->getType(), 0);
1393 Normalized = cast<SCEVAddRecExpr>(
1394 SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
1395 Normalized->getLoop(),
1396 Normalized->getNoWrapFlags(SCEV::FlagNW)));
1397 }
1398
1399 // Strip off any non-loop-dominating component from the addrec step.
1400 const SCEV *Step = Normalized->getStepRecurrence(SE);
1401 const SCEV *PostLoopScale = nullptr;
1402 if (!SE.dominates(Step, L->getHeader())) {
1403 PostLoopScale = Step;
1404 Step = SE.getConstant(Normalized->getType(), 1);
1405 if (!Start->isZero()) {
1406 // The normalization below assumes that Start is constant zero, so if
1407       // it isn't, re-associate Start to PostLoopOffset.
1408 assert(!PostLoopOffset && "Start not-null but PostLoopOffset set?");
1409 PostLoopOffset = Start;
1410 Start = SE.getConstant(Normalized->getType(), 0);
1411 }
1412 Normalized =
1413 cast<SCEVAddRecExpr>(SE.getAddRecExpr(
1414 Start, Step, Normalized->getLoop(),
1415 Normalized->getNoWrapFlags(SCEV::FlagNW)));
1416 }
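  // Net effect of the two normalizations above (a sketch): when both a
  // non-dominating start and a non-dominating step are stripped, the addrec is
  // materialized as PostLoopOffset + PostLoopScale * {0,+,1}<L>, with the
  // stripped components re-applied after the core recurrence is expanded (see
  // the end of this function).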
1417
1418 // Expand the core addrec. If we need post-loop scaling, force it to
1419 // expand to an integer type to avoid the need for additional casting.
1420 Type *ExpandTy = PostLoopScale ? IntTy : STy;
1421 // We can't use a pointer type for the addrec if the pointer type is
1422 // non-integral.
1423 Type *AddRecPHIExpandTy =
1424 DL.isNonIntegralPointerType(STy) ? Normalized->getType() : ExpandTy;
1425
1426 // In some cases, we decide to reuse an existing phi node but need to truncate
1427 // it and/or invert the step.
1428 Type *TruncTy = nullptr;
1429 bool InvertStep = false;
1430 PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
1431 IntTy, TruncTy, InvertStep);
1432
1433 // Accommodate post-inc mode, if necessary.
1434 Value *Result;
1435 if (!PostIncLoops.count(L))
1436 Result = PN;
1437 else {
1438 // In PostInc mode, use the post-incremented value.
1439 BasicBlock *LatchBlock = L->getLoopLatch();
1440 assert(LatchBlock && "PostInc mode requires a unique loop latch!");
1441 Result = PN->getIncomingValueForBlock(LatchBlock);
1442
1443 // We might be introducing a new use of the post-inc IV that is not poison
1444 // safe, in which case we should drop poison generating flags. Only keep
1445 // those flags for which SCEV has proven that they always hold.
1446 if (isa<OverflowingBinaryOperator>(Result)) {
1447 auto *I = cast<Instruction>(Result);
1448 if (!S->hasNoUnsignedWrap())
1449 I->setHasNoUnsignedWrap(false);
1450 if (!S->hasNoSignedWrap())
1451 I->setHasNoSignedWrap(false);
1452 }
1453
1454 // For an expansion to use the postinc form, the client must call
1455 // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
1456 // or dominated by IVIncInsertPos.
1457 if (isa<Instruction>(Result) &&
1458 !SE.DT.dominates(cast<Instruction>(Result),
1459 &*Builder.GetInsertPoint())) {
1460 // The induction variable's postinc expansion does not dominate this use.
1461 // IVUsers tries to prevent this case, so it is rare. However, it can
1462 // happen when an IVUser outside the loop is not dominated by the latch
1463 // block. Adjusting IVIncInsertPos before expansion begins cannot handle
1464       // all cases. Consider a phi outside the loop whose operand is replaced during
1465 // expansion with the value of the postinc user. Without fundamentally
1466 // changing the way postinc users are tracked, the only remedy is
1467 // inserting an extra IV increment. StepV might fold into PostLoopOffset,
1468 // but hopefully expandCodeFor handles that.
1469 bool useSubtract =
1470 !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
1471 if (useSubtract)
1472 Step = SE.getNegativeSCEV(Step);
1473 Value *StepV;
1474 {
1475 // Expand the step somewhere that dominates the loop header.
1476 SCEVInsertPointGuard Guard(Builder, this);
1477 StepV = expandCodeForImpl(
1478 Step, IntTy, &*L->getHeader()->getFirstInsertionPt(), false);
1479 }
1480 Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1481 }
1482 }
1483
1484 // We have decided to reuse an induction variable of a dominating loop. Apply
1485 // truncation and/or inversion of the step.
1486 if (TruncTy) {
1487 Type *ResTy = Result->getType();
1488 // Normalize the result type.
1489 if (ResTy != SE.getEffectiveSCEVType(ResTy))
1490 Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
1491 // Truncate the result.
1492 if (TruncTy != Result->getType())
1493 Result = Builder.CreateTrunc(Result, TruncTy);
1494
1495 // Invert the result.
1496 if (InvertStep)
1497 Result = Builder.CreateSub(
1498 expandCodeForImpl(Normalized->getStart(), TruncTy, false), Result);
1499 }
1500
1501 // Re-apply any non-loop-dominating scale.
1502 if (PostLoopScale) {
1503 assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
1504 Result = InsertNoopCastOfTo(Result, IntTy);
1505 Result = Builder.CreateMul(Result,
1506 expandCodeForImpl(PostLoopScale, IntTy, false));
1507 }
1508
1509 // Re-apply any non-loop-dominating offset.
1510 if (PostLoopOffset) {
1511 if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
1512 if (Result->getType()->isIntegerTy()) {
1513 Value *Base = expandCodeForImpl(PostLoopOffset, ExpandTy, false);
1514 Result = expandAddToGEP(SE.getUnknown(Result), PTy, IntTy, Base);
1515 } else {
1516 Result = expandAddToGEP(PostLoopOffset, PTy, IntTy, Result);
1517 }
1518 } else {
1519 Result = InsertNoopCastOfTo(Result, IntTy);
1520 Result = Builder.CreateAdd(
1521 Result, expandCodeForImpl(PostLoopOffset, IntTy, false));
1522 }
1523 }
1524
1525 return Result;
1526 }
1527
1528 Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
1529 // In canonical mode we compute the addrec as an expression of a canonical IV
1530 // using evaluateAtIteration and expand the resulting SCEV expression. This
1531   // way we avoid introducing new IVs to carry on the computation of the addrec
1532 // throughout the loop.
1533 //
1534 // For nested addrecs evaluateAtIteration might need a canonical IV of a
1535 // type wider than the addrec itself. Emitting a canonical IV of the
1536 // proper type might produce non-legal types, for example expanding an i64
1537 // {0,+,2,+,1} addrec would need an i65 canonical IV. To avoid this just fall
1538 // back to non-canonical mode for nested addrecs.
1539 if (!CanonicalMode || (S->getNumOperands() > 2))
1540 return expandAddRecExprLiterally(S);
1541
1542 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1543 const Loop *L = S->getLoop();
1544
1545 // First check for an existing canonical IV in a suitable type.
1546 PHINode *CanonicalIV = nullptr;
1547 if (PHINode *PN = L->getCanonicalInductionVariable())
1548 if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
1549 CanonicalIV = PN;
1550
1551 // Rewrite an AddRec in terms of the canonical induction variable, if
1552   // its type is narrower.
1553 if (CanonicalIV &&
1554 SE.getTypeSizeInBits(CanonicalIV->getType()) >
1555 SE.getTypeSizeInBits(Ty)) {
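    // Illustrative example (assuming an i64 canonical IV already exists): an
    // i32 addrec {0,+,%s}<%loop> is any-extended to an i64 addrec, expanded in
    // terms of the wide IV, and the result is truncated back to i32 at a point
    // dominated by the expansion.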
1556 SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
1557 for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
1558 NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
1559 Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
1560 S->getNoWrapFlags(SCEV::FlagNW)));
1561 BasicBlock::iterator NewInsertPt =
1562 findInsertPointAfter(cast<Instruction>(V), &*Builder.GetInsertPoint());
1563 V = expandCodeForImpl(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
1564 &*NewInsertPt, false);
1565 return V;
1566 }
1567
1568 // {X,+,F} --> X + {0,+,F}
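  // For illustration: {%base,+,4}<%loop> is split into %base + {0,+,4}<%loop>;
  // if %base turns out to be a pointer, the zero-start recurrence is folded
  // into a GEP off %base rather than plain integer arithmetic (see below).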
1569 if (!S->getStart()->isZero()) {
1570 SmallVector<const SCEV *, 4> NewOps(S->operands());
1571 NewOps[0] = SE.getConstant(Ty, 0);
1572 const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
1573 S->getNoWrapFlags(SCEV::FlagNW));
1574
1575 // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
1576 // comments on expandAddToGEP for details.
1577 const SCEV *Base = S->getStart();
1578 // Dig into the expression to find the pointer base for a GEP.
1579 const SCEV *ExposedRest = Rest;
1580 ExposePointerBase(Base, ExposedRest, SE);
1581 // If we found a pointer, expand the AddRec with a GEP.
1582 if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
1583 // Make sure the Base isn't something exotic, such as a multiplied
1584 // or divided pointer value. In those cases, the result type isn't
1585 // actually a pointer type.
1586 if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
1587 Value *StartV = expand(Base);
1588 assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
1589 return expandAddToGEP(ExposedRest, PTy, Ty, StartV);
1590 }
1591 }
1592
1593 // Just do a normal add. Pre-expand the operands to suppress folding.
1594 //
1595 // The LHS and RHS values are factored out of the expand call to make the
1596 // output independent of the argument evaluation order.
1597 const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
1598 const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
1599 return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
1600 }
1601
1602 // If we don't yet have a canonical IV, create one.
1603 if (!CanonicalIV) {
1604 // Create and insert the PHI node for the induction variable in the
1605 // specified loop.
1606 BasicBlock *Header = L->getHeader();
1607 pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1608 CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
1609 &Header->front());
1610 rememberInstruction(CanonicalIV);
1611
1612 SmallSet<BasicBlock *, 4> PredSeen;
1613 Constant *One = ConstantInt::get(Ty, 1);
1614 for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1615 BasicBlock *HP = *HPI;
1616 if (!PredSeen.insert(HP).second) {
1617 // There must be an incoming value for each predecessor, even the
1618 // duplicates!
1619 CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
1620 continue;
1621 }
1622
1623 if (L->contains(HP)) {
1624 // Insert a unit add instruction right before the terminator
1625 // corresponding to the back-edge.
1626 Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
1627 "indvar.next",
1628 HP->getTerminator());
1629 Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
1630 rememberInstruction(Add);
1631 CanonicalIV->addIncoming(Add, HP);
1632 } else {
1633 CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
1634 }
1635 }
1636 }
1637
1638 // {0,+,1} --> Insert a canonical induction variable into the loop!
1639 if (S->isAffine() && S->getOperand(1)->isOne()) {
1640 assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
1641 "IVs with types different from the canonical IV should "
1642 "already have been handled!");
1643 return CanonicalIV;
1644 }
1645
1646 // {0,+,F} --> {0,+,1} * F
1647
1648 // If this is a simple linear addrec, emit it now as a special case.
1649 if (S->isAffine()) // {0,+,F} --> i*F
1650 return
1651 expand(SE.getTruncateOrNoop(
1652 SE.getMulExpr(SE.getUnknown(CanonicalIV),
1653 SE.getNoopOrAnyExtend(S->getOperand(1),
1654 CanonicalIV->getType())),
1655 Ty));
1656
1657 // If this is a chain of recurrences, turn it into a closed form, using the
1658 // folders, then expandCodeFor the closed form. This allows the folders to
1659 // simplify the expression without having to build a bunch of special code
1660 // into this folder.
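  // A sketch of the closed form: for a chain {A,+,B,+,C}<%L> evaluated at the
  // canonical IV value i, evaluateAtIteration produces
  //   A + B * i + C * (i * (i - 1) / 2)
  // i.e. a sum of binomial-coefficient terms, which the SCEV folders can then
  // simplify before expansion.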
1661 const SCEV *IH = SE.getUnknown(CanonicalIV); // Get I as a "symbolic" SCEV.
1662
1663 // Promote S up to the canonical IV type, if the cast is foldable.
1664 const SCEV *NewS = S;
1665 const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
1666 if (isa<SCEVAddRecExpr>(Ext))
1667 NewS = Ext;
1668
1669 const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
1670 //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";
1671
1672 // Truncate the result down to the original type, if needed.
1673 const SCEV *T = SE.getTruncateOrNoop(V, Ty);
1674 return expand(T);
1675 }
1676
1677 Value *SCEVExpander::visitPtrToIntExpr(const SCEVPtrToIntExpr *S) {
1678 Value *V =
1679 expandCodeForImpl(S->getOperand(), S->getOperand()->getType(), false);
1680 return Builder.CreatePtrToInt(V, S->getType());
1681 }
1682
1683 Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
1684 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1685 Value *V = expandCodeForImpl(
1686 S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
1687 false);
1688 return Builder.CreateTrunc(V, Ty);
1689 }
1690
1691 Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
1692 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1693 Value *V = expandCodeForImpl(
1694 S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
1695 false);
1696 return Builder.CreateZExt(V, Ty);
1697 }
1698
1699 Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
1700 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1701 Value *V = expandCodeForImpl(
1702 S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()),
1703 false);
1704 return Builder.CreateSExt(V, Ty);
1705 }
1706
1707 Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
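  // The n-ary max is lowered as a chain of icmp+select pairs. A sketch for
  // smax(%a, %b, %c) with hypothetical i64 values (operand order may differ):
  //   %cmp1 = icmp sgt i64 %a, %b
  //   %m1   = select i1 %cmp1, i64 %a, i64 %b
  //   %cmp2 = icmp sgt i64 %m1, %c
  //   %smax = select i1 %cmp2, i64 %m1, i64 %c
  // The umax/smin/umin visitors below follow the same pattern with different
  // predicates.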
1708 Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
1709 Type *Ty = LHS->getType();
1710 for (int i = S->getNumOperands()-2; i >= 0; --i) {
1711 // In the case of mixed integer and pointer types, do the
1712 // rest of the comparisons as integer.
1713 Type *OpTy = S->getOperand(i)->getType();
1714 if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
1715 Ty = SE.getEffectiveSCEVType(Ty);
1716 LHS = InsertNoopCastOfTo(LHS, Ty);
1717 }
1718 Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
1719 Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
1720 Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
1721 LHS = Sel;
1722 }
1723 // In the case of mixed integer and pointer types, cast the
1724 // final result back to the pointer type.
1725 if (LHS->getType() != S->getType())
1726 LHS = InsertNoopCastOfTo(LHS, S->getType());
1727 return LHS;
1728 }
1729
1730 Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
1731 Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
1732 Type *Ty = LHS->getType();
1733 for (int i = S->getNumOperands()-2; i >= 0; --i) {
1734 // In the case of mixed integer and pointer types, do the
1735 // rest of the comparisons as integer.
1736 Type *OpTy = S->getOperand(i)->getType();
1737 if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
1738 Ty = SE.getEffectiveSCEVType(Ty);
1739 LHS = InsertNoopCastOfTo(LHS, Ty);
1740 }
1741 Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
1742 Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
1743 Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
1744 LHS = Sel;
1745 }
1746 // In the case of mixed integer and pointer types, cast the
1747 // final result back to the pointer type.
1748 if (LHS->getType() != S->getType())
1749 LHS = InsertNoopCastOfTo(LHS, S->getType());
1750 return LHS;
1751 }
1752
1753 Value *SCEVExpander::visitSMinExpr(const SCEVSMinExpr *S) {
1754 Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
1755 Type *Ty = LHS->getType();
1756 for (int i = S->getNumOperands() - 2; i >= 0; --i) {
1757 // In the case of mixed integer and pointer types, do the
1758 // rest of the comparisons as integer.
1759 Type *OpTy = S->getOperand(i)->getType();
1760 if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
1761 Ty = SE.getEffectiveSCEVType(Ty);
1762 LHS = InsertNoopCastOfTo(LHS, Ty);
1763 }
1764 Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
1765 Value *ICmp = Builder.CreateICmpSLT(LHS, RHS);
1766 Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smin");
1767 LHS = Sel;
1768 }
1769 // In the case of mixed integer and pointer types, cast the
1770 // final result back to the pointer type.
1771 if (LHS->getType() != S->getType())
1772 LHS = InsertNoopCastOfTo(LHS, S->getType());
1773 return LHS;
1774 }
1775
1776 Value *SCEVExpander::visitUMinExpr(const SCEVUMinExpr *S) {
1777 Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
1778 Type *Ty = LHS->getType();
1779 for (int i = S->getNumOperands() - 2; i >= 0; --i) {
1780 // In the case of mixed integer and pointer types, do the
1781 // rest of the comparisons as integer.
1782 Type *OpTy = S->getOperand(i)->getType();
1783 if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
1784 Ty = SE.getEffectiveSCEVType(Ty);
1785 LHS = InsertNoopCastOfTo(LHS, Ty);
1786 }
1787 Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
1788 Value *ICmp = Builder.CreateICmpULT(LHS, RHS);
1789 Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umin");
1790 LHS = Sel;
1791 }
1792 // In the case of mixed integer and pointer types, cast the
1793 // final result back to the pointer type.
1794 if (LHS->getType() != S->getType())
1795 LHS = InsertNoopCastOfTo(LHS, S->getType());
1796 return LHS;
1797 }
1798
1799 Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty,
1800 Instruction *IP, bool Root) {
1801 setInsertPoint(IP);
1802 Value *V = expandCodeForImpl(SH, Ty, Root);
1803 return V;
1804 }
1805
1806 Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty, bool Root) {
1807 // Expand the code for this SCEV.
1808 Value *V = expand(SH);
1809
1810 if (PreserveLCSSA) {
1811 if (auto *Inst = dyn_cast<Instruction>(V)) {
1812       // Create a temporary instruction at the current insertion point, so we
1813 // can hand it off to the helper to create LCSSA PHIs if required for the
1814 // new use.
1815 // FIXME: Ideally formLCSSAForInstructions (used in fixupLCSSAFormFor)
1816       // would accept an insertion point and return an LCSSA phi for that
1817 // insertion point, so there is no need to insert & remove the temporary
1818 // instruction.
1819 Instruction *Tmp;
1820 if (Inst->getType()->isIntegerTy())
1821 Tmp =
1822 cast<Instruction>(Builder.CreateAdd(Inst, Inst, "tmp.lcssa.user"));
1823 else {
1824 assert(Inst->getType()->isPointerTy());
1825 Tmp = cast<Instruction>(
1826 Builder.CreateGEP(Inst, Builder.getInt32(1), "tmp.lcssa.user"));
1827 }
1828 V = fixupLCSSAFormFor(Tmp, 0);
1829
1830 // Clean up temporary instruction.
1831 InsertedValues.erase(Tmp);
1832 InsertedPostIncValues.erase(Tmp);
1833 Tmp->eraseFromParent();
1834 }
1835 }
1836
1837 InsertedExpressions[std::make_pair(SH, &*Builder.GetInsertPoint())] = V;
1838 if (Ty) {
1839 assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
1840 "non-trivial casts should be done with the SCEVs directly!");
1841 V = InsertNoopCastOfTo(V, Ty);
1842 }
1843 return V;
1844 }
1845
1846 ScalarEvolution::ValueOffsetPair
1847 SCEVExpander::FindValueInExprValueMap(const SCEV *S,
1848 const Instruction *InsertPt) {
1849 SetVector<ScalarEvolution::ValueOffsetPair> *Set = SE.getSCEVValues(S);
1850   // If the expansion is not in CanonicalMode and the SCEV contains any addrec
1851   // sub-expression, it is required to expand the SCEV literally.
1852 if (CanonicalMode || !SE.containsAddRecurrence(S)) {
1853 // If S is scConstant, it may be worse to reuse an existing Value.
1854 if (S->getSCEVType() != scConstant && Set) {
1855 // Choose a Value from the set which dominates the insertPt.
1856 // insertPt should be inside the Value's parent loop so as not to break
1857 // the LCSSA form.
1858 for (auto const &VOPair : *Set) {
1859 Value *V = VOPair.first;
1860 ConstantInt *Offset = VOPair.second;
1861 Instruction *EntInst = nullptr;
1862 if (V && isa<Instruction>(V) && (EntInst = cast<Instruction>(V)) &&
1863 S->getType() == V->getType() &&
1864 EntInst->getFunction() == InsertPt->getFunction() &&
1865 SE.DT.dominates(EntInst, InsertPt) &&
1866 (SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
1867 SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
1868 return {V, Offset};
1869 }
1870 }
1871 }
1872 return {nullptr, nullptr};
1873 }
1874
1875 // The expansion of a SCEV will either reuse a previous Value in ExprValueMap,
1876 // or expand the SCEV literally. Specifically, if the expansion is in LSRMode
1877 // and the SCEV contains any addrec sub-expression, it will be expanded
1878 // literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
1879 // the expansion will try to reuse a Value from ExprValueMap, and only when
1880 // that fails, expand the SCEV literally.
1881 Value *SCEVExpander::expand(const SCEV *S) {
1882 // Compute an insertion point for this SCEV object. Hoist the instructions
1883 // as far out in the loop nest as possible.
1884 Instruction *InsertPt = &*Builder.GetInsertPoint();
1885
1886   // We can move the insertion point only if there are no div or rem operations;
1887   // otherwise we risk moving it past a check for a zero denominator.
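  // For example (illustrative): expanding (%n /u %d) for a loop guarded by
  // "if (%d != 0)" must keep the udiv inside the guarded region; hoisting it
  // to a dominating block could introduce a division by zero (see PR35406).
  // Division by a non-zero constant carries no such risk and may be hoisted.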
1888 auto SafeToHoist = [](const SCEV *S) {
1889 return !SCEVExprContains(S, [](const SCEV *S) {
1890 if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
1891 if (const auto *SC = dyn_cast<SCEVConstant>(D->getRHS()))
1892 // Division by non-zero constants can be hoisted.
1893 return SC->getValue()->isZero();
1894 // All other divisions should not be moved as they may be
1895 // divisions by zero and should be kept within the
1896 // conditions of the surrounding loops that guard their
1897 // execution (see PR35406).
1898 return true;
1899 }
1900 return false;
1901 });
1902 };
1903 if (SafeToHoist(S)) {
1904 for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1905 L = L->getParentLoop()) {
1906 if (SE.isLoopInvariant(S, L)) {
1907 if (!L) break;
1908 if (BasicBlock *Preheader = L->getLoopPreheader())
1909 InsertPt = Preheader->getTerminator();
1910 else
1911 // LSR sets the insertion point for AddRec start/step values to the
1912 // block start to simplify value reuse, even though it's an invalid
1913 // position. SCEVExpander must correct for this in all cases.
1914 InsertPt = &*L->getHeader()->getFirstInsertionPt();
1915 } else {
1916 // If the SCEV is computable at this level, insert it into the header
1917 // after the PHIs (and after any other instructions that we've inserted
1918 // there) so that it is guaranteed to dominate any user inside the loop.
1919 if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1920 InsertPt = &*L->getHeader()->getFirstInsertionPt();
1921
1922 while (InsertPt->getIterator() != Builder.GetInsertPoint() &&
1923 (isInsertedInstruction(InsertPt) ||
1924 isa<DbgInfoIntrinsic>(InsertPt))) {
1925 InsertPt = &*std::next(InsertPt->getIterator());
1926 }
1927 break;
1928 }
1929 }
1930 }
1931
1932 // Check to see if we already expanded this here.
1933 auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
1934 if (I != InsertedExpressions.end())
1935 return I->second;
1936
1937 SCEVInsertPointGuard Guard(Builder, this);
1938 Builder.SetInsertPoint(InsertPt);
1939
1940 // Expand the expression into instructions.
1941 ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, InsertPt);
1942 Value *V = VO.first;
1943
1944 if (!V)
1945 V = visit(S);
1946 else if (VO.second) {
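    // Reuse path with a constant offset: the map recorded that S corresponds
    // to V minus Offset, so materialize S from the existing V. A sketch of the
    // pointer case when Offset is not a multiple of the element size
    // (hypothetical names):
    //   %tmp     = bitcast %T* %v to i8*
    //   %uglygep = getelementptr i8, i8* %tmp, i64 <-Offset>
    //   %result  = bitcast i8* %uglygep to %T*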
1947 if (PointerType *Vty = dyn_cast<PointerType>(V->getType())) {
1948 Type *Ety = Vty->getPointerElementType();
1949 int64_t Offset = VO.second->getSExtValue();
1950 int64_t ESize = SE.getTypeSizeInBits(Ety);
1951 if ((Offset * 8) % ESize == 0) {
1952 ConstantInt *Idx =
1953 ConstantInt::getSigned(VO.second->getType(), -(Offset * 8) / ESize);
1954 V = Builder.CreateGEP(Ety, V, Idx, "scevgep");
1955 } else {
1956 ConstantInt *Idx =
1957 ConstantInt::getSigned(VO.second->getType(), -Offset);
1958 unsigned AS = Vty->getAddressSpace();
1959 V = Builder.CreateBitCast(V, Type::getInt8PtrTy(SE.getContext(), AS));
1960 V = Builder.CreateGEP(Type::getInt8Ty(SE.getContext()), V, Idx,
1961 "uglygep");
1962 V = Builder.CreateBitCast(V, Vty);
1963 }
1964 } else {
1965 V = Builder.CreateSub(V, VO.second);
1966 }
1967 }
1968 // Remember the expanded value for this SCEV at this location.
1969 //
1970 // This is independent of PostIncLoops. The mapped value simply materializes
1971 // the expression at this insertion point. If the mapped value happened to be
1972 // a postinc expansion, it could be reused by a non-postinc user, but only if
1973 // its insertion point was already at the head of the loop.
1974 InsertedExpressions[std::make_pair(S, InsertPt)] = V;
1975 return V;
1976 }
1977
1978 void SCEVExpander::rememberInstruction(Value *I) {
1979 auto DoInsert = [this](Value *V) {
1980 if (!PostIncLoops.empty())
1981 InsertedPostIncValues.insert(V);
1982 else
1983 InsertedValues.insert(V);
1984 };
1985 DoInsert(I);
1986
1987 if (!PreserveLCSSA)
1988 return;
1989
1990 if (auto *Inst = dyn_cast<Instruction>(I)) {
1991 // A new instruction has been added, which might introduce new uses outside
1992     // a defining loop. Fix LCSSA for each operand of the new instruction,
1993 // if required.
1994 for (unsigned OpIdx = 0, OpEnd = Inst->getNumOperands(); OpIdx != OpEnd;
1995 OpIdx++)
1996 fixupLCSSAFormFor(Inst, OpIdx);
1997 }
1998 }
1999
2000 /// replaceCongruentIVs - Check for congruent phis in this loop header and
2001 /// replace them with their most canonical representative. Return the number of
2002 /// phis eliminated.
2003 ///
2004 /// This does not depend on any SCEVExpander state but should be used in
2005 /// the same context that SCEVExpander is used.
2006 unsigned
2007 SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
2008 SmallVectorImpl<WeakTrackingVH> &DeadInsts,
2009 const TargetTransformInfo *TTI) {
2010 // Find integer phis in order of increasing width.
2011 SmallVector<PHINode*, 8> Phis;
2012 for (PHINode &PN : L->getHeader()->phis())
2013 Phis.push_back(&PN);
2014
2015 if (TTI)
2016 llvm::sort(Phis, [](Value *LHS, Value *RHS) {
2017 // Put pointers at the back and make sure pointer < pointer = false.
2018 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
2019 return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
2020 return RHS->getType()->getPrimitiveSizeInBits().getFixedSize() <
2021 LHS->getType()->getPrimitiveSizeInBits().getFixedSize();
2022 });
2023
2024 unsigned NumElim = 0;
2025 DenseMap<const SCEV *, PHINode *> ExprToIVMap;
2026 // Process phis from wide to narrow. Map wide phis to their truncation
2027 // so narrow phis can reuse them.
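  // Illustrative example: given congruent header phis %wide = {0,+,1}<i64> and
  // %narrow = {0,+,1}<i32>, the narrow phi is replaced by a trunc of the wide
  // one (inserted at the header's first insertion point), and the narrow phi
  // and its increment are left for dead-code cleanup.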
2028 for (PHINode *Phi : Phis) {
2029 auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
2030 if (Value *V = SimplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
2031 return V;
2032 if (!SE.isSCEVable(PN->getType()))
2033 return nullptr;
2034 auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
2035 if (!Const)
2036 return nullptr;
2037 return Const->getValue();
2038 };
2039
2040 // Fold constant phis. They may be congruent to other constant phis and
2041 // would confuse the logic below that expects proper IVs.
2042 if (Value *V = SimplifyPHINode(Phi)) {
2043 if (V->getType() != Phi->getType())
2044 continue;
2045 Phi->replaceAllUsesWith(V);
2046 DeadInsts.emplace_back(Phi);
2047 ++NumElim;
2048 DEBUG_WITH_TYPE(DebugType, dbgs()
2049 << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
2050 continue;
2051 }
2052
2053 if (!SE.isSCEVable(Phi->getType()))
2054 continue;
2055
2056 PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
2057 if (!OrigPhiRef) {
2058 OrigPhiRef = Phi;
2059 if (Phi->getType()->isIntegerTy() && TTI &&
2060 TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
2061 // This phi can be freely truncated to the narrowest phi type. Map the
2062 // truncated expression to it so it will be reused for narrow types.
2063 const SCEV *TruncExpr =
2064 SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
2065 ExprToIVMap[TruncExpr] = Phi;
2066 }
2067 continue;
2068 }
2069
2070 // Replacing a pointer phi with an integer phi or vice-versa doesn't make
2071 // sense.
2072 if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
2073 continue;
2074
2075 if (BasicBlock *LatchBlock = L->getLoopLatch()) {
2076 Instruction *OrigInc = dyn_cast<Instruction>(
2077 OrigPhiRef->getIncomingValueForBlock(LatchBlock));
2078 Instruction *IsomorphicInc =
2079 dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
2080
2081 if (OrigInc && IsomorphicInc) {
2082 // If this phi has the same width but is more canonical, replace the
2083 // original with it. As part of the "more canonical" determination,
2084 // respect a prior decision to use an IV chain.
2085 if (OrigPhiRef->getType() == Phi->getType() &&
2086 !(ChainedPhis.count(Phi) ||
2087 isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
2088 (ChainedPhis.count(Phi) ||
2089 isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
2090 std::swap(OrigPhiRef, Phi);
2091 std::swap(OrigInc, IsomorphicInc);
2092 }
2093 // Replacing the congruent phi is sufficient because acyclic
2094 // redundancy elimination, CSE/GVN, should handle the
2095 // rest. However, once SCEV proves that a phi is congruent,
2096 // it's often the head of an IV user cycle that is isomorphic
2097 // with the original phi. It's worth eagerly cleaning up the
2098 // common case of a single IV increment so that DeleteDeadPHIs
2099 // can remove cycles that had postinc uses.
2100 const SCEV *TruncExpr =
2101 SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
2102 if (OrigInc != IsomorphicInc &&
2103 TruncExpr == SE.getSCEV(IsomorphicInc) &&
2104 SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) &&
2105 hoistIVInc(OrigInc, IsomorphicInc)) {
2106 DEBUG_WITH_TYPE(DebugType,
2107 dbgs() << "INDVARS: Eliminated congruent iv.inc: "
2108 << *IsomorphicInc << '\n');
2109 Value *NewInc = OrigInc;
2110 if (OrigInc->getType() != IsomorphicInc->getType()) {
2111 Instruction *IP = nullptr;
2112 if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
2113 IP = &*PN->getParent()->getFirstInsertionPt();
2114 else
2115 IP = OrigInc->getNextNode();
2116
2117 IRBuilder<> Builder(IP);
2118 Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
2119 NewInc = Builder.CreateTruncOrBitCast(
2120 OrigInc, IsomorphicInc->getType(), IVName);
2121 }
2122 IsomorphicInc->replaceAllUsesWith(NewInc);
2123 DeadInsts.emplace_back(IsomorphicInc);
2124 }
2125 }
2126 }
2127 DEBUG_WITH_TYPE(DebugType, dbgs() << "INDVARS: Eliminated congruent iv: "
2128 << *Phi << '\n');
2129 DEBUG_WITH_TYPE(DebugType, dbgs() << "INDVARS: Original iv: "
2130 << *OrigPhiRef << '\n');
2131 ++NumElim;
2132 Value *NewIV = OrigPhiRef;
2133 if (OrigPhiRef->getType() != Phi->getType()) {
2134 IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
2135 Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
2136 NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
2137 }
2138 Phi->replaceAllUsesWith(NewIV);
2139 DeadInsts.emplace_back(Phi);
2140 }
2141 return NumElim;
2142 }
2143
2144 Optional<ScalarEvolution::ValueOffsetPair>
2145 SCEVExpander::getRelatedExistingExpansion(const SCEV *S, const Instruction *At,
2146 Loop *L) {
2147 using namespace llvm::PatternMatch;
2148
2149 SmallVector<BasicBlock *, 4> ExitingBlocks;
2150 L->getExitingBlocks(ExitingBlocks);
2151
2152 // Look for suitable value in simple conditions at the loop exits.
2153 for (BasicBlock *BB : ExitingBlocks) {
2154 ICmpInst::Predicate Pred;
2155 Instruction *LHS, *RHS;
2156
2157 if (!match(BB->getTerminator(),
2158 m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
2159 m_BasicBlock(), m_BasicBlock())))
2160 continue;
2161
2162 if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
2163 return ScalarEvolution::ValueOffsetPair(LHS, nullptr);
2164
2165 if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
2166 return ScalarEvolution::ValueOffsetPair(RHS, nullptr);
2167 }
2168
2169 // Use expand's logic which is used for reusing a previous Value in
2170 // ExprValueMap.
2171 ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, At);
2172 if (VO.first)
2173 return VO;
2174
2175 // There is potential to make this significantly smarter, but this simple
2176 // heuristic already gets some interesting cases.
2177
2178   // Cannot find a suitable value.
2179 return None;
2180 }
2181
2182 template<typename T> static int costAndCollectOperands(
2183 const SCEVOperand &WorkItem, const TargetTransformInfo &TTI,
2184 TargetTransformInfo::TargetCostKind CostKind,
2185 SmallVectorImpl<SCEVOperand> &Worklist) {
2186
2187 const T *S = cast<T>(WorkItem.S);
2188 int Cost = 0;
2189 // Object to help map SCEV operands to expanded IR instructions.
2190 struct OperationIndices {
2191 OperationIndices(unsigned Opc, size_t min, size_t max) :
2192 Opcode(Opc), MinIdx(min), MaxIdx(max) { }
2193 unsigned Opcode;
2194 size_t MinIdx;
2195 size_t MaxIdx;
2196 };
2197
2198 // Collect the operations of all the instructions that will be needed to
2199 // expand the SCEVExpr. This is so that when we come to cost the operands,
2200 // we know what the generated user(s) will be.
2201 SmallVector<OperationIndices, 2> Operations;
2202
2203 auto CastCost = [&](unsigned Opcode) {
2204 Operations.emplace_back(Opcode, 0, 0);
2205 return TTI.getCastInstrCost(Opcode, S->getType(),
2206 S->getOperand(0)->getType(),
2207 TTI::CastContextHint::None, CostKind);
2208 };
2209
2210 auto ArithCost = [&](unsigned Opcode, unsigned NumRequired,
2211 unsigned MinIdx = 0, unsigned MaxIdx = 1) {
2212 Operations.emplace_back(Opcode, MinIdx, MaxIdx);
2213 return NumRequired *
2214 TTI.getArithmeticInstrCost(Opcode, S->getType(), CostKind);
2215 };
2216
2217 auto CmpSelCost = [&](unsigned Opcode, unsigned NumRequired,
2218 unsigned MinIdx, unsigned MaxIdx) {
2219 Operations.emplace_back(Opcode, MinIdx, MaxIdx);
2220 Type *OpType = S->getOperand(0)->getType();
2221 return NumRequired * TTI.getCmpSelInstrCost(
2222 Opcode, OpType, CmpInst::makeCmpResultType(OpType),
2223 CmpInst::BAD_ICMP_PREDICATE, CostKind);
2224 };
2225
2226 switch (S->getSCEVType()) {
2227 case scCouldNotCompute:
2228 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
2229 case scUnknown:
2230 case scConstant:
2231 return 0;
2232 case scPtrToInt:
2233 Cost = CastCost(Instruction::PtrToInt);
2234 break;
2235 case scTruncate:
2236 Cost = CastCost(Instruction::Trunc);
2237 break;
2238 case scZeroExtend:
2239 Cost = CastCost(Instruction::ZExt);
2240 break;
2241 case scSignExtend:
2242 Cost = CastCost(Instruction::SExt);
2243 break;
2244 case scUDivExpr: {
2245 unsigned Opcode = Instruction::UDiv;
2246 if (auto *SC = dyn_cast<SCEVConstant>(S->getOperand(1)))
2247 if (SC->getAPInt().isPowerOf2())
2248 Opcode = Instruction::LShr;
2249 Cost = ArithCost(Opcode, 1);
2250 break;
2251 }
2252 case scAddExpr:
2253 Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
2254 break;
2255 case scMulExpr:
2256     // TODO: this is a very pessimistic cost modelling for Mul, because of the
2257     // binary powering (Bin Pow) algorithm actually used by the expander; see
2258     // SCEVExpander::visitMulExpr(), ExpandOpBinPowN().
2259 Cost = ArithCost(Instruction::Mul, S->getNumOperands() - 1);
2260 break;
2261 case scSMaxExpr:
2262 case scUMaxExpr:
2263 case scSMinExpr:
2264 case scUMinExpr: {
2265 Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
2266 Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
2267 break;
2268 }
2269 case scAddRecExpr: {
2270     // In this polynomial, we may have some zero operands, and we shouldn't
2271     // really charge for those. So how many non-zero coefficients are there?
2272 int NumTerms = llvm::count_if(S->operands(), [](const SCEV *Op) {
2273 return !Op->isZero();
2274 });
2275
2276     assert(NumTerms >= 1 && "Polynomial should have at least one term.");
2277 assert(!(*std::prev(S->operands().end()))->isZero() &&
2278 "Last operand should not be zero");
2279
2280     // Ignoring the constant term (operand 0), how many coefficients are u> 1?
2281 int NumNonZeroDegreeNonOneTerms =
2282 llvm::count_if(S->operands(), [](const SCEV *Op) {
2283 auto *SConst = dyn_cast<SCEVConstant>(Op);
2284 return !SConst || SConst->getAPInt().ugt(1);
2285 });
2286
2287     // Much like with a normal add expr, the polynomial will require
2288     // one less addition than the number of its terms.
2289 int AddCost = ArithCost(Instruction::Add, NumTerms - 1,
2290 /*MinIdx*/1, /*MaxIdx*/1);
2291 // Here, *each* one of those will require a multiplication.
2292 int MulCost = ArithCost(Instruction::Mul, NumNonZeroDegreeNonOneTerms);
2293 Cost = AddCost + MulCost;
2294
2295     // What is the degree of this polynomial?
2296 int PolyDegree = S->getNumOperands() - 1;
2297 assert(PolyDegree >= 1 && "Should be at least affine.");
2298
2299 // The final term will be:
2300 // Op_{PolyDegree} * x ^ {PolyDegree}
2301 // Where x ^ {PolyDegree} will again require PolyDegree-1 mul operations.
2302 // Note that x ^ {PolyDegree} = x * x ^ {PolyDegree-1} so charging for
2303 // x ^ {PolyDegree} will give us x ^ {2} .. x ^ {PolyDegree-1} for free.
2304 // FIXME: this is conservatively correct, but might be overly pessimistic.
2305 Cost += MulCost * (PolyDegree - 1);
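    // Worked example (a sketch): for the degree-2 addrec {1,+,2,+,3}, NumTerms
    // is 3 (so 2 adds), NumNonZeroDegreeNonOneTerms is 2, and the extra
    // MulCost * (PolyDegree - 1) term charges 2 more muls, giving a modelled
    // cost of 2 adds + 4 muls before the operands themselves are costed.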
2306 break;
2307 }
2308 }
2309
2310 for (auto &CostOp : Operations) {
2311 for (auto SCEVOp : enumerate(S->operands())) {
2312 // Clamp the index to account for multiple IR operations being chained.
2313 size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
2314 size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
2315 Worklist.emplace_back(CostOp.Opcode, OpIdx, SCEVOp.value());
2316 }
2317 }
2318 return Cost;
2319 }
2320
2321 bool SCEVExpander::isHighCostExpansionHelper(
2322 const SCEVOperand &WorkItem, Loop *L, const Instruction &At,
2323 int &BudgetRemaining, const TargetTransformInfo &TTI,
2324 SmallPtrSetImpl<const SCEV *> &Processed,
2325 SmallVectorImpl<SCEVOperand> &Worklist) {
2326 if (BudgetRemaining < 0)
2327 return true; // Already run out of budget, give up.
2328
2329 const SCEV *S = WorkItem.S;
2330 // Was the cost of expansion of this expression already accounted for?
2331 if (!isa<SCEVConstant>(S) && !Processed.insert(S).second)
2332 return false; // We have already accounted for this expression.
2333
2334 // If we can find an existing value for this scev available at the point "At"
2335 // then consider the expression cheap.
2336 if (getRelatedExistingExpansion(S, &At, L))
2337 return false; // Consider the expression to be free.
2338
2339 TargetTransformInfo::TargetCostKind CostKind =
2340 L->getHeader()->getParent()->hasMinSize()
2341 ? TargetTransformInfo::TCK_CodeSize
2342 : TargetTransformInfo::TCK_RecipThroughput;
2343
2344 switch (S->getSCEVType()) {
2345 case scCouldNotCompute:
2346 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
2347 case scUnknown:
2348 // Assume to be zero-cost.
2349 return false;
2350 case scConstant: {
2351     // Only evaluate the costs of constants when optimizing for size.
2352     if (CostKind != TargetTransformInfo::TCK_CodeSize)
2353       return false;
2354 const APInt &Imm = cast<SCEVConstant>(S)->getAPInt();
2355 Type *Ty = S->getType();
2356 BudgetRemaining -= TTI.getIntImmCostInst(
2357 WorkItem.ParentOpcode, WorkItem.OperandIdx, Imm, Ty, CostKind);
2358 return BudgetRemaining < 0;
2359 }
2360 case scTruncate:
2361 case scPtrToInt:
2362 case scZeroExtend:
2363 case scSignExtend: {
2364 int Cost =
2365 costAndCollectOperands<SCEVCastExpr>(WorkItem, TTI, CostKind, Worklist);
2366 BudgetRemaining -= Cost;
2367 return false; // Will answer upon next entry into this function.
2368 }
2369 case scUDivExpr: {
2370 // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
2371 // HowManyLessThans produced to compute a precise expression, rather than a
2372 // UDiv from the user's code. If we can't find a UDiv in the code with some
2373     // simple searching, we need to account for its cost.
2374
2375     // At the beginning of this function we already tried to find an existing
2376     // value for plain 'S'. Now try to look up 'S + 1', since that is a common
2377     // pattern involving division. This is just a simple search heuristic.
2378 if (getRelatedExistingExpansion(
2379 SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), &At, L))
2380 return false; // Consider it to be free.
2381
2382 int Cost =
2383 costAndCollectOperands<SCEVUDivExpr>(WorkItem, TTI, CostKind, Worklist);
2384 // Need to count the cost of this UDiv.
2385 BudgetRemaining -= Cost;
2386 return false; // Will answer upon next entry into this function.
2387 }
2388 case scAddExpr:
2389 case scMulExpr:
2390 case scUMaxExpr:
2391 case scSMaxExpr:
2392 case scUMinExpr:
2393 case scSMinExpr: {
2394 assert(cast<SCEVNAryExpr>(S)->getNumOperands() > 1 &&
2395 "Nary expr should have more than 1 operand.");
2396 // The simple nary expr will require one less op (or pair of ops)
2397     // than the number of its terms.
2398 int Cost =
2399 costAndCollectOperands<SCEVNAryExpr>(WorkItem, TTI, CostKind, Worklist);
2400 BudgetRemaining -= Cost;
2401 return BudgetRemaining < 0;
2402 }
2403 case scAddRecExpr: {
2404 assert(cast<SCEVAddRecExpr>(S)->getNumOperands() >= 2 &&
2405 "Polynomial should be at least linear");
2406 BudgetRemaining -= costAndCollectOperands<SCEVAddRecExpr>(
2407 WorkItem, TTI, CostKind, Worklist);
2408 return BudgetRemaining < 0;
2409 }
2410 }
2411 llvm_unreachable("Unknown SCEV kind!");
2412 }
2413
2414 Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
2415 Instruction *IP) {
2416 assert(IP);
2417 switch (Pred->getKind()) {
2418 case SCEVPredicate::P_Union:
2419 return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
2420 case SCEVPredicate::P_Equal:
2421 return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
2422 case SCEVPredicate::P_Wrap: {
2423 auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
2424 return expandWrapPredicate(AddRecPred, IP);
2425 }
2426 }
2427 llvm_unreachable("Unknown SCEV predicate type");
2428 }
2429
2430 Value *SCEVExpander::expandEqualPredicate(const SCEVEqualPredicate *Pred,
2431 Instruction *IP) {
2432 Value *Expr0 =
2433 expandCodeForImpl(Pred->getLHS(), Pred->getLHS()->getType(), IP, false);
2434 Value *Expr1 =
2435 expandCodeForImpl(Pred->getRHS(), Pred->getRHS()->getType(), IP, false);
2436
2437 Builder.SetInsertPoint(IP);
2438 auto *I = Builder.CreateICmpNE(Expr0, Expr1, "ident.check");
2439 return I;
2440 }
2441
2442 Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
2443 Instruction *Loc, bool Signed) {
2444 assert(AR->isAffine() && "Cannot generate RT check for "
2445 "non-affine expression");
2446
2447 SCEVUnionPredicate Pred;
2448 const SCEV *ExitCount =
2449 SE.getPredicatedBackedgeTakenCount(AR->getLoop(), Pred);
2450
2451 assert(!isa<SCEVCouldNotCompute>(ExitCount) && "Invalid loop count");
2452
2453 const SCEV *Step = AR->getStepRecurrence(SE);
2454 const SCEV *Start = AR->getStart();
2455
2456 Type *ARTy = AR->getType();
2457 unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
2458 unsigned DstBits = SE.getTypeSizeInBits(ARTy);
2459
2460 // The expression {Start,+,Step} has nusw/nssw if
2461 // Step < 0, Start - |Step| * Backedge <= Start
2462   // Step >= 0, Start + |Step| * Backedge >= Start
2463 // and |Step| * Backedge doesn't unsigned overflow.
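  // A sketch of the emitted check (hypothetical names, integer case):
  //   %abs.step = select (%step s< 0), %neg.step, %step
  //   %mul      = call { iN, i1 } @llvm.umul.with.overflow.iN(%abs.step, %btc)
  //   %wraps    = select (%step s< 0),
  //                      (%start - %mul.result > %start),
  //                      (%start + %mul.result < %start)
  //   %check    = or %wraps, %mul.overflow
  // plus an extra clause when the backedge-taken count had to be truncated.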
2464
2465 IntegerType *CountTy = IntegerType::get(Loc->getContext(), SrcBits);
2466 Builder.SetInsertPoint(Loc);
2467 Value *TripCountVal = expandCodeForImpl(ExitCount, CountTy, Loc, false);
2468
2469 IntegerType *Ty =
2470 IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(ARTy));
2471 Type *ARExpandTy = DL.isNonIntegralPointerType(ARTy) ? ARTy : Ty;
2472
2473 Value *StepValue = expandCodeForImpl(Step, Ty, Loc, false);
2474 Value *NegStepValue =
2475 expandCodeForImpl(SE.getNegativeSCEV(Step), Ty, Loc, false);
2476 Value *StartValue = expandCodeForImpl(Start, ARExpandTy, Loc, false);
2477
2478 ConstantInt *Zero =
2479 ConstantInt::get(Loc->getContext(), APInt::getNullValue(DstBits));
2480
2481 Builder.SetInsertPoint(Loc);
2482 // Compute |Step|
2483 Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
2484 Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2485
2486   // Get the backedge taken count and truncate or extend it to the AR type.
2487 Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
2488 auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
2489 Intrinsic::umul_with_overflow, Ty);
2490
2491 // Compute |Step| * Backedge
2492 CallInst *Mul = Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
2493 Value *MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
2494 Value *OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");
2495
2496 // Compute:
2497 // Start + |Step| * Backedge < Start
2498 // Start - |Step| * Backedge > Start
2499 Value *Add = nullptr, *Sub = nullptr;
2500 if (PointerType *ARPtrTy = dyn_cast<PointerType>(ARExpandTy)) {
2501 const SCEV *MulS = SE.getSCEV(MulV);
2502 const SCEV *NegMulS = SE.getNegativeSCEV(MulS);
2503 Add = Builder.CreateBitCast(expandAddToGEP(MulS, ARPtrTy, Ty, StartValue),
2504 ARPtrTy);
2505 Sub = Builder.CreateBitCast(
2506 expandAddToGEP(NegMulS, ARPtrTy, Ty, StartValue), ARPtrTy);
2507 } else {
2508 Add = Builder.CreateAdd(StartValue, MulV);
2509 Sub = Builder.CreateSub(StartValue, MulV);
2510 }
2511
2512 Value *EndCompareGT = Builder.CreateICmp(
2513 Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);
2514
2515 Value *EndCompareLT = Builder.CreateICmp(
2516 Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);
2517
2518 // Select the answer based on the sign of Step.
2519 Value *EndCheck =
2520 Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2521
2522 // If the backedge taken count type is larger than the AR type,
2523 // check that we don't drop any bits by truncating it. If we are
2524 // dropping bits, then we have overflow (unless the step is zero).
2525 if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
2526 auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
2527 auto *BackedgeCheck =
2528 Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
2529 ConstantInt::get(Loc->getContext(), MaxVal));
2530 BackedgeCheck = Builder.CreateAnd(
2531 BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));
2532
2533 EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2534 }
2535
2536 return Builder.CreateOr(EndCheck, OfMul);
2537 }
2538
2539 Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
2540 Instruction *IP) {
2541 const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
2542 Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;
2543
2544 // Add a check for NUSW
2545 if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
2546 NUSWCheck = generateOverflowCheck(A, IP, false);
2547
2548 // Add a check for NSSW
2549 if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
2550 NSSWCheck = generateOverflowCheck(A, IP, true);
2551
2552 if (NUSWCheck && NSSWCheck)
2553 return Builder.CreateOr(NUSWCheck, NSSWCheck);
2554
2555 if (NUSWCheck)
2556 return NUSWCheck;
2557
2558 if (NSSWCheck)
2559 return NSSWCheck;
2560
2561 return ConstantInt::getFalse(IP->getContext());
2562 }
2563
2564 Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
2565 Instruction *IP) {
2566 auto *BoolType = IntegerType::get(IP->getContext(), 1);
2567 Value *Check = ConstantInt::getNullValue(BoolType);
2568
2569 // Loop over all checks in this set.
2570 for (auto Pred : Union->getPredicates()) {
2571 auto *NextCheck = expandCodeForPredicate(Pred, IP);
2572 Builder.SetInsertPoint(IP);
2573 Check = Builder.CreateOr(Check, NextCheck);
2574 }
2575
2576 return Check;
2577 }
2578
2579 Value *SCEVExpander::fixupLCSSAFormFor(Instruction *User, unsigned OpIdx) {
2580 assert(PreserveLCSSA);
2581 SmallVector<Instruction *, 1> ToUpdate;
2582
2583 auto *OpV = User->getOperand(OpIdx);
2584 auto *OpI = dyn_cast<Instruction>(OpV);
2585 if (!OpI)
2586 return OpV;
2587
2588 Loop *DefLoop = SE.LI.getLoopFor(OpI->getParent());
2589 Loop *UseLoop = SE.LI.getLoopFor(User->getParent());
2590 if (!DefLoop || UseLoop == DefLoop || DefLoop->contains(UseLoop))
2591 return OpV;
2592
2593 ToUpdate.push_back(OpI);
2594 SmallVector<PHINode *, 16> PHIsToRemove;
2595 formLCSSAForInstructions(ToUpdate, SE.DT, SE.LI, &SE, Builder, &PHIsToRemove);
2596 for (PHINode *PN : PHIsToRemove) {
2597 if (!PN->use_empty())
2598 continue;
2599 InsertedValues.erase(PN);
2600 InsertedPostIncValues.erase(PN);
2601 PN->eraseFromParent();
2602 }
2603
2604 return User->getOperand(OpIdx);
2605 }
2606
2607 namespace {
2608 // Search for a SCEV subexpression that is not safe to expand. Any expression
2609 // that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
2610 // UDiv expressions. We don't know if the UDiv is derived from an IR divide
2611 // instruction, but the important thing is that we prove the denominator is
2612 // nonzero before expansion.
2613 //
2614 // IVUsers already checks that IV-derived expressions are safe. So this check is
2615 // only needed when the expression includes some subexpression that is not IV
2616 // derived.
2617 //
2618 // Currently, we only allow division by a nonzero constant here. If this is
2619 // inadequate, we could easily allow division by SCEVUnknown by using
2620 // ValueTracking to check isKnownNonZero().
2621 //
2622 // We cannot generally expand recurrences unless the step dominates the loop
2623 // header. The expander handles the special case of affine recurrences by
2624 // scaling the recurrence outside the loop, but this technique isn't generally
2625 // applicable. Expanding a nested recurrence outside a loop requires computing
2626 // binomial coefficients. This could be done, but the recurrence has to be in a
2627 // perfectly reduced form, which can't be guaranteed.
2628 struct SCEVFindUnsafe {
2629 ScalarEvolution &SE;
2630 bool IsUnsafe;
2631
2632   SCEVFindUnsafe(ScalarEvolution &se) : SE(se), IsUnsafe(false) {}
2633
2634   bool follow(const SCEV *S) {
2635 if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
2636 const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
2637 if (!SC || SC->getValue()->isZero()) {
2638 IsUnsafe = true;
2639 return false;
2640 }
2641 }
2642 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
2643 const SCEV *Step = AR->getStepRecurrence(SE);
2644 if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
2645 IsUnsafe = true;
2646 return false;
2647 }
2648 }
2649 return true;
2650 }
2651   bool isDone() const { return IsUnsafe; }
2652 };
2653 }
2654
2655 namespace llvm {
2656 bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
2657 SCEVFindUnsafe Search(SE);
2658 visitAll(S, Search);
2659 return !Search.IsUnsafe;
2660 }
2661
2662 bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint,
2663 ScalarEvolution &SE) {
2664 if (!isSafeToExpand(S, SE))
2665 return false;
2666 // We have to prove that the expanded site of S dominates InsertionPoint.
2667 // This is easy when not in the same block, but hard when S is an instruction
2668 // to be expanded somewhere inside the same block as our insertion point.
2669 // What we really need here is something analogous to an OrderedBasicBlock,
2670 // but for the moment, we paper over the problem by handling two common and
2671 // cheap to check cases.
2672 if (SE.properlyDominates(S, InsertionPoint->getParent()))
2673 return true;
2674 if (SE.dominates(S, InsertionPoint->getParent())) {
2675 if (InsertionPoint->getParent()->getTerminator() == InsertionPoint)
2676 return true;
2677 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
2678 for (const Value *V : InsertionPoint->operand_values())
2679 if (V == U->getValue())
2680 return true;
2681 }
2682 return false;
2683 }
2684
2685 SCEVExpanderCleaner::~SCEVExpanderCleaner() {
2686 // Result is used, nothing to remove.
2687 if (ResultUsed)
2688 return;
2689
2690 auto InsertedInstructions = Expander.getAllInsertedInstructions();
2691 #ifndef NDEBUG
2692 SmallPtrSet<Instruction *, 8> InsertedSet(InsertedInstructions.begin(),
2693 InsertedInstructions.end());
2694 (void)InsertedSet;
2695 #endif
2696 // Remove sets with value handles.
2697 Expander.clear();
2698
2699 // Sort so that earlier instructions do not dominate later instructions.
2700 stable_sort(InsertedInstructions, [this](Instruction *A, Instruction *B) {
2701 return DT.dominates(B, A);
2702 });
2703 // Remove all inserted instructions.
2704 for (Instruction *I : InsertedInstructions) {
2705
2706 #ifndef NDEBUG
2707 assert(all_of(I->users(),
2708 [&InsertedSet](Value *U) {
2709 return InsertedSet.contains(cast<Instruction>(U));
2710 }) &&
2711 "removed instruction should only be used by instructions inserted "
2712 "during expansion");
2713 #endif
2714 assert(!I->getType()->isVoidTy() &&
2715 "inserted instruction should have non-void types");
2716 I->replaceAllUsesWith(UndefValue::get(I->getType()));
2717 I->eraseFromParent();
2718 }
2719 }
2720 }
2721