1 //===- ScalarEvolutionsTest.cpp - ScalarEvolution unit tests --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "llvm/ADT/SmallVector.h"
10 #include "llvm/Analysis/AssumptionCache.h"
11 #include "llvm/Analysis/LoopInfo.h"
12 #include "llvm/Analysis/ScalarEvolutionExpander.h"
13 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
14 #include "llvm/Analysis/TargetLibraryInfo.h"
15 #include "llvm/AsmParser/Parser.h"
16 #include "llvm/IR/Constants.h"
17 #include "llvm/IR/Dominators.h"
18 #include "llvm/IR/GlobalVariable.h"
19 #include "llvm/IR/IRBuilder.h"
20 #include "llvm/IR/InstIterator.h"
21 #include "llvm/IR/LLVMContext.h"
22 #include "llvm/IR/LegacyPassManager.h"
23 #include "llvm/IR/Module.h"
24 #include "llvm/IR/Verifier.h"
25 #include "llvm/Support/SourceMgr.h"
26 #include "gtest/gtest.h"
27
28 namespace llvm {
29
30 // We use this fixture to ensure that we clean up ScalarEvolution before
31 // deleting the PassManager.
32 class ScalarEvolutionsTest : public testing::Test {
33 protected:
34 LLVMContext Context;
35 Module M;
36 TargetLibraryInfoImpl TLII;
37 TargetLibraryInfo TLI;
38
39 std::unique_ptr<AssumptionCache> AC;
40 std::unique_ptr<DominatorTree> DT;
41 std::unique_ptr<LoopInfo> LI;
42
ScalarEvolutionsTest()43 ScalarEvolutionsTest() : M("", Context), TLII(), TLI(TLII) {}
44
buildSE(Function & F)45 ScalarEvolution buildSE(Function &F) {
46 AC.reset(new AssumptionCache(F));
47 DT.reset(new DominatorTree(F));
48 LI.reset(new LoopInfo(*DT));
49 return ScalarEvolution(F, TLI, *AC, *DT, *LI);
50 }
51
runWithSE(Module & M,StringRef FuncName,function_ref<void (Function & F,LoopInfo & LI,ScalarEvolution & SE)> Test)52 void runWithSE(
53 Module &M, StringRef FuncName,
54 function_ref<void(Function &F, LoopInfo &LI, ScalarEvolution &SE)> Test) {
55 auto *F = M.getFunction(FuncName);
56 ASSERT_NE(F, nullptr) << "Could not find " << FuncName;
57 ScalarEvolution SE = buildSE(*F);
58 Test(*F, *LI, SE);
59 }
60
computeConstantDifference(ScalarEvolution & SE,const SCEV * LHS,const SCEV * RHS)61 static Optional<APInt> computeConstantDifference(ScalarEvolution &SE,
62 const SCEV *LHS,
63 const SCEV *RHS) {
64 return SE.computeConstantDifference(LHS, RHS);
65 }
66 };
67
TEST_F(ScalarEvolutionsTest, SCEVUnknownRAUW) {
  // A trivial function suffices; the test exercises SCEVUnknown's
  // value-handle behavior for the three globals created below.
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
                                        std::vector<Type *>(), false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *BB = BasicBlock::Create(Context, "entry", F);
  ReturnInst::Create(Context, nullptr, BB);

  Type *Ty = Type::getInt1Ty(Context);
  Constant *Init = Constant::getNullValue(Ty);
  Value *V0 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage,
                                 Init, "V0");
  Value *V1 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage,
                                 Init, "V1");
  Value *V2 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage,
                                 Init, "V2");

  ScalarEvolution SE = buildSE(*F);

  Value *Vals[] = {V0, V1, V2};
  const SCEVMulExpr *Muls[3];
  for (int Idx = 0; Idx != 3; ++Idx) {
    const SCEV *S = SE.getSCEV(Vals[Idx]);
    // X + X canonicalizes to 2 * X, i.e. a SCEVMulExpr with a constant
    // first operand and a SCEVUnknown second operand.
    Muls[Idx] = cast<SCEVMulExpr>(SE.getAddExpr(S, S));
    EXPECT_EQ(
        cast<SCEVConstant>(Muls[Idx]->getOperand(0))->getValue()->getZExtValue(),
        2u);
    // Before the RAUWs, each SCEVUnknown still points at its own global.
    EXPECT_EQ(cast<SCEVUnknown>(Muls[Idx]->getOperand(1))->getValue(),
              Vals[Idx]);
  }

  // Replace V2 with V1, then V1 with V0; the SCEVUnknowns' value handles
  // must follow the replacements.
  V2->replaceAllUsesWith(V1);
  V1->replaceAllUsesWith(V0);

  // After the RAUWs, every SCEVUnknown should resolve to V0.
  for (const SCEVMulExpr *MulE : Muls)
    EXPECT_EQ(cast<SCEVUnknown>(MulE->getOperand(1))->getValue(), V0);
}
116
TEST_F(ScalarEvolutionsTest, SimplifiedPHI) {
  // Build:  entry -> loop -> {loop, exit}  with a PHI in %loop whose
  // incoming values are 0 (from entry) and undef (from the backedge).
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
                                        std::vector<Type *>(), false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
  BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
  BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
  BranchInst::Create(LoopBB, EntryBB);
  BranchInst::Create(LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)),
                     LoopBB);
  ReturnInst::Create(Context, nullptr, ExitBB);
  auto *Int32Ty = Type::getInt32Ty(Context);
  auto *Phi = PHINode::Create(Int32Ty, 2, "", &*LoopBB->begin());
  Phi->addIncoming(Constant::getNullValue(Int32Ty), EntryBB);
  Phi->addIncoming(UndefValue::get(Int32Ty), LoopBB);
  ScalarEvolution SE = buildSE(*F);
  auto *First = SE.getSCEV(Phi);
  auto *Second = SE.getSCEV(Phi);
  auto *Zero = SE.getConstant(Int32Ty, 0);

  // At some point, only the first call to getSCEV returned the simplified
  // SCEVConstant and later calls just returned a SCEVUnknown referencing the
  // PHI node. Both queries must yield the same simplified constant.
  EXPECT_EQ(First, Zero);
  EXPECT_EQ(First, Second);
}
143
TEST_F(ScalarEvolutionsTest, ExpandPtrTypeSCEV) {
  // It is to test the fix for PR30213. It exercises the branch in scev
  // expansion when the value in ValueOffsetPair is a ptr and the offset
  // is not divisible by the elem type size of value.
  auto *I8Ty = Type::getInt8Ty(Context);
  auto *I8PtrTy = Type::getInt8PtrTy(Context);
  auto *I32Ty = Type::getInt32Ty(Context);
  auto *I32PtrTy = Type::getInt32PtrTy(Context);
  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
  BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
  BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
  BranchInst::Create(LoopBB, EntryBB);
  ReturnInst::Create(Context, nullptr, ExitBB);

  // Build the following loop body by hand (see IR below). The interesting
  // value is %bitcast2: an i32* whose underlying pointer arithmetic is in
  // i8 units, so its offset is not a multiple of the i32 element size.
  //
  // loop:              ; preds = %loop, %entry
  //   %alloca = alloca i32
  //   %gep0 = getelementptr i32, i32* %alloca, i32 1
  //   %bitcast1 = bitcast i32* %gep0 to i8*
  //   %gep1 = getelementptr i8, i8* %bitcast1, i32 1
  //   %gep2 = getelementptr i8, i8* undef, i32 1
  //   %cmp = icmp ult i8* undef, %bitcast1
  //   %select = select i1 %cmp, i8* %gep1, i8* %gep2
  //   %bitcast2 = bitcast i8* %select to i32*
  //   br i1 undef, label %loop, label %exit

  const DataLayout &DL = F->getParent()->getDataLayout();
  BranchInst *Br = BranchInst::Create(
      LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
  AllocaInst *Alloca = new AllocaInst(I32Ty, DL.getAllocaAddrSpace(),
                                      "alloca", Br);
  ConstantInt *Ci32 = ConstantInt::get(Context, APInt(32, 1));
  GetElementPtrInst *Gep0 =
      GetElementPtrInst::Create(I32Ty, Alloca, Ci32, "gep0", Br);
  CastInst *CastA =
      CastInst::CreateBitOrPointerCast(Gep0, I8PtrTy, "bitcast1", Br);
  GetElementPtrInst *Gep1 =
      GetElementPtrInst::Create(I8Ty, CastA, Ci32, "gep1", Br);
  GetElementPtrInst *Gep2 = GetElementPtrInst::Create(
      I8Ty, UndefValue::get(I8PtrTy), Ci32, "gep2", Br);
  CmpInst *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT,
                                 UndefValue::get(I8PtrTy), CastA, "cmp", Br);
  SelectInst *Sel = SelectInst::Create(Cmp, Gep1, Gep2, "select", Br);
  CastInst *CastB =
      CastInst::CreateBitOrPointerCast(Sel, I32PtrTy, "bitcast2", Br);

  ScalarEvolution SE = buildSE(*F);
  // %bitcast2 is modeled as an add expression; expand its second operand
  // (the non-constant pointer part) right before the branch.
  auto *S = SE.getSCEV(CastB);
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  Value *V =
      Exp.expandCodeFor(cast<SCEVAddExpr>(S)->getOperand(1), nullptr, Br);

  // Expect the expansion code contains:
  //   %0 = bitcast i32* %bitcast2 to i8*
  //   %uglygep = getelementptr i8, i8* %0, i64 -1
  //   %1 = bitcast i8* %uglygep to i32*
  EXPECT_TRUE(isa<BitCastInst>(V));
  Instruction *Gep = cast<Instruction>(V)->getPrevNode();
  EXPECT_TRUE(isa<GetElementPtrInst>(Gep));
  EXPECT_TRUE(isa<ConstantInt>(Gep->getOperand(1)));
  EXPECT_EQ(cast<ConstantInt>(Gep->getOperand(1))->getSExtValue(), -1);
  EXPECT_TRUE(isa<BitCastInst>(Gep->getPrevNode()));
}
209
getInstructionByName(Function & F,StringRef Name)210 static Instruction *getInstructionByName(Function &F, StringRef Name) {
211 for (auto &I : instructions(F))
212 if (I.getName() == Name)
213 return &I;
214 llvm_unreachable("Expected to find instruction!");
215 }
216
TEST_F(ScalarEvolutionsTest, CommutativeExprOperandOrder) {
  // Commutative SCEV expressions (here: multiplications) must be uniqued
  // regardless of the order in which their operands are supplied, across
  // several operand kinds: loads of arguments (@f_2), loads of globals
  // (@f_3), and opaque call results (@f_4). @f_1 additionally checks that
  // repeated getSCEV queries on a pointer IV stay AddRec-shaped.
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(
      "target datalayout = \"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128\" "
      " "
      "@var_0 = external global i32, align 4"
      "@var_1 = external global i32, align 4"
      "@var_2 = external global i32, align 4"
      " "
      "declare i32 @unknown(i32, i32, i32)"
      " "
      "define void @f_1(i8* nocapture %arr, i32 %n, i32* %A, i32* %B) "
      " local_unnamed_addr { "
      "entry: "
      " %entrycond = icmp sgt i32 %n, 0 "
      " br i1 %entrycond, label %loop.ph, label %for.end "
      " "
      "loop.ph: "
      " %a = load i32, i32* %A, align 4 "
      " %b = load i32, i32* %B, align 4 "
      " %mul = mul nsw i32 %b, %a "
      " %iv0.init = getelementptr inbounds i8, i8* %arr, i32 %mul "
      " br label %loop "
      " "
      "loop: "
      " %iv0 = phi i8* [ %iv0.inc, %loop ], [ %iv0.init, %loop.ph ] "
      " %iv1 = phi i32 [ %iv1.inc, %loop ], [ 0, %loop.ph ] "
      " %conv = trunc i32 %iv1 to i8 "
      " store i8 %conv, i8* %iv0, align 1 "
      " %iv0.inc = getelementptr inbounds i8, i8* %iv0, i32 %b "
      " %iv1.inc = add nuw nsw i32 %iv1, 1 "
      " %exitcond = icmp eq i32 %iv1.inc, %n "
      " br i1 %exitcond, label %for.end.loopexit, label %loop "
      " "
      "for.end.loopexit: "
      " br label %for.end "
      " "
      "for.end: "
      " ret void "
      "} "
      " "
      "define void @f_2(i32* %X, i32* %Y, i32* %Z) { "
      " %x = load i32, i32* %X "
      " %y = load i32, i32* %Y "
      " %z = load i32, i32* %Z "
      " ret void "
      "} "
      " "
      "define void @f_3() { "
      " %x = load i32, i32* @var_0"
      " %y = load i32, i32* @var_1"
      " %z = load i32, i32* @var_2"
      " ret void"
      "} "
      " "
      "define void @f_4(i32 %a, i32 %b, i32 %c) { "
      " %x = call i32 @unknown(i32 %a, i32 %b, i32 %c)"
      " %y = call i32 @unknown(i32 %b, i32 %c, i32 %a)"
      " %z = call i32 @unknown(i32 %c, i32 %a, i32 %b)"
      " ret void"
      "} "
      ,
      Err, C);

  assert(M && "Could not parse module?");
  assert(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "f_1", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto *IV0 = getInstructionByName(F, "iv0");
    auto *IV0Inc = getInstructionByName(F, "iv0.inc");

    // Querying %iv0.inc in between must not change the shape of %iv0's SCEV.
    auto *FirstExprForIV0 = SE.getSCEV(IV0);
    auto *FirstExprForIV0Inc = SE.getSCEV(IV0Inc);
    auto *SecondExprForIV0 = SE.getSCEV(IV0);

    EXPECT_TRUE(isa<SCEVAddRecExpr>(FirstExprForIV0));
    EXPECT_TRUE(isa<SCEVAddRecExpr>(FirstExprForIV0Inc));
    EXPECT_TRUE(isa<SCEVAddRecExpr>(SecondExprForIV0));
  });

  // Every permutation of {A, B, C} must produce the identical (uniqued)
  // SCEVMulExpr object, for both the 2-operand and 3-operand forms.
  auto CheckCommutativeMulExprs = [&](ScalarEvolution &SE, const SCEV *A,
                                      const SCEV *B, const SCEV *C) {
    EXPECT_EQ(SE.getMulExpr(A, B), SE.getMulExpr(B, A));
    EXPECT_EQ(SE.getMulExpr(B, C), SE.getMulExpr(C, B));
    EXPECT_EQ(SE.getMulExpr(A, C), SE.getMulExpr(C, A));

    SmallVector<const SCEV *, 3> Ops0 = {A, B, C};
    SmallVector<const SCEV *, 3> Ops1 = {A, C, B};
    SmallVector<const SCEV *, 3> Ops2 = {B, A, C};
    SmallVector<const SCEV *, 3> Ops3 = {B, C, A};
    SmallVector<const SCEV *, 3> Ops4 = {C, B, A};
    SmallVector<const SCEV *, 3> Ops5 = {C, A, B};

    auto *Mul0 = SE.getMulExpr(Ops0);
    auto *Mul1 = SE.getMulExpr(Ops1);
    auto *Mul2 = SE.getMulExpr(Ops2);
    auto *Mul3 = SE.getMulExpr(Ops3);
    auto *Mul4 = SE.getMulExpr(Ops4);
    auto *Mul5 = SE.getMulExpr(Ops5);

    EXPECT_EQ(Mul0, Mul1) << "Expected " << *Mul0 << " == " << *Mul1;
    EXPECT_EQ(Mul1, Mul2) << "Expected " << *Mul1 << " == " << *Mul2;
    EXPECT_EQ(Mul2, Mul3) << "Expected " << *Mul2 << " == " << *Mul3;
    EXPECT_EQ(Mul3, Mul4) << "Expected " << *Mul3 << " == " << *Mul4;
    EXPECT_EQ(Mul4, Mul5) << "Expected " << *Mul4 << " == " << *Mul5;
  };

  for (StringRef FuncName : {"f_2", "f_3", "f_4"})
    runWithSE(
        *M, FuncName, [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
          CheckCommutativeMulExprs(SE, SE.getSCEV(getInstructionByName(F, "x")),
                                   SE.getSCEV(getInstructionByName(F, "y")),
                                   SE.getSCEV(getInstructionByName(F, "z")));
        });
}
333
TEST_F(ScalarEvolutionsTest, CompareSCEVComplexity) {
  // Build a single-block loop containing a deep, wide web of mul/add
  // instructions over eight accumulator PHIs, then ask SCEV for the final
  // reduced value. The interesting property is that the query finishes at
  // all (CompareSCEVComplexity must not blow up on this input).
  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
  BasicBlock *LoopBB = BasicBlock::Create(Context, "bb1", F);
  BranchInst::Create(LoopBB, EntryBB);

  auto *Ty = Type::getInt32Ty(Context);
  SmallVector<Instruction *, 8> Muls(8), Acc(8), NextAcc(8);

  // Eight accumulator PHIs at the top of the loop block.
  for (int i = 0; i < 8; ++i)
    Acc[i] = PHINode::Create(Ty, 2, "", LoopBB);

  for (int i = 0; i < 20; i++) {
    // First half: square Acc[j] and add the accumulator four slots above;
    // second half mirrors it, adding the accumulator four slots below.
    for (int j = 0; j < 4; ++j) {
      Muls[j] = BinaryOperator::CreateMul(Acc[j], Acc[j], "", LoopBB);
      NextAcc[j] = BinaryOperator::CreateAdd(Muls[j], Acc[j + 4], "", LoopBB);
    }
    for (int j = 4; j < 8; ++j) {
      Muls[j] = BinaryOperator::CreateMul(Acc[j], Acc[j], "", LoopBB);
      NextAcc[j] = BinaryOperator::CreateAdd(Muls[j], Acc[j - 4], "", LoopBB);
    }
    Acc = NextAcc;
  }

  // Close the loop: each PHI receives its final accumulator value on the
  // backedge and undef on the entry edge.
  auto II = LoopBB->begin();
  for (int i = 0; i < 8; i++) {
    PHINode *Phi = cast<PHINode>(&*II++);
    Phi->addIncoming(Acc[i], LoopBB);
    Phi->addIncoming(UndefValue::get(Ty), EntryBB);
  }

  BasicBlock *ExitBB = BasicBlock::Create(Context, "bb2", F);
  BranchInst::Create(LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)),
                     LoopBB);

  // Pairwise reduction tree in the exit block: 8 -> 4 -> 2 -> 1 values.
  for (int Width = 8; Width > 1; Width /= 2)
    for (int j = 0; j < Width / 2; ++j)
      Acc[j] =
          BinaryOperator::CreateAdd(Acc[2 * j], Acc[2 * j + 1], "", ExitBB);

  ReturnInst::Create(Context, nullptr, ExitBB);

  ScalarEvolution SE = buildSE(*F);

  // The SCEV just has to exist; terminating in reasonable time is the test.
  EXPECT_NE(nullptr, SE.getSCEV(Acc[0]));
}
400
TEST_F(ScalarEvolutionsTest, CompareValueComplexity) {
  IntegerType *IntPtrTy = M.getDataLayout().getIntPtrType(Context);
  PointerType *IntPtrPtrTy = IntPtrTy->getPointerTo();

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {IntPtrTy, IntPtrTy}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);

  Value *X = &*F->arg_begin();
  Value *Y = &*std::next(F->arg_begin());

  // Bury each argument under a chain of inttoptr+load pairs so the values
  // are only provably related by a deep (and deliberately avoided) search.
  auto Deepen = [&](Value *V) -> Value * {
    return new LoadInst(IntPtrTy, new IntToPtrInst(V, IntPtrPtrTy, "", EntryBB),
                        "",
                        /*isVolatile*/ false, EntryBB);
  };
  const int ValueDepth = 10;
  for (int Depth = 0; Depth < ValueDepth; ++Depth) {
    X = Deepen(X);
    Y = Deepen(Y);
  }

  auto *MulA = BinaryOperator::CreateMul(X, Y, "", EntryBB);
  auto *MulB = BinaryOperator::CreateMul(Y, X, "", EntryBB);
  ReturnInst::Create(Context, nullptr, EntryBB);

  // This test isn't checking for correctness. Today making A and B resolve to
  // the same SCEV would require deeper searching in CompareValueComplexity,
  // which will slow down compilation. However, this test can fail (with LLVM's
  // behavior still being correct) if we ever have a smarter
  // CompareValueComplexity that is both fast and more accurate.

  ScalarEvolution SE = buildSE(*F);
  const SCEV *A = SE.getSCEV(MulA);
  const SCEV *B = SE.getSCEV(MulB);
  EXPECT_NE(A, B);
}
438
TEST_F(ScalarEvolutionsTest, SCEVAddExpr) {
  // Build a chain of mutually-dependent mul/add instructions and check that
  // SCEV can construct an expression for the final value at all.
  Type *Ty32 = Type::getInt32Ty(Context);
  Type *ArgTys[] = {Type::getInt64Ty(Context), Ty32};

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), ArgTys, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);

  Argument *A1 = &*F->arg_begin();
  Argument *A2 = &*(std::next(F->arg_begin()));
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);

  // Seed values: trunc(A1), then a small mul/add web over it and A2.
  Instruction *Trunc = CastInst::CreateTruncOrBitCast(A1, Ty32, "", EntryBB);
  Instruction *Mul1 = BinaryOperator::CreateMul(Trunc, A2, "", EntryBB);
  Instruction *Add1 = BinaryOperator::CreateAdd(Mul1, Trunc, "", EntryBB);
  Mul1 = BinaryOperator::CreateMul(Add1, Trunc, "", EntryBB);
  Instruction *Add2 = BinaryOperator::CreateAdd(Mul1, Add1, "", EntryBB);
  // FIXME: The size of this is arbitrary and doesn't seem to change the
  // result, but SCEV will do quadratic work for these so a large number here
  // will be extremely slow. We should revisit what and how this is testing
  // SCEV.
  // Each iteration multiplies the last two sums and folds the product back
  // in, so the expression for Mul1 grows with every step.
  for (int i = 0; i < 10; i++) {
    Mul1 = BinaryOperator::CreateMul(Add2, Add1, "", EntryBB);
    Add1 = Add2;
    Add2 = BinaryOperator::CreateAdd(Mul1, Add1, "", EntryBB);
  }

  ReturnInst::Create(Context, nullptr, EntryBB);
  ScalarEvolution SE = buildSE(*F);
  EXPECT_NE(nullptr, SE.getSCEV(Mul1));
}
470
GetInstByName(Function & F,StringRef Name)471 static Instruction &GetInstByName(Function &F, StringRef Name) {
472 for (auto &I : instructions(F))
473 if (I.getName() == Name)
474 return I;
475 llvm_unreachable("Could not find instructions!");
476 }
477
TEST_F(ScalarEvolutionsTest, SCEVNormalization) {
  // Post-increment normalization must be an involution: normalize followed
  // by denormalize (and vice versa) over the same loop set must return the
  // original expression.
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(
      "target datalayout = \"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128\" "
      " "
      "@var_0 = external global i32, align 4"
      "@var_1 = external global i32, align 4"
      "@var_2 = external global i32, align 4"
      " "
      "declare i32 @unknown(i32, i32, i32)"
      " "
      "define void @f_1(i8* nocapture %arr, i32 %n, i32* %A, i32* %B) "
      " local_unnamed_addr { "
      "entry: "
      " br label %loop.ph "
      " "
      "loop.ph: "
      " br label %loop "
      " "
      "loop: "
      " %iv0 = phi i32 [ %iv0.inc, %loop ], [ 0, %loop.ph ] "
      " %iv1 = phi i32 [ %iv1.inc, %loop ], [ -2147483648, %loop.ph ] "
      " %iv0.inc = add i32 %iv0, 1 "
      " %iv1.inc = add i32 %iv1, 3 "
      " br i1 undef, label %for.end.loopexit, label %loop "
      " "
      "for.end.loopexit: "
      " ret void "
      "} "
      " "
      "define void @f_2(i32 %a, i32 %b, i32 %c, i32 %d) "
      " local_unnamed_addr { "
      "entry: "
      " br label %loop_0 "
      " "
      "loop_0: "
      " br i1 undef, label %loop_0, label %loop_1 "
      " "
      "loop_1: "
      " br i1 undef, label %loop_2, label %loop_1 "
      " "
      " "
      "loop_2: "
      " br i1 undef, label %end, label %loop_2 "
      " "
      "end: "
      " ret void "
      "} "
      ,
      Err, C);

  assert(M && "Could not parse module?");
  assert(!verifyModule(*M) && "Must have been well formed!");

  // @f_1: round-trip the add recurrences of the two induction variables.
  runWithSE(*M, "f_1", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto &I0 = GetInstByName(F, "iv0");
    auto &I1 = *I0.getNextNode();

    auto *S0 = cast<SCEVAddRecExpr>(SE.getSCEV(&I0));
    PostIncLoopSet Loops;
    Loops.insert(S0->getLoop());
    auto *N0 = normalizeForPostIncUse(S0, Loops, SE);
    auto *D0 = denormalizeForPostIncUse(N0, Loops, SE);
    EXPECT_EQ(S0, D0) << *S0 << " " << *D0;

    auto *S1 = cast<SCEVAddRecExpr>(SE.getSCEV(&I1));
    Loops.clear();
    Loops.insert(S1->getLoop());
    auto *N1 = normalizeForPostIncUse(S1, Loops, SE);
    auto *D1 = denormalizeForPostIncUse(N1, Loops, SE);
    EXPECT_EQ(S1, D1) << *S1 << " " << *D1;
  });

  // @f_2: the same round-trip over hand-built nested add recurrences, for
  // every subset of the three loops.
  runWithSE(*M, "f_2", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    // NOTE(review): this assumes LI's top-level loop list yields the loops
    // as loop_2, loop_1, loop_0 in that order — the naming below relies on
    // it.
    auto *L2 = *LI.begin();
    auto *L1 = *std::next(LI.begin());
    auto *L0 = *std::next(LI.begin(), 2);

    auto GetAddRec = [&SE](const Loop *L, std::initializer_list<const SCEV *> Ops) {
      SmallVector<const SCEV *, 4> OpsCopy(Ops);
      return SE.getAddRecExpr(OpsCopy, L, SCEV::FlagAnyWrap);
    };

    auto GetAdd = [&SE](std::initializer_list<const SCEV *> Ops) {
      SmallVector<const SCEV *, 4> OpsCopy(Ops);
      return SE.getAddExpr(OpsCopy, SCEV::FlagAnyWrap);
    };

    // We first populate the AddRecs vector with a few "interesting" SCEV
    // expressions, and then we go through the list and assert that each
    // expression in it has an invertible normalization.

    std::vector<const SCEV *> Exprs;
    {
      const SCEV *V0 = SE.getSCEV(&*F.arg_begin());
      const SCEV *V1 = SE.getSCEV(&*std::next(F.arg_begin(), 1));
      const SCEV *V2 = SE.getSCEV(&*std::next(F.arg_begin(), 2));
      const SCEV *V3 = SE.getSCEV(&*std::next(F.arg_begin(), 3));

      Exprs.push_back(GetAddRec(L0, {V0}));             // 0
      Exprs.push_back(GetAddRec(L0, {V0, V1}));         // 1
      Exprs.push_back(GetAddRec(L0, {V0, V1, V2}));     // 2
      Exprs.push_back(GetAddRec(L0, {V0, V1, V2, V3})); // 3

      Exprs.push_back(
          GetAddRec(L1, {Exprs[1], Exprs[2], Exprs[3], Exprs[0]})); // 4
      Exprs.push_back(
          GetAddRec(L1, {Exprs[1], Exprs[2], Exprs[0], Exprs[3]})); // 5
      Exprs.push_back(
          GetAddRec(L1, {Exprs[1], Exprs[3], Exprs[3], Exprs[1]})); // 6

      Exprs.push_back(GetAdd({Exprs[6], Exprs[3], V2})); // 7

      Exprs.push_back(
          GetAddRec(L2, {Exprs[4], Exprs[3], Exprs[3], Exprs[5]})); // 8

      Exprs.push_back(
          GetAddRec(L2, {Exprs[4], Exprs[6], Exprs[7], Exprs[3], V0})); // 9
    }

    // All eight subsets of {L0, L1, L2}, encoded by the bits of i.
    std::vector<PostIncLoopSet> LoopSets;
    for (int i = 0; i < 8; i++) {
      LoopSets.emplace_back();
      if (i & 1)
        LoopSets.back().insert(L0);
      if (i & 2)
        LoopSets.back().insert(L1);
      if (i & 4)
        LoopSets.back().insert(L2);
    }

    for (const auto &LoopSet : LoopSets)
      for (auto *S : Exprs) {
        {
          auto *N = llvm::normalizeForPostIncUse(S, LoopSet, SE);
          auto *D = llvm::denormalizeForPostIncUse(N, LoopSet, SE);

          // Normalization and then denormalizing better give us back the same
          // value.
          EXPECT_EQ(S, D) << "S = " << *S << "  D = " << *D << "  N = " << *N;
        }
        {
          auto *D = llvm::denormalizeForPostIncUse(S, LoopSet, SE);
          auto *N = llvm::normalizeForPostIncUse(D, LoopSet, SE);

          // Denormalization and then normalizing better give us back the same
          // value.
          EXPECT_EQ(S, N) << "S = " << *S << "  N = " << *N;
        }
      }
  });
}
631
// Expect the call of getZeroExtendExpr will not cost exponential time.
TEST_F(ScalarEvolutionsTest, SCEVZeroExtendExpr) {
  LLVMContext C;
  SMDiagnostic Err;

  // Generate a function like below:
  // define void @foo() {
  // entry:
  //   br label %for.cond
  //
  // for.cond:
  //   %0 = phi i64 [ 100, %entry ], [ %dec, %for.inc ]
  //   %cmp = icmp sgt i64 %0, 90
  //   br i1 %cmp, label %for.inc, label %for.cond1
  //
  // for.inc:
  //   %dec = add nsw i64 %0, -1
  //   br label %for.cond
  //
  // for.cond1:
  //   %1 = phi i64 [ 100, %for.cond ], [ %dec5, %for.inc2 ]
  //   %cmp3 = icmp sgt i64 %1, 90
  //   br i1 %cmp3, label %for.inc2, label %for.cond4
  //
  // for.inc2:
  //   %dec5 = add nsw i64 %1, -1
  //   br label %for.cond1
  //
  // ......
  //
  // for.cond89:
  //   %19 = phi i64 [ 100, %for.cond84 ], [ %dec94, %for.inc92 ]
  //   %cmp93 = icmp sgt i64 %19, 90
  //   br i1 %cmp93, label %for.inc92, label %for.end
  //
  // for.inc92:
  //   %dec94 = add nsw i64 %19, -1
  //   br label %for.cond89
  //
  // for.end:
  //   %gep = getelementptr i8, i8* null, i64 %dec
  //   %gep6 = getelementptr i8, i8* %gep, i64 %dec5
  //   ......
  //   %gep95 = getelementptr i8, i8* %gep91, i64 %dec94
  //   ret void
  // }
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), {}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", M);

  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
  BasicBlock *CondBB = BasicBlock::Create(Context, "for.cond", F);
  BasicBlock *EndBB = BasicBlock::Create(Context, "for.end", F);
  BranchInst::Create(CondBB, EntryBB);
  BasicBlock *PrevBB = EntryBB;

  Type *I64Ty = Type::getInt64Ty(Context);
  Type *I8Ty = Type::getInt8Ty(Context);
  Type *I8PtrTy = Type::getInt8PtrTy(Context);
  Value *Accum = Constant::getNullValue(I8PtrTy);
  int Iters = 20;
  // Build the 20 sequential count-down loops described above; each
  // iteration also appends a GEP of the loop's PHI onto Accum in for.end,
  // so Accum's SCEV ends up referencing every loop.
  for (int i = 0; i < Iters; i++) {
    BasicBlock *IncBB = BasicBlock::Create(Context, "for.inc", F, EndBB);
    auto *PN = PHINode::Create(I64Ty, 2, "", CondBB);
    PN->addIncoming(ConstantInt::get(Context, APInt(64, 100)), PrevBB);
    auto *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_SGT, PN,
                                ConstantInt::get(Context, APInt(64, 90)), "cmp",
                                CondBB);
    BasicBlock *NextBB;
    // The last loop exits straight to for.end; earlier ones fall through to
    // the next loop's header.
    if (i != Iters - 1)
      NextBB = BasicBlock::Create(Context, "for.cond", F, EndBB);
    else
      NextBB = EndBB;
    BranchInst::Create(IncBB, NextBB, Cmp, CondBB);
    auto *Dec = BinaryOperator::CreateNSWAdd(
        PN, ConstantInt::get(Context, APInt(64, -1)), "dec", IncBB);
    PN->addIncoming(Dec, IncBB);
    BranchInst::Create(CondBB, IncBB);

    Accum = GetElementPtrInst::Create(I8Ty, Accum, PN, "gep", EndBB);

    PrevBB = CondBB;
    CondBB = NextBB;
  }
  ReturnInst::Create(Context, nullptr, EndBB);
  ScalarEvolution SE = buildSE(*F);
  const SCEV *S = SE.getSCEV(Accum);
  Type *I128Ty = Type::getInt128Ty(Context);
  // The point of the test: this must finish without exponential blowup.
  SE.getZeroExtendExpr(S, I128Ty);
}
721
// Make sure that SCEV doesn't introduce illegal ptrtoint/inttoptr instructions
TEST_F(ScalarEvolutionsTest, SCEVZeroExtendExprNonIntegral) {
  /*
   * Create the following code:
   * func(i64 addrspace(10)* %arg)
   * top:
   *  br label %L.ph
   * L.ph:
   *  br label %L
   * L:
   *  %phi = phi i64 [i64 0, %L.ph], [ %add, %L2 ]
   *  %add = add i64 %phi2, 1
   *  br i1 undef, label %post, label %L2
   * post:
   *  %gepbase = getelementptr i64 addrspace(10)* %arg, i64 1
   *  #= %gep = getelementptr i64 addrspace(10)* %gepbase, i64 %add =#
   *  ret void
   *
   * We will create the appropriate SCEV expression for %gep and expand it,
   * then check that no inttoptr/ptrtoint instructions got inserted.
   */

  // Create a module with non-integral pointers in its datalayout: address
  // space 10 is declared "ni", so ptrtoint/inttoptr on it are illegal.
  Module NIM("nonintegral", Context);
  std::string DataLayout = M.getDataLayoutStr();
  if (!DataLayout.empty())
    DataLayout += "-";
  DataLayout += "ni:10";
  NIM.setDataLayout(DataLayout);

  Type *T_int1 = Type::getInt1Ty(Context);
  Type *T_int64 = Type::getInt64Ty(Context);
  Type *T_pint64 = T_int64->getPointerTo(10);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);

  Argument *Arg = &*F->arg_begin();

  BasicBlock *Top = BasicBlock::Create(Context, "top", F);
  BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
  BasicBlock *L = BasicBlock::Create(Context, "L", F);
  BasicBlock *Post = BasicBlock::Create(Context, "post", F);

  IRBuilder<> Builder(Top);
  Builder.CreateBr(LPh);

  Builder.SetInsertPoint(LPh);
  Builder.CreateBr(L);

  Builder.SetInsertPoint(L);
  PHINode *Phi = Builder.CreatePHI(T_int64, 2);
  Value *Add = Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add");
  Builder.CreateCondBr(UndefValue::get(T_int1), L, Post);
  Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
  Phi->addIncoming(Add, L);

  Builder.SetInsertPoint(Post);
  Value *GepBase =
      Builder.CreateGEP(T_int64, Arg, ConstantInt::get(T_int64, 1));
  Instruction *Ret = Builder.CreateRetVoid();

  ScalarEvolution SE = buildSE(*F);
  // Hand-build the add recurrence {%gepbase,+,1}<L> over the non-integral
  // pointer and expand it right before the return.
  auto *AddRec =
      SE.getAddRecExpr(SE.getUnknown(GepBase), SE.getConstant(T_int64, 1),
                       LI->getLoopFor(L), SCEV::FlagNUW);

  SCEVExpander Exp(SE, NIM.getDataLayout(), "expander");
  Exp.disableCanonicalMode();
  Exp.expandCodeFor(AddRec, T_pint64, Ret);

  // Make sure none of the instructions inserted were inttoptr/ptrtoint.
  // The verifier will check this.
  EXPECT_FALSE(verifyFunction(*F, &errs()));
}
798
799 // Make sure that SCEV invalidates exit limits after invalidating the values it
800 // depends on when we forget a loop.
TEST_F(ScalarEvolutionsTest, SCEVExitLimitForgetLoop) {
  /*
   * Create the following code:
   * func(i64 addrspace(10)* %arg)
   * top:
   *  br label %L.ph
   * L.ph:
   *  br label %L
   * L:
   *  %phi = phi i64 [i64 0, %L.ph], [ %add, %L2 ]
   *  %add = add i64 %phi2, 1
   *  %cond = icmp slt i64 %add, 1000; then becomes 2000.
   *  br i1 %cond, label %post, label %L2
   * post:
   *  ret void
   *
   */

  // Create a module with non-integral pointers in its datalayout.
  Module NIM("nonintegral", Context);
  std::string DataLayout = M.getDataLayoutStr();
  if (!DataLayout.empty())
    DataLayout += "-";
  DataLayout += "ni:10";
  NIM.setDataLayout(DataLayout);

  Type *T_int64 = Type::getInt64Ty(Context);
  Type *T_pint64 = T_int64->getPointerTo(10);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);

  BasicBlock *Top = BasicBlock::Create(Context, "top", F);
  BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
  BasicBlock *L = BasicBlock::Create(Context, "L", F);
  BasicBlock *Post = BasicBlock::Create(Context, "post", F);

  IRBuilder<> Builder(Top);
  Builder.CreateBr(LPh);

  Builder.SetInsertPoint(LPh);
  Builder.CreateBr(L);

  Builder.SetInsertPoint(L);
  PHINode *Phi = Builder.CreatePHI(T_int64, 2);
  auto *Add = cast<Instruction>(
      Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
  auto *Limit = ConstantInt::get(T_int64, 1000);
  auto *Cond = cast<Instruction>(
      Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Limit, "cond"));
  // Keep handles on the branch and compare so they can be erased and
  // replaced below.
  auto *Br = cast<Instruction>(Builder.CreateCondBr(Cond, L, Post));
  Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
  Phi->addIncoming(Add, L);

  Builder.SetInsertPoint(Post);
  Builder.CreateRetVoid();

  // With the exit limit of 1000, the backedge is taken 999 times.
  ScalarEvolution SE = buildSE(*F);
  auto *Loop = LI->getLoopFor(L);
  const SCEV *EC = SE.getBackedgeTakenCount(Loop);
  EXPECT_FALSE(isa<SCEVCouldNotCompute>(EC));
  EXPECT_TRUE(isa<SCEVConstant>(EC));
  EXPECT_EQ(cast<SCEVConstant>(EC)->getAPInt().getLimitedValue(), 999u);

  // The add recurrence {5,+,1} does not correspond to any PHI in the IR, and
  // that is relevant to this test.
  auto *Five = SE.getConstant(APInt(/*numBits=*/64, 5));
  auto *AR =
      SE.getAddRecExpr(Five, SE.getOne(T_int64), Loop, SCEV::FlagAnyWrap);
  // {5,+,1} evaluated at loop exit: 5 + 999 = 1004.
  const SCEV *ARAtLoopExit = SE.getSCEVAtScope(AR, nullptr);
  EXPECT_FALSE(isa<SCEVCouldNotCompute>(ARAtLoopExit));
  EXPECT_TRUE(isa<SCEVConstant>(ARAtLoopExit));
  EXPECT_EQ(cast<SCEVConstant>(ARAtLoopExit)->getAPInt().getLimitedValue(),
            1004u);

  // Forget the loop, rewrite the exit condition to use a limit of 2000, and
  // verify that SCEV recomputes fresh results instead of returning stale
  // cached ones.
  SE.forgetLoop(Loop);
  Br->eraseFromParent();
  Cond->eraseFromParent();

  Builder.SetInsertPoint(L);
  auto *NewCond = Builder.CreateICmp(
      ICmpInst::ICMP_SLT, Add, ConstantInt::get(T_int64, 2000), "new.cond");
  Builder.CreateCondBr(NewCond, L, Post);
  const SCEV *NewEC = SE.getBackedgeTakenCount(Loop);
  EXPECT_FALSE(isa<SCEVCouldNotCompute>(NewEC));
  EXPECT_TRUE(isa<SCEVConstant>(NewEC));
  EXPECT_EQ(cast<SCEVConstant>(NewEC)->getAPInt().getLimitedValue(), 1999u);
  // {5,+,1} at the new exit: 5 + 1999 = 2004.
  const SCEV *NewARAtLoopExit = SE.getSCEVAtScope(AR, nullptr);
  EXPECT_FALSE(isa<SCEVCouldNotCompute>(NewARAtLoopExit));
  EXPECT_TRUE(isa<SCEVConstant>(NewARAtLoopExit));
  EXPECT_EQ(cast<SCEVConstant>(NewARAtLoopExit)->getAPInt().getLimitedValue(),
            2004u);
}
895
896 // Make sure that SCEV invalidates exit limits after invalidating the values it
897 // depends on when we forget a value.
TEST_F(ScalarEvolutionsTest, SCEVExitLimitForgetValue) {
  /*
   * Create the following code:
   * func(i64 addrspace(10)* %arg)
   * top:
   *  br label %L.ph
   * L.ph:
   *  %load = load i64 addrspace(10)* %arg
   *  br label %L
   * L:
   *  %phi = phi i64 [i64 0, %L.ph], [ %add, %L2 ]
   *  %add = add i64 %phi2, 1
   *  %cond = icmp slt i64 %add, %load ; then becomes 2000.
   *  br i1 %cond, label %post, label %L2
   * post:
   *  ret void
   *
   */

  // Create a module with non-integral pointers in its datalayout.
  Module NIM("nonintegral", Context);
  std::string DataLayout = M.getDataLayoutStr();
  if (!DataLayout.empty())
    DataLayout += "-";
  DataLayout += "ni:10";
  NIM.setDataLayout(DataLayout);

  Type *T_int64 = Type::getInt64Ty(Context);
  Type *T_pint64 = T_int64->getPointerTo(10);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);

  Argument *Arg = &*F->arg_begin();

  BasicBlock *Top = BasicBlock::Create(Context, "top", F);
  BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
  BasicBlock *L = BasicBlock::Create(Context, "L", F);
  BasicBlock *Post = BasicBlock::Create(Context, "post", F);

  IRBuilder<> Builder(Top);
  Builder.CreateBr(LPh);

  // The load supplies the (symbolic) loop exit limit.
  Builder.SetInsertPoint(LPh);
  auto *Load = cast<Instruction>(Builder.CreateLoad(T_int64, Arg, "load"));
  Builder.CreateBr(L);

  Builder.SetInsertPoint(L);
  PHINode *Phi = Builder.CreatePHI(T_int64, 2);
  auto *Add = cast<Instruction>(
      Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
  auto *Cond = cast<Instruction>(
      Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Load, "cond"));
  auto *Br = cast<Instruction>(Builder.CreateCondBr(Cond, L, Post));
  Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
  Phi->addIncoming(Add, L);

  Builder.SetInsertPoint(Post);
  Builder.CreateRetVoid();

  // The exit limit depends on the unknown loaded value, so the backedge
  // count is computable but not a constant.
  ScalarEvolution SE = buildSE(*F);
  auto *Loop = LI->getLoopFor(L);
  const SCEV *EC = SE.getBackedgeTakenCount(Loop);
  EXPECT_FALSE(isa<SCEVCouldNotCompute>(EC));
  EXPECT_FALSE(isa<SCEVConstant>(EC));

  // Forget the load, erase it, and replace the exit condition with a
  // constant limit of 2000. SCEV must recompute the exit limit rather than
  // reuse a cached result that referenced the erased load.
  SE.forgetValue(Load);
  Br->eraseFromParent();
  Cond->eraseFromParent();
  Load->eraseFromParent();

  Builder.SetInsertPoint(L);
  auto *NewCond = Builder.CreateICmp(
      ICmpInst::ICMP_SLT, Add, ConstantInt::get(T_int64, 2000), "new.cond");
  Builder.CreateCondBr(NewCond, L, Post);
  const SCEV *NewEC = SE.getBackedgeTakenCount(Loop);
  EXPECT_FALSE(isa<SCEVCouldNotCompute>(NewEC));
  EXPECT_TRUE(isa<SCEVConstant>(NewEC));
  EXPECT_EQ(cast<SCEVConstant>(NewEC)->getAPInt().getLimitedValue(), 1999u);
}
979
TEST_F(ScalarEvolutionsTest, SCEVAddRecFromPHIwithLargeConstants) {
  // Reference: https://reviews.llvm.org/D37265
  // Make sure that SCEV does not blow up when constructing an AddRec
  // with predicates for a phi with the update pattern:
  //  (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
  // when either the initial value of the Phi or the InvariantAccum are
  // constants that are too large to fit in an ix but are zero when truncated
  // to ix.
  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
  Function *F =
      Function::Create(FTy, Function::ExternalLinkage, "addrecphitest", M);

  /*
    Create IR:
    entry:
     br label %loop
    loop:
     %0 = phi i64 [-9223372036854775808, %entry], [%3, %loop]
     %1 = shl i64 %0, 32
     %2 = ashr exact i64 %1, 32
     %3 = add i64 %2, -9223372036854775808
     br i1 undef, label %exit, label %loop
    exit:
     ret void
   */
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
  BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
  BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);

  // entry:
  BranchInst::Create(LoopBB, EntryBB);
  // loop: INT64_MIN is the "too large for i32, zero when truncated" constant.
  auto *MinInt64 =
      ConstantInt::get(Context, APInt(64, 0x8000000000000000U, true));
  auto *Int64_32 = ConstantInt::get(Context, APInt(64, 32));
  // Create the terminator first so the remaining instructions can be
  // inserted before it.
  auto *Br = BranchInst::Create(
      LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
  auto *Phi = PHINode::Create(Type::getInt64Ty(Context), 2, "", Br);
  auto *Shl = BinaryOperator::CreateShl(Phi, Int64_32, "", Br);
  auto *AShr = BinaryOperator::CreateExactAShr(Shl, Int64_32, "", Br);
  auto *Add = BinaryOperator::CreateAdd(AShr, MinInt64, "", Br);
  Phi->addIncoming(MinInt64, EntryBB);
  Phi->addIncoming(Add, LoopBB);
  // exit:
  ReturnInst::Create(Context, nullptr, ExitBB);

  // Make sure that SCEV doesn't blow up.
  ScalarEvolution SE = buildSE(*F);
  const SCEV *Expr = SE.getSCEV(Phi);
  EXPECT_NE(nullptr, Expr);
  EXPECT_TRUE(isa<SCEVUnknown>(Expr));
  // The call itself is the test: it must complete without crashing or
  // asserting. The returned value is intentionally discarded.
  (void)SE.createAddRecFromPHIWithCasts(cast<SCEVUnknown>(Expr));
}
1035
TEST_F(ScalarEvolutionsTest, SCEVAddRecFromPHIwithLargeConstantAccum) {
  // Make sure that SCEV does not blow up when constructing an AddRec
  // with predicates for a phi with the update pattern:
  //  (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
  // when the InvariantAccum is a constant that is too large to fit in an
  // ix but is zero when truncated to ix, and the initial value of the
  // phi is not a constant.
  Type *Int32Ty = Type::getInt32Ty(Context);
  SmallVector<Type *, 1> Types;
  Types.push_back(Int32Ty);
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), Types, false);
  Function *F =
      Function::Create(FTy, Function::ExternalLinkage, "addrecphitest", M);

  /*
    Create IR:
    define @addrecphitest(i32)
    entry:
     br label %loop
    loop:
     %1 = phi i32 [%0, %entry], [%4, %loop]
     %2 = shl i32 %1, 16
     %3 = ashr exact i32 %2, 16
     %4 = add i32 %3, -2147483648
     br i1 undef, label %exit, label %loop
    exit:
     ret void
   */
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
  BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
  BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);

  // entry:
  BranchInst::Create(LoopBB, EntryBB);
  // loop: INT32_MIN is the "too large for i16, zero when truncated" accum.
  auto *MinInt32 = ConstantInt::get(Context, APInt(32, 0x80000000U, true));
  auto *Int32_16 = ConstantInt::get(Context, APInt(32, 16));
  // Create the terminator first so the remaining instructions can be
  // inserted before it.
  auto *Br = BranchInst::Create(
      LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
  auto *Phi = PHINode::Create(Int32Ty, 2, "", Br);
  auto *Shl = BinaryOperator::CreateShl(Phi, Int32_16, "", Br);
  auto *AShr = BinaryOperator::CreateExactAShr(Shl, Int32_16, "", Br);
  auto *Add = BinaryOperator::CreateAdd(AShr, MinInt32, "", Br);
  // The phi's start value is the (non-constant) function argument.
  auto *Arg = &*(F->arg_begin());
  Phi->addIncoming(Arg, EntryBB);
  Phi->addIncoming(Add, LoopBB);
  // exit:
  ReturnInst::Create(Context, nullptr, ExitBB);

  // Make sure that SCEV doesn't blow up.
  ScalarEvolution SE = buildSE(*F);
  const SCEV *Expr = SE.getSCEV(Phi);
  EXPECT_NE(nullptr, Expr);
  EXPECT_TRUE(isa<SCEVUnknown>(Expr));
  // The call itself is the test: it must complete without crashing or
  // asserting. The returned value is intentionally discarded.
  (void)SE.createAddRecFromPHIWithCasts(cast<SCEVUnknown>(Expr));
}
1093
TEST_F(ScalarEvolutionsTest, SCEVFoldSumOfTruncs) {
  // Check that the sum
  //   (-1 * (trunc i64 (-1 * %0) to i32)) + (-1 * (trunc i64 %0 to i32))
  // folds to the canonical i32 zero constant.
  Type *Int64Ty = Type::getInt64Ty(Context);
  Type *Int32Ty = Type::getInt32Ty(Context);
  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {Int64Ty}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  ReturnInst::Create(Context, nullptr, Entry);

  ScalarEvolution SE = buildSE(*F);

  // %0 is the i64 function argument.
  const SCEV *X = SE.getSCEV(&*(F->arg_begin()));

  // First addend: -trunc(-X).
  const SCEV *NegX = SE.getNegativeSCEV(X);
  const SCEV *LHS = SE.getNegativeSCEV(SE.getTruncateExpr(NegX, Int32Ty));
  // Second addend: -trunc(X).
  const SCEV *RHS = SE.getNegativeSCEV(SE.getTruncateExpr(X, Int32Ty));

  // The add must fold away entirely.
  EXPECT_EQ(SE.getAddExpr(LHS, RHS), SE.getConstant(Int32Ty, 0));
}
1124
1125 // Check that we can correctly identify the points at which the SCEV of the
1126 // AddRec can be expanded.
TEST_F(ScalarEvolutionsTest, SCEVExpanderIsSafeToExpandAt) {
  /*
   * Create the following code:
   * func(i64 addrspace(10)* %arg)
   * top:
   *  br label %L.ph
   * L.ph:
   *  br label %L
   * L:
   *  %phi = phi i64 [i64 0, %L.ph], [ %add, %L2 ]
   *  %add = add i64 %phi2, 1
   *  %cond = icmp slt i64 %add, 1000
   *  br i1 %cond, label %post, label %L2
   * post:
   *  ret void
   *
   */

  // Create a module with non-integral pointers in its datalayout.
  Module NIM("nonintegral", Context);
  std::string DataLayout = M.getDataLayoutStr();
  if (!DataLayout.empty())
    DataLayout += "-";
  DataLayout += "ni:10";
  NIM.setDataLayout(DataLayout);

  Type *T_int64 = Type::getInt64Ty(Context);
  Type *T_pint64 = T_int64->getPointerTo(10);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);

  BasicBlock *Top = BasicBlock::Create(Context, "top", F);
  BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
  BasicBlock *L = BasicBlock::Create(Context, "L", F);
  BasicBlock *Post = BasicBlock::Create(Context, "post", F);

  IRBuilder<> Builder(Top);
  Builder.CreateBr(LPh);

  Builder.SetInsertPoint(LPh);
  Builder.CreateBr(L);

  Builder.SetInsertPoint(L);
  PHINode *Phi = Builder.CreatePHI(T_int64, 2);
  auto *Add = cast<Instruction>(
      Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
  auto *Limit = ConstantInt::get(T_int64, 1000);
  auto *Cond = cast<Instruction>(
      Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Limit, "cond"));
  Builder.CreateCondBr(Cond, L, Post);
  Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
  Phi->addIncoming(Add, L);

  Builder.SetInsertPoint(Post);
  Builder.CreateRetVoid();

  // The phi is an affine add recurrence over loop L. It is expected to be
  // expandable inside the loop and after it, but not before the loop body
  // (top, L.ph).
  ScalarEvolution SE = buildSE(*F);
  const SCEV *S = SE.getSCEV(Phi);
  EXPECT_TRUE(isa<SCEVAddRecExpr>(S));
  const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
  EXPECT_TRUE(AR->isAffine());
  EXPECT_FALSE(isSafeToExpandAt(AR, Top->getTerminator(), SE));
  EXPECT_FALSE(isSafeToExpandAt(AR, LPh->getTerminator(), SE));
  EXPECT_TRUE(isSafeToExpandAt(AR, L->getTerminator(), SE));
  EXPECT_TRUE(isSafeToExpandAt(AR, Post->getTerminator(), SE));
}
1195
// Check that the SCEV expander does not reuse an existing instruction that
// carries the nuw flag when expanding an expression without that flag.
TEST_F(ScalarEvolutionsTest, SCEVExpanderNUW) {
  /*
   * Build:
   * func(i64 %a)
   * entry:
   *  br i1 false, label %exit, label %body
   * body:
   *  %s1 = add i64 %a, -1
   *  br label %exit
   * exit:
   *  %s2 = add nuw i64 %a, -1
   *  ret void
   */

  // Create a module.
  Module M("SCEVExpanderNUW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *MinusOne = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Body = BasicBlock::Create(Context, "body", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  Builder.CreateCondBr(ConstantInt::get(Context, APInt(1, 0)), Exit, Body);

  // An add with no wrap flags ...
  Builder.SetInsertPoint(Body);
  auto *PlainAdd = cast<Instruction>(Builder.CreateAdd(Arg, MinusOne, "add"));
  Builder.CreateBr(Exit);

  // ... and an otherwise identical add carrying the nuw flag.
  Builder.SetInsertPoint(Exit);
  auto *FlaggedAdd = cast<Instruction>(Builder.CreateAdd(Arg, MinusOne, "add"));
  FlaggedAdd->setHasNoUnsignedWrap(true);
  auto *Ret = cast<Instruction>(Builder.CreateRetVoid());

  // Expanding the SCEV of the flag-free add must not yield an instruction
  // that has the nuw flag set.
  ScalarEvolution SE = buildSE(*F);
  const SCEV *S = SE.getSCEV(PlainAdd);
  EXPECT_TRUE(isa<SCEVAddExpr>(S));
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *Expanded = cast<Instruction>(Exp.expandCodeFor(S, nullptr, Ret));
  EXPECT_FALSE(Expanded->hasNoUnsignedWrap());
}
1247
// Check that the SCEV expander does not reuse an existing instruction that
// carries the nsw flag when expanding an expression without that flag.
TEST_F(ScalarEvolutionsTest, SCEVExpanderNSW) {
  /*
   * Build:
   * func(i64 %a)
   * entry:
   *  br i1 false, label %exit, label %body
   * body:
   *  %s1 = add i64 %a, -1
   *  br label %exit
   * exit:
   *  %s2 = add nsw i64 %a, -1
   *  ret void
   */

  // Create a module.
  Module M("SCEVExpanderNSW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *MinusOne = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Body = BasicBlock::Create(Context, "body", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  Builder.CreateCondBr(ConstantInt::get(Context, APInt(1, 0)), Exit, Body);

  // An add with no wrap flags ...
  Builder.SetInsertPoint(Body);
  auto *PlainAdd = cast<Instruction>(Builder.CreateAdd(Arg, MinusOne, "add"));
  Builder.CreateBr(Exit);

  // ... and an otherwise identical add carrying the nsw flag.
  Builder.SetInsertPoint(Exit);
  auto *FlaggedAdd = cast<Instruction>(Builder.CreateAdd(Arg, MinusOne, "add"));
  FlaggedAdd->setHasNoSignedWrap(true);
  auto *Ret = cast<Instruction>(Builder.CreateRetVoid());

  // Expanding the SCEV of the flag-free add must not yield an instruction
  // that has the nsw flag set.
  ScalarEvolution SE = buildSE(*F);
  const SCEV *S = SE.getSCEV(PlainAdd);
  EXPECT_TRUE(isa<SCEVAddExpr>(S));
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *Expanded = cast<Instruction>(Exp.expandCodeFor(S, nullptr, Ret));
  EXPECT_FALSE(Expanded->hasNoSignedWrap());
}
1299
// Check that SCEV does not reuse a cached SCEV -> V mapping when the cached
// value V differs from the requested SCEV in its nuw flag.
TEST_F(ScalarEvolutionsTest, SCEVCacheNUW) {
  /*
   * Build:
   * func(i64 %a)
   * entry:
   *  %s1 = add i64 %a, -1
   *  %s2 = add nuw i64 %a, -1
   *  br label %exit
   * exit:
   *  ret void
   */

  // Create a module.
  Module M("SCEVCacheNUW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *MinusOne = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  // Two adds that differ only in the nuw flag.
  IRBuilder<> Builder(Entry);
  auto *PlainAdd = cast<Instruction>(Builder.CreateAdd(Arg, MinusOne, "add"));
  auto *FlaggedAdd = cast<Instruction>(Builder.CreateAdd(Arg, MinusOne, "add"));
  FlaggedAdd->setHasNoUnsignedWrap(true);
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  auto *Ret = cast<Instruction>(Builder.CreateRetVoid());

  ScalarEvolution SE = buildSE(*F);
  // Query the nuw add first so it lands in SCEV's cache.
  const SCEV *FlaggedSCEV = SE.getSCEV(FlaggedAdd);
  EXPECT_TRUE(isa<SCEVAddExpr>(FlaggedSCEV));
  // Then query the flag-free add.
  const SCEV *PlainSCEV = SE.getSCEV(PlainAdd);
  EXPECT_TRUE(isa<SCEVAddExpr>(PlainSCEV));
  // Expanding the flag-free SCEV must not map back to the nuw add even
  // though that one was cached first.
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *Expanded =
      cast<Instruction>(Exp.expandCodeFor(PlainSCEV, nullptr, Ret));
  EXPECT_FALSE(Expanded->hasNoUnsignedWrap());
}
1350
// Check that SCEV does not reuse a cached SCEV -> V mapping when the cached
// value V differs from the requested SCEV in its nsw flag.
TEST_F(ScalarEvolutionsTest, SCEVCacheNSW) {
  /*
   * Create the following code:
   * func(i64 %a)
   * entry:
   *  %s1 = add i64 %a, -1
   *  %s2 = add nsw i64 %a, -1
   *  br label %exit
   * exit:
   *  ret %s
   */

  // Create a module. (The name previously read "SCEVCacheNUW", a copy-paste
  // from the sibling test.)
  Module M("SCEVCacheNSW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), { T_int64 }, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  // Two adds that differ only in the nsw flag.
  IRBuilder<> Builder(Entry);
  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  S2->setHasNoSignedWrap(true);
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  auto *R = cast<Instruction>(Builder.CreateRetVoid());

  ScalarEvolution SE = buildSE(*F);
  // Get S2 first to move it to cache.
  const SCEV *SC2 = SE.getSCEV(S2);
  EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
  // Now get S1.
  const SCEV *SC1 = SE.getSCEV(S1);
  EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
  // Expand for S1; it should map to S1, not S2, despite S2 being cached
  // first — otherwise the expansion would wrongly carry the nsw flag.
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
  EXPECT_FALSE(I->hasNoSignedWrap());
}
1401
1402 // Check logic of SCEV expression size computation.
TEST_F(ScalarEvolutionsTest, SCEVComputeExpressionSize) {
  /*
   * Build:
   * void func(i64 %a, i64 %b)
   * entry:
   *  %s1 = add i64 %a, 1
   *  %s2 = udiv i64 %s1, %b
   *  br label %exit
   * exit:
   *  ret void
   */

  // Create a module.
  Module M("SCEVComputeExpressionSize", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64, T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *ArgA = &*F->arg_begin();
  Argument *ArgB = &*std::next(F->arg_begin());
  ConstantInt *One = ConstantInt::get(Context, APInt(64, 1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  auto *Sum = cast<Instruction>(Builder.CreateAdd(ArgA, One, "s1"));
  auto *Div = cast<Instruction>(Builder.CreateUDiv(Sum, ArgB, "s2"));
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  Builder.CreateRetVoid();

  ScalarEvolution SE = buildSE(*F);
  const SCEV *AS = SE.getSCEV(ArgA);
  const SCEV *BS = SE.getSCEV(ArgB);
  const SCEV *CS = SE.getSCEV(One);
  const SCEV *SumS = SE.getSCEV(Sum);
  const SCEV *DivS = SE.getSCEV(Div);
  // Leaves (arguments and constants) have expression size 1.
  EXPECT_EQ(AS->getExpressionSize(), 1u);
  EXPECT_EQ(BS->getExpressionSize(), 1u);
  EXPECT_EQ(CS->getExpressionSize(), 1u);
  // %s1 = %a + 1 counts its own node plus both leaves: 3.
  EXPECT_EQ(SumS->getExpressionSize(), 3u);
  // %s2 = %s1 /u %b counts its node, the size-3 numerator, and the leaf: 5.
  EXPECT_EQ(DivS->getExpressionSize(), 5u);
}
1451
TEST_F(ScalarEvolutionsTest, SCEVExpandInsertCanonicalIV) {
  LLVMContext C;
  SMDiagnostic Err;

  // Expand the addrec produced by GetAddRec into a loop without a canonical IV.
  // SCEVExpander will insert one.
  auto TestNoCanonicalIV = [&](
      std::function<const SCEV *(ScalarEvolution & SE, Loop * L)> GetAddRec) {
    std::unique_ptr<Module> M =
        parseAssemblyString("define i32 @test(i32 %limit) { "
                            "entry: "
                            "  br label %loop "
                            "loop: "
                            "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
                            "  %i.inc = add nsw i32 %i, 1 "
                            "  %cont = icmp slt i32 %i.inc, %limit "
                            "  br i1 %cont, label %loop, label %exit "
                            "exit: "
                            "  ret i32 %i.inc "
                            "}",
                            Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
      auto &I = GetInstByName(F, "i");
      auto *Loop = LI.getLoopFor(I.getParent());
      // Precondition: %i starts at 1, so the loop has no canonical ({0,+,1})
      // induction variable yet.
      EXPECT_FALSE(Loop->getCanonicalInductionVariable());

      auto *AR = GetAddRec(SE, Loop);
      unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());

      SCEVExpander Exp(SE, M->getDataLayout(), "expander");
      auto *InsertAt = I.getNextNode();
      Exp.expandCodeFor(AR, nullptr, InsertAt);
      // Expansion must have inserted a canonical IV as wide as the addrec.
      PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
      unsigned CanonicalIVBitWidth =
          cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
      EXPECT_EQ(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);
    });
  };

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // which is narrower than addrec type.
  // SCEVExpander will insert a canonical IV of a wider type to expand the
  // addrec.
  auto TestNarrowCanonicalIV = [&](
      std::function<const SCEV *(ScalarEvolution & SE, Loop * L)> GetAddRec) {
    std::unique_ptr<Module> M = parseAssemblyString(
        "define i32 @test(i32 %limit) { "
        "entry: "
        "  br label %loop "
        "loop: "
        "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
        "  %canonical.iv = phi i8 [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
        "  %i.inc = add nsw i32 %i, 1 "
        "  %canonical.iv.inc = add i8 %canonical.iv, 1 "
        "  %cont = icmp slt i32 %i.inc, %limit "
        "  br i1 %cont, label %loop, label %exit "
        "exit: "
        "  ret i32 %i.inc "
        "}",
        Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
      auto &I = GetInstByName(F, "i");

      auto *LoopHeaderBB = I.getParent();
      auto *Loop = LI.getLoopFor(LoopHeaderBB);
      // Precondition: the i8 phi is recognized as the canonical IV.
      PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
      EXPECT_EQ(CanonicalIV, &GetInstByName(F, "canonical.iv"));

      auto *AR = GetAddRec(SE, Loop);

      // The existing canonical IV is narrower than the addrec type.
      unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
      unsigned CanonicalIVBitWidth =
          cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
      EXPECT_LT(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);

      SCEVExpander Exp(SE, M->getDataLayout(), "expander");
      auto *InsertAt = I.getNextNode();
      Exp.expandCodeFor(AR, nullptr, InsertAt);

      // Loop over all of the PHI nodes, looking for the new canonical indvar.
      PHINode *NewCanonicalIV = nullptr;
      for (BasicBlock::iterator i = LoopHeaderBB->begin(); isa<PHINode>(i);
           ++i) {
        PHINode *PN = cast<PHINode>(i);
        if (PN == &I || PN == CanonicalIV)
          continue;
        // We expect that the only PHI added is the new canonical IV
        EXPECT_FALSE(NewCanonicalIV);
        NewCanonicalIV = PN;
      }

      // Check that NewCanonicalIV is a canonical IV, i.e {0,+,1}:
      // starts at 0 on loop entry ...
      BasicBlock *Incoming = nullptr, *Backedge = nullptr;
      EXPECT_TRUE(Loop->getIncomingAndBackEdge(Incoming, Backedge));
      auto *Start = NewCanonicalIV->getIncomingValueForBlock(Incoming);
      EXPECT_TRUE(isa<ConstantInt>(Start));
      EXPECT_TRUE(dyn_cast<ConstantInt>(Start)->isZero());
      // ... and increments by 1 along the backedge.
      auto *Next = NewCanonicalIV->getIncomingValueForBlock(Backedge);
      EXPECT_TRUE(isa<BinaryOperator>(Next));
      auto *NextBinOp = dyn_cast<BinaryOperator>(Next);
      EXPECT_EQ(NextBinOp->getOpcode(), Instruction::Add);
      EXPECT_EQ(NextBinOp->getOperand(0), NewCanonicalIV);
      auto *Step = NextBinOp->getOperand(1);
      EXPECT_TRUE(isa<ConstantInt>(Step));
      EXPECT_TRUE(dyn_cast<ConstantInt>(Step)->isOne());

      // The freshly inserted canonical IV must be as wide as the addrec.
      unsigned NewCanonicalIVBitWidth =
          cast<IntegerType>(NewCanonicalIV->getType())->getBitWidth();
      EXPECT_EQ(NewCanonicalIVBitWidth, ExpectedCanonicalIVWidth);
    });
  };

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // of addrec width.
  // To expand the addrec SCEVExpander should use the existing canonical IV.
  auto TestMatchingCanonicalIV = [&](
      std::function<const SCEV *(ScalarEvolution & SE, Loop * L)> GetAddRec,
      unsigned ARBitWidth) {
    auto ARBitWidthTypeStr = "i" + std::to_string(ARBitWidth);
    std::unique_ptr<Module> M = parseAssemblyString(
        "define i32 @test(i32 %limit) { "
        "entry: "
        "  br label %loop "
        "loop: "
        "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
        "  %canonical.iv = phi " + ARBitWidthTypeStr +
        " [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
        "  %i.inc = add nsw i32 %i, 1 "
        "  %canonical.iv.inc = add " + ARBitWidthTypeStr +
        " %canonical.iv, 1 "
        "  %cont = icmp slt i32 %i.inc, %limit "
        "  br i1 %cont, label %loop, label %exit "
        "exit: "
        "  ret i32 %i.inc "
        "}",
        Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
      auto &I = GetInstByName(F, "i");
      auto &CanonicalIV = GetInstByName(F, "canonical.iv");

      auto *LoopHeaderBB = I.getParent();
      auto *Loop = LI.getLoopFor(LoopHeaderBB);
      // Precondition: the existing canonical IV already has the addrec width.
      EXPECT_EQ(&CanonicalIV, Loop->getCanonicalInductionVariable());
      unsigned CanonicalIVBitWidth =
          cast<IntegerType>(CanonicalIV.getType())->getBitWidth();

      auto *AR = GetAddRec(SE, Loop);
      EXPECT_EQ(ARBitWidth, SE.getTypeSizeInBits(AR->getType()));
      EXPECT_EQ(CanonicalIVBitWidth, ARBitWidth);

      SCEVExpander Exp(SE, M->getDataLayout(), "expander");
      auto *InsertAt = I.getNextNode();
      Exp.expandCodeFor(AR, nullptr, InsertAt);

      // Loop over all of the PHI nodes, looking if a new canonical indvar was
      // introduced.
      PHINode *NewCanonicalIV = nullptr;
      for (BasicBlock::iterator i = LoopHeaderBB->begin(); isa<PHINode>(i);
           ++i) {
        PHINode *PN = cast<PHINode>(i);
        if (PN == &I || PN == &CanonicalIV)
          continue;
        NewCanonicalIV = PN;
      }
      // The expander must have reused the existing canonical IV rather than
      // inserting a new one.
      EXPECT_FALSE(NewCanonicalIV);
    });
  };

  unsigned ARBitWidth = 16;
  Type *ARType = IntegerType::get(C, ARBitWidth);

  // Expand {5,+,1}
  auto GetAR2 = [&](ScalarEvolution &SE, Loop *L) -> const SCEV * {
    return SE.getAddRecExpr(SE.getConstant(APInt(ARBitWidth, 5)),
                            SE.getOne(ARType), L, SCEV::FlagAnyWrap);
  };
  TestNoCanonicalIV(GetAR2);
  TestNarrowCanonicalIV(GetAR2);
  TestMatchingCanonicalIV(GetAR2, ARBitWidth);
}
1644
TEST_F(ScalarEvolutionsTest, SCEVExpanderShlNSW) {

  // Each case masks a load down to its sign bit; SCEV models that as a mul
  // carrying nsw (asserted below). When the expander materializes the mul as
  // a shl, the shl must not inherit the nsw flag.
  auto checkOneCase = [this](std::string &&str) {
    LLVMContext C;
    SMDiagnostic Err;
    std::unique_ptr<Module> M = parseAssemblyString(str, Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    Function *F = M->getFunction("f");
    ASSERT_NE(F, nullptr) << "Could not find function 'f'";

    BasicBlock &Entry = F->getEntryBlock();
    LoadInst *Load = cast<LoadInst>(&Entry.front());
    BinaryOperator *And = cast<BinaryOperator>(*Load->user_begin());

    ScalarEvolution SE = buildSE(*F);
    const SCEV *AndSCEV = SE.getSCEV(And);
    EXPECT_TRUE(isa<SCEVMulExpr>(AndSCEV));
    EXPECT_TRUE(cast<SCEVMulExpr>(AndSCEV)->hasNoSignedWrap());

    SCEVExpander Exp(SE, M->getDataLayout(), "expander");
    auto *I = cast<Instruction>(Exp.expandCodeFor(AndSCEV, nullptr, And));
    EXPECT_EQ(I->getOpcode(), Instruction::Shl);
    EXPECT_FALSE(I->hasNoSignedWrap());
  };

  // i16: -32768 == 0x8000, the sign bit.
  checkOneCase("define void @f(i16* %arrayidx) { "
               "  %1 = load i16, i16* %arrayidx "
               "  %2 = and i16 %1, -32768 "
               "  ret void "
               "} ");

  // i8: -128 == 0x80, the sign bit.
  checkOneCase("define void @f(i8* %arrayidx) { "
               "  %1 = load i8, i8* %arrayidx "
               "  %2 = and i8 %1, -128 "
               "  ret void "
               "} ");
}
1685
// A loop counter that is updated through the llvm.loop.decrement.reg
// intrinsic must still be analyzable: SCEV should classify the decremented
// value as an add recurrence.
TEST_F(ScalarEvolutionsTest, SCEVLoopDecIntrinsic) {
  LLVMContext Ctx;
  SMDiagnostic Diag;
  const char *ModuleStr =
      "define void @foo(i32 %N) { "
      "entry: "
      " %cmp3 = icmp sgt i32 %N, 0 "
      " br i1 %cmp3, label %for.body, label %for.cond.cleanup "
      "for.cond.cleanup: "
      " ret void "
      "for.body: "
      " %i.04 = phi i32 [ %inc, %for.body ], [ 100, %entry ] "
      " %inc = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %i.04, i32 1) "
      " %exitcond = icmp ne i32 %inc, 0 "
      " br i1 %exitcond, label %for.cond.cleanup, label %for.body "
      "} "
      "declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) ";
  std::unique_ptr<Module> M = parseAssemblyString(ModuleStr, Diag, Ctx);

  ASSERT_TRUE(M && "Could not parse module?");
  ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    const SCEV *IncSCEV = SE.getSCEV(getInstructionByName(F, "inc"));
    EXPECT_TRUE(isa<SCEVAddRecExpr>(IncSCEV));
  });
}
1713
// Exercise computeConstantDifference: it should see through syntactically
// different but semantically offset-equal expressions (e.g. %xa and %xb),
// and report None when no constant difference exists.
TEST_F(ScalarEvolutionsTest, SCEVComputeConstantDifference) {
  LLVMContext Ctx;
  SMDiagnostic Diag;
  std::unique_ptr<Module> M = parseAssemblyString(
      "define void @foo(i32 %sz, i32 %pp) { "
      "entry: "
      " %v0 = add i32 %pp, 0 "
      " %v3 = add i32 %pp, 3 "
      " br label %loop.body "
      "loop.body: "
      " %iv = phi i32 [ %iv.next, %loop.body ], [ 0, %entry ] "
      " %xa = add nsw i32 %iv, %v0 "
      " %yy = add nsw i32 %iv, %v3 "
      " %xb = sub nsw i32 %yy, 3 "
      " %iv.next = add nsw i32 %iv, 1 "
      " %cmp = icmp sle i32 %iv.next, %sz "
      " br i1 %cmp, label %loop.body, label %exit "
      "exit: "
      " ret void "
      "} ",
      Diag, Ctx);

  ASSERT_TRUE(M && "Could not parse module?");
  ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    // SCEVs for the named instructions; expected expressions in comments.
    const SCEV *V0 = SE.getSCEV(getInstructionByName(F, "v0"));       // %pp
    const SCEV *V3 = SE.getSCEV(getInstructionByName(F, "v3"));       // (3 + %pp)
    const SCEV *IV = SE.getSCEV(getInstructionByName(F, "iv"));       // {0,+,1}
    const SCEV *XA = SE.getSCEV(getInstructionByName(F, "xa"));       // {%pp,+,1}
    const SCEV *YY = SE.getSCEV(getInstructionByName(F, "yy"));       // {(3 + %pp),+,1}
    const SCEV *XB = SE.getSCEV(getInstructionByName(F, "xb"));       // {%pp,+,1}
    const SCEV *IVNext =
        SE.getSCEV(getInstructionByName(F, "iv.next"));               // {1,+,1}

    // Narrow computeConstantDifference's result to int, or propagate None
    // when no constant difference could be computed.
    auto Diff = [&SE](const SCEV *LHS, const SCEV *RHS) -> Optional<int> {
      auto MaybeDiff = computeConstantDifference(SE, LHS, RHS);
      if (!MaybeDiff)
        return None;

      auto Wide = MaybeDiff->getSExtValue();
      int Narrow = static_cast<int>(Wide);
      assert(Narrow == Wide && "Integer overflow");
      return Narrow;
    };

    EXPECT_EQ(Diff(V3, V0), 3);
    EXPECT_EQ(Diff(V0, V3), -3);
    EXPECT_EQ(Diff(V0, V0), 0);
    EXPECT_EQ(Diff(V3, V3), 0);
    EXPECT_EQ(Diff(IV, IV), 0);
    EXPECT_EQ(Diff(XA, XB), 0);
    EXPECT_EQ(Diff(XA, YY), -3);
    EXPECT_EQ(Diff(YY, XB), 3);
    EXPECT_EQ(Diff(IV, IVNext), -1);
    EXPECT_EQ(Diff(IVNext, IV), 1);
    EXPECT_EQ(Diff(IVNext, IVNext), 0);
    EXPECT_EQ(Diff(V0, IV), None);
    EXPECT_EQ(Diff(IVNext, V3), None);
    EXPECT_EQ(Diff(YY, V3), None);
  });
}
1775
// Test expansion of nested addrecs in CanonicalMode.
// Expanding nested addrecs in canonical mode requires a canonical IV of a
// type wider than the type of the addrec itself. Currently, SCEVExpander
// just falls back to literal mode for nested addrecs.
TEST_F(ScalarEvolutionsTest, SCEVExpandNonAffineAddRec) {
  LLVMContext C;
  SMDiagnostic Err;

  // Expand the addrec produced by GetAddRec into a loop without a canonical IV.
  auto TestNoCanonicalIV = [&](std::function<const SCEVAddRecExpr *(
                                   ScalarEvolution & SE, Loop * L)> GetAddRec) {
    // The loop IV %i starts at 1, so this loop intentionally has no
    // canonical ({0,+,1}) induction variable.
    std::unique_ptr<Module> M =
        parseAssemblyString("define i32 @test(i32 %limit) { "
                            "entry: "
                            " br label %loop "
                            "loop: "
                            " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
                            " %i.inc = add nsw i32 %i, 1 "
                            " %cont = icmp slt i32 %i.inc, %limit "
                            " br i1 %cont, label %loop, label %exit "
                            "exit: "
                            " ret i32 %i.inc "
                            "}",
                            Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
      auto &I = GetInstByName(F, "i");
      auto *Loop = LI.getLoopFor(I.getParent());
      // Confirm the precondition this scenario is about.
      EXPECT_FALSE(Loop->getCanonicalInductionVariable());

      auto *AR = GetAddRec(SE, Loop);
      EXPECT_FALSE(AR->isAffine());

      SCEVExpander Exp(SE, M->getDataLayout(), "expander");
      auto *InsertAt = I.getNextNode();
      Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
      auto *ExpandedAR = SE.getSCEV(V);
      // Check that the expansion happened literally.
      EXPECT_EQ(AR, ExpandedAR);
    });
  };

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // which is narrower than addrec type.
  auto TestNarrowCanonicalIV = [&](
      std::function<const SCEVAddRecExpr *(ScalarEvolution & SE, Loop * L)>
          GetAddRec) {
    // %canonical.iv is i8 — narrower than the i16 addrecs built below.
    std::unique_ptr<Module> M = parseAssemblyString(
        "define i32 @test(i32 %limit) { "
        "entry: "
        " br label %loop "
        "loop: "
        " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
        " %canonical.iv = phi i8 [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
        " %i.inc = add nsw i32 %i, 1 "
        " %canonical.iv.inc = add i8 %canonical.iv, 1 "
        " %cont = icmp slt i32 %i.inc, %limit "
        " br i1 %cont, label %loop, label %exit "
        "exit: "
        " ret i32 %i.inc "
        "}",
        Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
      auto &I = GetInstByName(F, "i");

      auto *LoopHeaderBB = I.getParent();
      auto *Loop = LI.getLoopFor(LoopHeaderBB);
      PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
      EXPECT_EQ(CanonicalIV, &GetInstByName(F, "canonical.iv"));

      auto *AR = GetAddRec(SE, Loop);
      EXPECT_FALSE(AR->isAffine());

      // Confirm the precondition: the existing canonical IV is strictly
      // narrower than the addrec's type, so it cannot be reused.
      unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
      unsigned CanonicalIVBitWidth =
          cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
      EXPECT_LT(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);

      SCEVExpander Exp(SE, M->getDataLayout(), "expander");
      auto *InsertAt = I.getNextNode();
      Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
      auto *ExpandedAR = SE.getSCEV(V);
      // Check that the expansion happened literally.
      EXPECT_EQ(AR, ExpandedAR);
    });
  };

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // of addrec width.
  auto TestMatchingCanonicalIV = [&](
      std::function<const SCEVAddRecExpr *(ScalarEvolution & SE, Loop * L)>
          GetAddRec,
      unsigned ARBitWidth) {
    // Build the canonical IV's type string (e.g. "i16") to match ARBitWidth.
    auto ARBitWidthTypeStr = "i" + std::to_string(ARBitWidth);
    std::unique_ptr<Module> M = parseAssemblyString(
        "define i32 @test(i32 %limit) { "
        "entry: "
        " br label %loop "
        "loop: "
        " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
        " %canonical.iv = phi " + ARBitWidthTypeStr +
        " [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
        " %i.inc = add nsw i32 %i, 1 "
        " %canonical.iv.inc = add " + ARBitWidthTypeStr +
        " %canonical.iv, 1 "
        " %cont = icmp slt i32 %i.inc, %limit "
        " br i1 %cont, label %loop, label %exit "
        "exit: "
        " ret i32 %i.inc "
        "}",
        Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
      auto &I = GetInstByName(F, "i");
      auto &CanonicalIV = GetInstByName(F, "canonical.iv");

      auto *LoopHeaderBB = I.getParent();
      auto *Loop = LI.getLoopFor(LoopHeaderBB);
      EXPECT_EQ(&CanonicalIV, Loop->getCanonicalInductionVariable());
      unsigned CanonicalIVBitWidth =
          cast<IntegerType>(CanonicalIV.getType())->getBitWidth();

      auto *AR = GetAddRec(SE, Loop);
      EXPECT_FALSE(AR->isAffine());
      // Confirm the precondition: canonical IV width matches the addrec's.
      EXPECT_EQ(ARBitWidth, SE.getTypeSizeInBits(AR->getType()));
      EXPECT_EQ(CanonicalIVBitWidth, ARBitWidth);

      SCEVExpander Exp(SE, M->getDataLayout(), "expander");
      auto *InsertAt = I.getNextNode();
      Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
      auto *ExpandedAR = SE.getSCEV(V);
      // Check that the expansion happened literally.
      EXPECT_EQ(AR, ExpandedAR);
    });
  };

  unsigned ARBitWidth = 16;
  Type *ARType = IntegerType::get(C, ARBitWidth);

  // Run all three scenarios for addrecs of increasing nesting depth.

  // Expand {5,+,1,+,1}
  auto GetAR3 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
    SmallVector<const SCEV *, 3> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
                                        SE.getOne(ARType), SE.getOne(ARType)};
    return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
  };
  TestNoCanonicalIV(GetAR3);
  TestNarrowCanonicalIV(GetAR3);
  TestMatchingCanonicalIV(GetAR3, ARBitWidth);

  // Expand {5,+,1,+,1,+,1}
  auto GetAR4 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
    SmallVector<const SCEV *, 4> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
                                        SE.getOne(ARType), SE.getOne(ARType),
                                        SE.getOne(ARType)};
    return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
  };
  TestNoCanonicalIV(GetAR4);
  TestNarrowCanonicalIV(GetAR4);
  TestMatchingCanonicalIV(GetAR4, ARBitWidth);

  // Expand {5,+,1,+,1,+,1,+,1}
  auto GetAR5 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
    SmallVector<const SCEV *, 5> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
                                        SE.getOne(ARType), SE.getOne(ARType),
                                        SE.getOne(ARType), SE.getOne(ARType)};
    return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
  };
  TestNoCanonicalIV(GetAR5);
  TestNarrowCanonicalIV(GetAR5);
  TestMatchingCanonicalIV(GetAR5, ARBitWidth);
}
1957
1958 } // end namespace llvm
1959