//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
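
// A note on uniquing (an illustrative sketch; the SCEVs X and Y are assumed
// to already exist): because only one SCEV of a particular shape is ever
// created, structurally identical expressions fold to the same node and can
// be compared by pointer:
// \code
//   const SCEV *A = SE.getAddExpr(X, Y);
//   const SCEV *B = SE.getAddExpr(Y, X); // Canonicalized to the same node.
//   assert(A == B && "Uniqued SCEVs support pointer equality");
// \endcode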

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionDivision.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::ZeroOrMore,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));
// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification when -verify-scev is passed"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));
static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(8));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));

static cl::opt<bool>
ClassifyExpressions("scalar-evolution-classify-expressions",
    cl::Hidden, cl::init(true),
    cl::desc("When printing analysis, include information on every instruction"));

static cl::opt<bool> UseExpensiveRangeSharpening(
    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::init(false),
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
             "be costly in terms of compile time"));

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scPtrToInt: {
    const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this);
    const SCEV *Op = PtrToInt->getOperand();
    OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
       << *PtrToInt->getType() << ")";
    return;
  }
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr:
      OpStr = " umin ";
      break;
    case scSMinExpr:
      OpStr = " smin ";
      break;
    default:
      llvm_unreachable("There are no other nary expression types.");
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
      break;
    default:
      // Nothing to print for other nary expressions.
      break;
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                           const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) {
  Operands[0] = op;
}

SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEVCastExpr(ID, SCEVTy, op, ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

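// As an illustrative note (the GEP below is written in IR syntax): isSizeOf
// recognizes the classic constant-expression "sizeof" idiom, a ptrtoint of a
// one-past-null GEP, e.g.
// \code
//   ptrtoint(%T* getelementptr(%T, %T* null, i32 1))  ==  sizeof(%T)
// \endcode
// isAlignOf and isOffsetOf below recognize the analogous alignof/offsetof
// idioms built from GEPs of a null base pointer.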
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to be
// more efficient.
static int CompareSCEVComplexity(
    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
    EquivalenceClasses<const Value *> &EqCacheValue,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LA->getOperand(i), RA->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LC->getOperand(i), RC->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                  RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                  LC->getOperand(), RC->getOperand(), DT,
                                  Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
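///
/// As an illustrative sketch (%x is an arbitrary SCEVUnknown): an add's
/// operand list such as
/// \code
///   {%x, 2, %x}   -->   {2, %x, %x}
/// \endcode
/// since constants have the lowest getSCEVType() complexity, and the two
/// occurrences of %x become consecutive, which lets getAddExpr spot the
/// duplicates and fold them to (2 + (2 * %x)).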
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT) <
           0;
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

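  // As a worked example (illustrative): for K = 4 and W = 32, K! = 24 =
  // 2^3 * 3, so T = 3 and K! / 2^T = 3 (odd). We compute
  // It*(It-1)*(It-2)*(It-3) at W + T = 35 bits, shift the product right by
  // T = 3, truncate to 32 bits, and multiply by the multiplicative inverse
  // of 3 modulo 2^32, which performs the exact division by the remaining
  // odd factor.
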
  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
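///
/// For instance (an illustrative sketch): the affine recurrence {A,+,B}
/// evaluates at iteration It to A*BC(It, 0) + B*BC(It, 1) = A + B*It, and
/// the quadratic recurrence {0,+,1,+,1} evaluates to
/// BC(It, 1) + BC(It, 2) = It + It*(It-1)/2.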
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(Ty->isIntegerTy() && "Target type must be an integer type!");
  assert(Depth <= 1 && "getPtrToIntExpr() should self-recurse at most once.");

  // We could be called with an integer-typed operand during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self cast.
  if (!Op->getType()->isPointerTy())
    return getTruncateOrZeroExtend(Op, Ty);

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return getTruncateOrZeroExtend(S, Ty);

  // If not, is this expression something we can't reduce any further?
  if (isa<SCEVUnknown>(Op)) {
    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());
    assert(getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(
               Op->getType())) == getDataLayout().getTypeSizeInBits(IntPtrTy) &&
           "We can only model ptrtoint if SCEV's effective (integer) type is "
           "sufficiently wide to represent all possible pointer values.");
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return getTruncateOrZeroExtend(S, Ty);
  }

  assert(Depth == 0 &&
         "getPtrToIntExpr() should not self-recurse for non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
  // arbitrary expression, we want to have SCEVPtrToIntExpr of an SCEVUnknown
  // only, and the expressions must otherwise be integer-typed.
  // So sink the cast down to the SCEVUnknown's.

  /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
  /// which computes a pointer-typed value, and rewrites the whole expression
  /// tree so that *all* the computations are done on integers, and the only
  /// pointer-typed operands in the expression are SCEVUnknown.
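  ///
  /// For instance (an illustrative sketch, with %p a pointer-typed
  /// SCEVUnknown and %i an integer one): (ptrtoint (%p + (4 * %i))) is
  /// rewritten to ((ptrtoint %p) + (4 * %i)), leaving %p as the only
  /// pointer-typed leaf.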
  class SCEVPtrToIntSinkingRewriter
      : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
    using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;

  public:
    SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Rewriter.visit(Scev);
    }

    const SCEV *visit(const SCEV *S) {
      Type *STy = S->getType();
      // If the expression is not pointer-typed, just keep it as-is.
      if (!STy->isPointerTy())
        return S;
      // Else, recursively sink the cast down into it.
      return Base::visit(S);
    }

    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      Type *ExprPtrTy = Expr->getType();
      assert(ExprPtrTy->isPointerTy() &&
             "Should only reach pointer-typed SCEVUnknown's.");
      Type *ExprIntPtrTy = SE.getDataLayout().getIntPtrType(ExprPtrTy);
      return SE.getPtrToIntExpr(Expr, ExprIntPtrTy, /*Depth=*/1);
    }
  };

  // And actually perform the cast sinking.
  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
  assert(IntOp->getType()->isIntegerTy() &&
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");
  return getTruncateOrZeroExtend(IntOp, Ty);
}

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that it was inserted into the cache by the recursive calls and
    // other modifications above. So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // Return zero if truncating to known zeros.
  uint32_t MinTrailingZeros = GetMinTrailingZeros(Op);
  if (MinTrailingZeros >= getTypeSizeInBits(Ty))
    return getZero(Ty);

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
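// For example (an illustrative sketch, i8 arithmetic): for a Step known to
// be positive with signed-range maximum 1, the limit is
// SINT_MIN - 1 == SINT_MAX (computed modulo 2^8) with predicate SLT: any
// recurrence value v satisfying v <s SINT_MAX can be incremented by Step
// without signed overflow.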
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling. This
// allows normalizing a sign/zero extended AddRec as such:
//   {sext/zext(Step + Start),+,Step} => {Step + sext/zext(Start),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
      ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}
1406
1407 // Get the normalized zero or sign extended expression for this AddRec's Start.
1408 template <typename ExtendOpTy>
getExtendAddRecStart(const SCEVAddRecExpr * AR,Type * Ty,ScalarEvolution * SE,unsigned Depth)1409 static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
1410 ScalarEvolution *SE,
1411 unsigned Depth) {
1412 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1413
1414 const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
1415 if (!PreStart)
1416 return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);
1417
1418 return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
1419 Depth),
1420 (SE->*GetExtendExpr)(PreStart, Ty, Depth));
1421 }
1422
1423 // Try to prove away overflow by looking at "nearby" add recurrences. A
1424 // motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
1425 // does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
1426 //
1427 // Formally:
1428 //
1429 // {S,+,X} == {S-T,+,X} + T
1430 // => Ext({S,+,X}) == Ext({S-T,+,X} + T)
1431 //
1432 // If ({S-T,+,X} + T) does not overflow ... (1)
1433 //
1434 // RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
1435 //
1436 // If {S-T,+,X} does not overflow ... (2)
1437 //
1438 // RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
1439 // == {Ext(S-T)+Ext(T),+,Ext(X)}
1440 //
1441 // If (S-T)+T does not overflow ... (3)
1442 //
1443 // RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
1444 // == {Ext(S),+,Ext(X)} == LHS
1445 //
1446 // Thus, if (1), (2) and (3) are true for some T, then
1447 // Ext({S,+,X}) == {Ext(S),+,Ext(X)}
1448 //
1449 // (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
1450 // does not overflow" restricted to the 0th iteration. Therefore we only need
1451 // to check for (1) and (2).
1452 //
1453 // In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
1454 // is `Delta` (defined below).
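// As a concrete instance of the motivating example above: take S = 1,
// X = 4 and T = 1, so PreStart = S - T = 0. If {0,+,4} is already known
// <nuw> (condition (2)), and {0,+,4} + 1 cannot overflow because {0,+,4}
// is `ult` -1 on every iteration (condition (1)), then
// Ext({1,+,4}) == {Ext(1),+,Ext(4)}, i.e. {1,+,4} is <nuw>.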
1455 template <typename ExtendOpTy>
1456 bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
1457 const SCEV *Step,
1458 const Loop *L) {
1459 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1460
1461 // We restrict `Start` to a constant to prevent SCEV from spending too much
1462 // time here. It is correct (but more expensive) to continue with a
1463 // non-constant `Start` and do a general SCEV subtraction to compute
1464 // `PreStart` below.
1465 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
1466 if (!StartC)
1467 return false;
1468
1469 APInt StartAI = StartC->getAPInt();
1470
1471 for (unsigned Delta : {-2, -1, 1, 2}) {
1472 const SCEV *PreStart = getConstant(StartAI - Delta);
1473
1474 FoldingSetNodeID ID;
1475 ID.AddInteger(scAddRecExpr);
1476 ID.AddPointer(PreStart);
1477 ID.AddPointer(Step);
1478 ID.AddPointer(L);
1479 void *IP = nullptr;
1480 const auto *PreAR =
1481 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1482
1483 // Give up if we don't already have the add recurrence we need because
1484 // actually constructing an add recurrence is relatively expensive.
1485 if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2)
1486 const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
1487 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
1488 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1489 DeltaS, &Pred, this);
1490 if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1)
1491 return true;
1492 }
1493 }
1494
1495 return false;
1496 }
1497
1498 // Finds an integer D for an expression (C + x + y + ...) such that the top
1499 // level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
1500 // unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
1501 // maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
1502 // the (C + x + y + ...) expression is \p WholeAddExpr.
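// As a concrete example (the values are arbitrary; any with these
// properties work): if C = 22 (0b10110) and all of x, y, ... are known to
// be divisible by 8 (TZ = 3), then D = 0b110 = 6 and C - D = 16. Now
// (C - D + x + y + ...) has at least 3 trailing zero bits, and adding back
// D < 2^3 only fills those zero bits, so no carry -- and hence no wrap --
// can occur in the top-level addition.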
1503 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
1504 const SCEVConstant *ConstantTerm,
1505 const SCEVAddExpr *WholeAddExpr) {
1506 const APInt &C = ConstantTerm->getAPInt();
1507 const unsigned BitWidth = C.getBitWidth();
1508 // Find number of trailing zeros of (x + y + ...) w/o the C first:
1509 uint32_t TZ = BitWidth;
1510 for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
1511 TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
1512 if (TZ) {
1513 // Set D to be as many least significant bits of C as possible while still
1514 // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
1515 return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
1516 }
1517 return APInt(BitWidth, 0);
1518 }
1519
1520 // Finds an integer D for an affine AddRec expression {C,+,x} such that the top
1521 // level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
1522 // number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
1523 // ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
1524 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
1525 const APInt &ConstantStart,
1526 const SCEV *Step) {
1527 const unsigned BitWidth = ConstantStart.getBitWidth();
1528 const uint32_t TZ = SE.GetMinTrailingZeros(Step);
1529 if (TZ)
1530 return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
1531 : ConstantStart;
1532 return APInt(BitWidth, 0);
1533 }
1534
1535 const SCEV *
1536 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
1537 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1538 "This is not an extending conversion!");
1539 assert(isSCEVable(Ty) &&
1540 "This is not a conversion to a SCEVable type!");
1541 Ty = getEffectiveSCEVType(Ty);
1542
1543 // Fold if the operand is constant.
1544 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1545 return getConstant(
1546 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));
1547
1548 // zext(zext(x)) --> zext(x)
1549 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1550 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);
1551
1552 // Before doing any expensive analysis, check to see if we've already
1553 // computed a SCEV for this Op and Ty.
1554 FoldingSetNodeID ID;
1555 ID.AddInteger(scZeroExtend);
1556 ID.AddPointer(Op);
1557 ID.AddPointer(Ty);
1558 void *IP = nullptr;
1559 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1560 if (Depth > MaxCastDepth) {
1561 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1562 Op, Ty);
1563 UniqueSCEVs.InsertNode(S, IP);
1564 addToLoopUseLists(S);
1565 return S;
1566 }
1567
1568 // zext(trunc(x)) --> zext(x) or x or trunc(x)
1569 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1570 // It's possible the bits taken off by the truncate were all zero bits. If
1571 // so, we should be able to simplify this further.
1572 const SCEV *X = ST->getOperand();
1573 ConstantRange CR = getUnsignedRange(X);
1574 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1575 unsigned NewBits = getTypeSizeInBits(Ty);
1576 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
1577 CR.zextOrTrunc(NewBits)))
1578 return getTruncateOrZeroExtend(X, Ty, Depth);
1579 }
1580
1581 // If the input value is a chrec scev, and we can prove that the value
1582 // did not overflow the old, smaller, value, we can zero extend all of the
1583 // operands (often constants). This allows analysis of something like
1584 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
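// In SCEV terms: X above is {0,+,1}<%loop> over i8, and once that addrec
// is known <nuw> the cast folds to {0,+,1}<nuw><%loop> over i32 instead of
// remaining an opaque zext({0,+,1}).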
1585 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1586 if (AR->isAffine()) {
1587 const SCEV *Start = AR->getStart();
1588 const SCEV *Step = AR->getStepRecurrence(*this);
1589 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1590 const Loop *L = AR->getLoop();
1591
1592 if (!AR->hasNoUnsignedWrap()) {
1593 auto NewFlags = proveNoWrapViaConstantRanges(AR);
1594 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1595 }
1596
1597 // If we have special knowledge that this addrec won't overflow,
1598 // we don't need to do any further analysis.
1599 if (AR->hasNoUnsignedWrap())
1600 return getAddRecExpr(
1601 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
1602 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1603
1604 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1605 // Note that this serves two purposes: It filters out loops that are
1606 // simply not analyzable, and it covers the case where this code is
1607 // being called from within backedge-taken count analysis, such that
1608 // attempting to ask for the backedge-taken count would likely result
1609 // in infinite recursion. In the latter case, the analysis code will
1610 // cope with a conservative value, and it will take care to purge
1611 // that value once it has finished.
1612 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1613 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1614 // Manually compute the final value for AR, checking for overflow.
1615
1616 // Check whether the backedge-taken count can be losslessly cast to
1617 // the addrec's type. The count is always unsigned.
1618 const SCEV *CastedMaxBECount =
1619 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
1620 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
1621 CastedMaxBECount, MaxBECount->getType(), Depth);
1622 if (MaxBECount == RecastedMaxBECount) {
1623 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1624 // Check whether Start+Step*MaxBECount has no unsigned overflow.
1625 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
1626 SCEV::FlagAnyWrap, Depth + 1);
1627 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
1628 SCEV::FlagAnyWrap,
1629 Depth + 1),
1630 WideTy, Depth + 1);
1631 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
1632 const SCEV *WideMaxBECount =
1633 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1634 const SCEV *OperandExtendedAdd =
1635 getAddExpr(WideStart,
1636 getMulExpr(WideMaxBECount,
1637 getZeroExtendExpr(Step, WideTy, Depth + 1),
1638 SCEV::FlagAnyWrap, Depth + 1),
1639 SCEV::FlagAnyWrap, Depth + 1);
1640 if (ZAdd == OperandExtendedAdd) {
1641 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1642 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1643 // Return the expression with the addrec on the outside.
1644 return getAddRecExpr(
1645 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1646 Depth + 1),
1647 getZeroExtendExpr(Step, Ty, Depth + 1), L,
1648 AR->getNoWrapFlags());
1649 }
1650 // Similar to above, only this time treat the step value as signed.
1651 // This covers loops that count down.
1652 OperandExtendedAdd =
1653 getAddExpr(WideStart,
1654 getMulExpr(WideMaxBECount,
1655 getSignExtendExpr(Step, WideTy, Depth + 1),
1656 SCEV::FlagAnyWrap, Depth + 1),
1657 SCEV::FlagAnyWrap, Depth + 1);
1658 if (ZAdd == OperandExtendedAdd) {
1659 // Cache knowledge of AR NW, which is propagated to this AddRec.
1660 // Negative step causes unsigned wrap, but it still can't self-wrap.
1661 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1662 // Return the expression with the addrec on the outside.
1663 return getAddRecExpr(
1664 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1665 Depth + 1),
1666 getSignExtendExpr(Step, Ty, Depth + 1), L,
1667 AR->getNoWrapFlags());
1668 }
1669 }
1670 }
1671
1672 // Normally, in the cases we can prove no-overflow via a
1673 // backedge guarding condition, we can also compute a backedge
1674 // taken count for the loop. The exceptions are assumptions and
1675 // guards present in the loop -- SCEV is not great at exploiting
1676 // these to compute max backedge taken counts, but can still use
1677 // these to prove lack of overflow. Use this fact to avoid
1678 // doing extra work that may not pay off.
1679 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
1680 !AC.assumptions().empty()) {
1681
1682 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1683 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1684 if (AR->hasNoUnsignedWrap()) {
1685 // Same as nuw case above - duplicated here to avoid a compile time
1686 // issue. It's not clear whether the order of checks matters, but
1687 // it's one of two possible causes for a change which was
1688 // reverted. Be conservative for the moment.
1689 return getAddRecExpr(
1690 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1691 Depth + 1),
1692 getZeroExtendExpr(Step, Ty, Depth + 1), L,
1693 AR->getNoWrapFlags());
1694 }
1695
1696 // For a negative step, we can extend the operands iff doing so only
1697 // traverses values in the range zext([0,UINT_MAX]).
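// For example, for {10,+,-1} over i8 the limit N computes to 0, so if the
// backedge is guarded by AR >u 0 the recurrence only traverses
// 10, 9, ..., 1 and the extension folds to {zext(10),+,sext(-1)}.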
1698 if (isKnownNegative(Step)) {
1699 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
1700 getSignedRangeMin(Step));
1701 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
1702 isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
1703 // Cache knowledge of AR NW, which is propagated to this
1704 // AddRec. Negative step causes unsigned wrap, but it
1705 // still can't self-wrap.
1706 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1707 // Return the expression with the addrec on the outside.
1708 return getAddRecExpr(
1709 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1710 Depth + 1),
1711 getSignExtendExpr(Step, Ty, Depth + 1), L,
1712 AR->getNoWrapFlags());
1713 }
1714 }
1715 }
1716
1717 // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
1718 // if D + (C - D + Step * n) could be proven to not unsigned wrap
1719 // where D maximizes the number of trailing zeros of (C - D + Step * n)
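// For example, zext({6,+,8}) over i8: the step contributes 3 trailing
// zero bits, so D = 6 (the low 3 bits of the start) and the result is
// (zext(6) + zext({0,+,8}))<nuw><nsw>.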
1720 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
1721 const APInt &C = SC->getAPInt();
1722 const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
1723 if (D != 0) {
1724 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1725 const SCEV *SResidual =
1726 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
1727 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1728 return getAddExpr(SZExtD, SZExtR,
1729 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1730 Depth + 1);
1731 }
1732 }
1733
1734 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1735 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1736 return getAddRecExpr(
1737 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
1738 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1739 }
1740 }
1741
1742 // zext(A % B) --> zext(A) % zext(B)
1743 {
1744 const SCEV *LHS;
1745 const SCEV *RHS;
1746 if (matchURem(Op, LHS, RHS))
1747 return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
1748 getZeroExtendExpr(RHS, Ty, Depth + 1));
1749 }
1750
1751 // zext(A / B) --> zext(A) / zext(B).
1752 if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
1753 return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
1754 getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));
1755
1756 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1757 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
1758 if (SA->hasNoUnsignedWrap()) {
1759 // If the addition does not unsign overflow then we can, by definition,
1760 // commute the zero extension with the addition operation.
1761 SmallVector<const SCEV *, 4> Ops;
1762 for (const auto *Op : SA->operands())
1763 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1764 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
1765 }
1766
1767 // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
1768 // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
1769 // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1770 //
1771 // Often address arithmetic contains expressions like
1772 // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
1773 // This transformation is useful while proving that such expressions are
1774 // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
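// Completing the example above: in zext(5 + 4*X) the term 4*X has 2
// trailing zero bits, so D = 1 (the low 2 bits of 5) and the expression
// canonicalizes to zext(1) + zext(4 + 4*X), i.e. 1 + zext(4 + 4*X),
// regardless of how the input was originally associated.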
1775 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1776 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
1777 if (D != 0) {
1778 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1779 const SCEV *SResidual =
1780 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
1781 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1782 return getAddExpr(SZExtD, SZExtR,
1783 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1784 Depth + 1);
1785 }
1786 }
1787 }
1788
1789 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
1790 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw>
1791 if (SM->hasNoUnsignedWrap()) {
1792 // If the multiply does not unsign overflow then we can, by definition,
1793 // commute the zero extension with the multiply operation.
1794 SmallVector<const SCEV *, 4> Ops;
1795 for (const auto *Op : SM->operands())
1796 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1797 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
1798 }
1799
1800 // zext(2^K * (trunc X to iN)) to iM ->
1801 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
1802 //
1803 // Proof:
1804 //
1805 // zext(2^K * (trunc X to iN)) to iM
1806 // = zext((trunc X to iN) << K) to iM
1807 // = zext((trunc X to i{N-K}) << K)<nuw> to iM
1808 // (because shl removes the top K bits)
1809 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
1810 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
1811 //
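// For example, with K = 2, N = 8, M = 32:
// zext(4 * (trunc X to i8)) to i32 == 4 * (zext(trunc X to i6) to i32),
// since multiplying by 4 discards the top 2 bits of the i8 value anyway.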
1812 if (SM->getNumOperands() == 2)
1813 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
1814 if (MulLHS->getAPInt().isPowerOf2())
1815 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
1816 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
1817 MulLHS->getAPInt().logBase2();
1818 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
1819 return getMulExpr(
1820 getZeroExtendExpr(MulLHS, Ty),
1821 getZeroExtendExpr(
1822 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
1823 SCEV::FlagNUW, Depth + 1);
1824 }
1825 }
1826
1827 // The cast wasn't folded; create an explicit cast node.
1828 // Recompute the insert position, as it may have been invalidated.
1829 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1830 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1831 Op, Ty);
1832 UniqueSCEVs.InsertNode(S, IP);
1833 addToLoopUseLists(S);
1834 return S;
1835 }
1836
1837 const SCEV *
1838 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
1839 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1840 "This is not an extending conversion!");
1841 assert(isSCEVable(Ty) &&
1842 "This is not a conversion to a SCEVable type!");
1843 Ty = getEffectiveSCEVType(Ty);
1844
1845 // Fold if the operand is constant.
1846 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1847 return getConstant(
1848 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
1849
1850 // sext(sext(x)) --> sext(x)
1851 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
1852 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);
1853
1854 // sext(zext(x)) --> zext(x)
1855 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1856 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);
1857
1858 // Before doing any expensive analysis, check to see if we've already
1859 // computed a SCEV for this Op and Ty.
1860 FoldingSetNodeID ID;
1861 ID.AddInteger(scSignExtend);
1862 ID.AddPointer(Op);
1863 ID.AddPointer(Ty);
1864 void *IP = nullptr;
1865 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1866 // Limit recursion depth.
1867 if (Depth > MaxCastDepth) {
1868 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
1869 Op, Ty);
1870 UniqueSCEVs.InsertNode(S, IP);
1871 addToLoopUseLists(S);
1872 return S;
1873 }
1874
1875 // sext(trunc(x)) --> sext(x) or x or trunc(x)
1876 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1877 // It's possible the bits taken off by the truncate were all sign bits. If
1878 // so, we should be able to simplify this further.
1879 const SCEV *X = ST->getOperand();
1880 ConstantRange CR = getSignedRange(X);
1881 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1882 unsigned NewBits = getTypeSizeInBits(Ty);
1883 if (CR.truncate(TruncBits).signExtend(NewBits).contains(
1884 CR.sextOrTrunc(NewBits)))
1885 return getTruncateOrSignExtend(X, Ty, Depth);
1886 }
1887
1888 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1889 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
1890 if (SA->hasNoSignedWrap()) {
1891 // If the addition does not sign overflow then we can, by definition,
1892 // commute the sign extension with the addition operation.
1893 SmallVector<const SCEV *, 4> Ops;
1894 for (const auto *Op : SA->operands())
1895 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
1896 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
1897 }
1898
1899 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
1900 // if D + (C - D + x + y + ...) could be proven to not signed wrap
1901 // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1902 //
1903 // For instance, this will bring two seemingly different expressions:
1904 // 1 + sext(5 + 20 * %x + 24 * %y) and
1905 // sext(6 + 20 * %x + 24 * %y)
1906 // to the same form:
1907 // 2 + sext(4 + 20 * %x + 24 * %y)
1908 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1909 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
1910 if (D != 0) {
1911 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
1912 const SCEV *SResidual =
1913 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
1914 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
1915 return getAddExpr(SSExtD, SSExtR,
1916 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1917 Depth + 1);
1918 }
1919 }
1920 }
1921 // If the input value is a chrec scev, and we can prove that the value
1922 // did not overflow the old, smaller, value, we can sign extend all of the
1923 // operands (often constants). This allows analysis of something like
1924 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
1925 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1926 if (AR->isAffine()) {
1927 const SCEV *Start = AR->getStart();
1928 const SCEV *Step = AR->getStepRecurrence(*this);
1929 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1930 const Loop *L = AR->getLoop();
1931
1932 if (!AR->hasNoSignedWrap()) {
1933 auto NewFlags = proveNoWrapViaConstantRanges(AR);
1934 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1935 }
1936
1937 // If we have special knowledge that this addrec won't overflow,
1938 // we don't need to do any further analysis.
1939 if (AR->hasNoSignedWrap())
1940 return getAddRecExpr(
1941 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
1942 getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);
1943
1944 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1945 // Note that this serves two purposes: It filters out loops that are
1946 // simply not analyzable, and it covers the case where this code is
1947 // being called from within backedge-taken count analysis, such that
1948 // attempting to ask for the backedge-taken count would likely result
1949 // in infinite recursion. In the latter case, the analysis code will
1950 // cope with a conservative value, and it will take care to purge
1951 // that value once it has finished.
1952 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1953 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1954 // Manually compute the final value for AR, checking for
1955 // overflow.
1956
1957 // Check whether the backedge-taken count can be losslessly cast to
1958 // the addrec's type. The count is always unsigned.
1959 const SCEV *CastedMaxBECount =
1960 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
1961 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
1962 CastedMaxBECount, MaxBECount->getType(), Depth);
1963 if (MaxBECount == RecastedMaxBECount) {
1964 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1965 // Check whether Start+Step*MaxBECount has no signed overflow.
1966 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
1967 SCEV::FlagAnyWrap, Depth + 1);
1968 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
1969 SCEV::FlagAnyWrap,
1970 Depth + 1),
1971 WideTy, Depth + 1);
1972 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
1973 const SCEV *WideMaxBECount =
1974 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1975 const SCEV *OperandExtendedAdd =
1976 getAddExpr(WideStart,
1977 getMulExpr(WideMaxBECount,
1978 getSignExtendExpr(Step, WideTy, Depth + 1),
1979 SCEV::FlagAnyWrap, Depth + 1),
1980 SCEV::FlagAnyWrap, Depth + 1);
1981 if (SAdd == OperandExtendedAdd) {
1982 // Cache knowledge of AR NSW, which is propagated to this AddRec.
1983 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
1984 // Return the expression with the addrec on the outside.
1985 return getAddRecExpr(
1986 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
1987 Depth + 1),
1988 getSignExtendExpr(Step, Ty, Depth + 1), L,
1989 AR->getNoWrapFlags());
1990 }
1991 // Similar to above, only this time treat the step value as unsigned.
1992 // This covers loops that count up with an unsigned step.
1993 OperandExtendedAdd =
1994 getAddExpr(WideStart,
1995 getMulExpr(WideMaxBECount,
1996 getZeroExtendExpr(Step, WideTy, Depth + 1),
1997 SCEV::FlagAnyWrap, Depth + 1),
1998 SCEV::FlagAnyWrap, Depth + 1);
1999 if (SAdd == OperandExtendedAdd) {
2000 // If AR wraps around then
2001 //
2002 // abs(Step) * MaxBECount > unsigned-max(AR->getType())
2003 // => SAdd != OperandExtendedAdd
2004 //
2005 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
2006 // (SAdd == OperandExtendedAdd => AR is NW)
2007
2008 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
2009
2010 // Return the expression with the addrec on the outside.
2011 return getAddRecExpr(
2012 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2013 Depth + 1),
2014 getZeroExtendExpr(Step, Ty, Depth + 1), L,
2015 AR->getNoWrapFlags());
2016 }
2017 }
2018 }
2019
2020 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2021 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
2022 if (AR->hasNoSignedWrap()) {
2023 // Same as nsw case above - duplicated here to avoid a compile time
2024 // issue. It's not clear whether the order of checks matters, but
2025 // it's one of two possible causes for a change which was
2026 // reverted. Be conservative for the moment.
2027 return getAddRecExpr(
2028 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2029 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2030 }
2031
2032 // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
2033 // if D + (C - D + Step * n) could be proven to not signed wrap
2034 // where D maximizes the number of trailing zeros of (C - D + Step * n)
2035 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
2036 const APInt &C = SC->getAPInt();
2037 const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
2038 if (D != 0) {
2039 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
2040 const SCEV *SResidual =
2041 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
2042 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
2043 return getAddExpr(SSExtD, SSExtR,
2044 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
2045 Depth + 1);
2046 }
2047 }
2048
2049 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2050 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2051 return getAddRecExpr(
2052 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2053 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2054 }
2055 }
2056
2057 // If the input value is provably positive and we could not simplify
2058 // away the sext, build a zext instead.
2059 if (isKnownNonNegative(Op))
2060 return getZeroExtendExpr(Op, Ty, Depth + 1);
2061
2062 // The cast wasn't folded; create an explicit cast node.
2063 // Recompute the insert position, as it may have been invalidated.
2064 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2065 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
2066 Op, Ty);
2067 UniqueSCEVs.InsertNode(S, IP);
2068 addToLoopUseLists(S);
2069 return S;
2070 }
2071
2072 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
2073 /// unspecified bits out to the given type.
2074 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
2075 Type *Ty) {
2076 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
2077 "This is not an extending conversion!");
2078 assert(isSCEVable(Ty) &&
2079 "This is not a conversion to a SCEVable type!");
2080 Ty = getEffectiveSCEVType(Ty);
2081
2082 // Sign-extend negative constants.
2083 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
2084 if (SC->getAPInt().isNegative())
2085 return getSignExtendExpr(Op, Ty);
2086
2087 // Peel off a truncate cast.
2088 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
2089 const SCEV *NewOp = T->getOperand();
2090 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
2091 return getAnyExtendExpr(NewOp, Ty);
2092 return getTruncateOrNoop(NewOp, Ty);
2093 }
2094
2095 // Next try a zext cast. If the cast is folded, use it.
2096 const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
2097 if (!isa<SCEVZeroExtendExpr>(ZExt))
2098 return ZExt;
2099
2100 // Next try a sext cast. If the cast is folded, use it.
2101 const SCEV *SExt = getSignExtendExpr(Op, Ty);
2102 if (!isa<SCEVSignExtendExpr>(SExt))
2103 return SExt;
2104
2105 // Force the cast to be folded into the operands of an addrec.
2106 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
2107 SmallVector<const SCEV *, 4> Ops;
2108 for (const SCEV *Op : AR->operands())
2109 Ops.push_back(getAnyExtendExpr(Op, Ty));
2110 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
2111 }
2112
2113 // If the expression is obviously signed, use the sext cast value.
2114 if (isa<SCEVSMaxExpr>(Op))
2115 return SExt;
2116
2117 // Absent any other information, use the zext cast value.
2118 return ZExt;
2119 }
2120
2121 /// Process the given Ops list, which is a list of operands to be added under
2122 /// the given scale, update the given map. This is a helper function for
2123 /// getAddRecExpr. As an example of what it does, given a sequence of operands
2124 /// that would form an add expression like this:
2125 ///
2126 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
2127 ///
2128 /// where A and B are constants, update the map with these values:
2129 ///
2130 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
2131 ///
2132 /// and add 13 + A*B*29 to AccumulatedConstant.
2133 /// This will allow getAddRecExpr to produce this:
2134 ///
2135 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
2136 ///
2137 /// This form often exposes folding opportunities that are hidden in
2138 /// the original operand list.
2139 ///
2140 /// Return true iff it appears that any interesting folding opportunities
2141 /// may be exposed. This helps getAddRecExpr short-circuit extra work in
2142 /// the common case where no interesting opportunities are present, and
2143 /// is also used as a check to avoid infinite recursion.
2144 static bool
2145 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
2146 SmallVectorImpl<const SCEV *> &NewOps,
2147 APInt &AccumulatedConstant,
2148 const SCEV *const *Ops, size_t NumOperands,
2149 const APInt &Scale,
2150 ScalarEvolution &SE) {
2151 bool Interesting = false;
2152
2153 // Iterate over the add operands. They are sorted, with constants first.
2154 unsigned i = 0;
2155 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2156 ++i;
2157 // Pull a buried constant out to the outside.
2158 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
2159 Interesting = true;
2160 AccumulatedConstant += Scale * C->getAPInt();
2161 }
2162
2163 // Next comes everything else. We're especially interested in multiplies
2164 // here, but they're in the middle, so just visit the rest with one loop.
2165 for (; i != NumOperands; ++i) {
2166 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
2167 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
2168 APInt NewScale =
2169 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
2170 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
2171 // A multiplication of a constant with another add; recurse.
2172 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
2173 Interesting |=
2174 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2175 Add->op_begin(), Add->getNumOperands(),
2176 NewScale, SE);
2177 } else {
2178 // A multiplication of a constant with some other value. Update
2179 // the map.
2180 SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands()));
2181 const SCEV *Key = SE.getMulExpr(MulOps);
2182 auto Pair = M.insert({Key, NewScale});
2183 if (Pair.second) {
2184 NewOps.push_back(Pair.first->first);
2185 } else {
2186 Pair.first->second += NewScale;
2187 // The map already had an entry for this value, which may indicate
2188 // a folding opportunity.
2189 Interesting = true;
2190 }
2191 }
2192 } else {
2193 // An ordinary operand. Update the map.
2194 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
2195 M.insert({Ops[i], Scale});
2196 if (Pair.second) {
2197 NewOps.push_back(Pair.first->first);
2198 } else {
2199 Pair.first->second += Scale;
2200 // The map already had an entry for this value, which may indicate
2201 // a folding opportunity.
2202 Interesting = true;
2203 }
2204 }
2205 }
2206
2207 return Interesting;
2208 }
2209
2210 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and
2211 // `Flags' as can't-wrap behavior. Infer a more aggressive set of
2212 // can't-overflow flags for the operation if possible.
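// For example (over i8): for (5 + x), ConstantRange reports that the add
// cannot sign-overflow whenever x lies in [-128, 123), so if the computed
// signed range of x is contained in that region the expression can be
// strengthened to (5 + x)<nsw> even if the incoming flags lacked it.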
2213 static SCEV::NoWrapFlags
2214 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
2215 const ArrayRef<const SCEV *> Ops,
2216 SCEV::NoWrapFlags Flags) {
2217 using namespace std::placeholders;
2218
2219 using OBO = OverflowingBinaryOperator;
2220
2221 bool CanAnalyze =
2222 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
2223 (void)CanAnalyze;
2224 assert(CanAnalyze && "don't call from other places!");
2225
2226 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
2227 SCEV::NoWrapFlags SignOrUnsignWrap =
2228 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2229
2230 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
2231 auto IsKnownNonNegative = [&](const SCEV *S) {
2232 return SE->isKnownNonNegative(S);
2233 };
2234
2235 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
2236 Flags =
2237 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
2238
2239 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2240
2241 if (SignOrUnsignWrap != SignOrUnsignMask &&
2242 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
2243 isa<SCEVConstant>(Ops[0])) {
2244
2245 auto Opcode = [&] {
2246 switch (Type) {
2247 case scAddExpr:
2248 return Instruction::Add;
2249 case scMulExpr:
2250 return Instruction::Mul;
2251 default:
2252 llvm_unreachable("Unexpected SCEV op.");
2253 }
2254 }();
2255
2256 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
2257
2258 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow.
2259 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
2260 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2261 Opcode, C, OBO::NoSignedWrap);
2262 if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
2263 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2264 }
2265
2266 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow.
2267 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
2268 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2269 Opcode, C, OBO::NoUnsignedWrap);
2270 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
2271 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2272 }
2273 }
2274
2275 return Flags;
2276 }
2277
2278 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
2279 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
2280 }
2281
2282 /// Get a canonical add expression, or something simpler if possible.
2283 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2284 SCEV::NoWrapFlags OrigFlags,
2285 unsigned Depth) {
2286 assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
2287 "only nuw or nsw allowed");
2288 assert(!Ops.empty() && "Cannot get empty add!");
2289 if (Ops.size() == 1) return Ops[0];
2290 #ifndef NDEBUG
2291 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2292 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2293 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2294 "SCEVAddExpr operand types don't match!");
2295 #endif
2296
2297 // Sort by complexity; this groups all similar expression types together.
2298 GroupByComplexity(Ops, &LI, DT);
2299
2300 // If there are any constants, fold them together.
2301 unsigned Idx = 0;
2302 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2303 ++Idx;
2304 assert(Idx < Ops.size());
2305 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2306 // We found two constants, fold them together!
2307 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2308 if (Ops.size() == 2) return Ops[0];
2309 Ops.erase(Ops.begin()+1); // Erase the folded element
2310 LHSC = cast<SCEVConstant>(Ops[0]);
2311 }
2312
2313 // If we are left with a constant zero being added, strip it off.
2314 if (LHSC->getValue()->isZero()) {
2315 Ops.erase(Ops.begin());
2316 --Idx;
2317 }
2318
2319 if (Ops.size() == 1) return Ops[0];
2320 }
2321
2322 // Delay expensive flag strengthening until necessary.
2323 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
2324 return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
2325 };
2326
2327 // Limit recursion calls depth.
2328 if (Depth > MaxArithDepth || hasHugeExpression(Ops))
2329 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2330
2331 if (SCEV *S = std::get<0>(findExistingSCEVInCache(scAddExpr, Ops))) {
2332 // Don't strengthen flags if we have no new information.
2333 SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S);
2334 if (Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2335 Add->setNoWrapFlags(ComputeFlags(Ops));
2336 return S;
2337 }
2338
2339 // Okay, check to see if the same value occurs in the operand list more than
2340 // once. If so, merge them together into a multiply expression. Since we
2341 // sorted the list, these values are required to be adjacent.
2342 Type *Ty = Ops[0]->getType();
2343 bool FoundMatch = false;
2344 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2345 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
2346 // Scan ahead to count how many equal operands there are.
2347 unsigned Count = 2;
2348 while (i+Count != e && Ops[i+Count] == Ops[i])
2349 ++Count;
2350 // Merge the values into a multiply.
2351 const SCEV *Scale = getConstant(Ty, Count);
2352 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
2353 if (Ops.size() == Count)
2354 return Mul;
2355 Ops[i] = Mul;
2356 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2357 --i; e -= Count - 1;
2358 FoundMatch = true;
2359 }
2360 if (FoundMatch)
2361 return getAddExpr(Ops, OrigFlags, Depth + 1);
2362
2363 // Check for truncates. If all the operands are truncated from the same
2364 // type, see if factoring out the truncate would permit the result to be
2365 // folded. E.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
2366 // if the contents of the resulting outer trunc fold to something simple.
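// For example, (trunc i64 x to i32) + (trunc i64 y to i32) is rewritten as
// trunc i64 (x + y) to i32 -- but only if x + y folds to a constant or a
// single SCEVUnknown; otherwise the widened form is discarded below.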
2367 auto FindTruncSrcType = [&]() -> Type * {
2368 // We're ultimately looking to fold an addrec of truncs and muls of only
2369 // constants and truncs, so if we find any other types of SCEV
2370 // as operands of the addrec then we bail and return nullptr here.
2371 // Otherwise, we return the type of the operand of a trunc that we find.
2372 if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
2373 return T->getOperand()->getType();
2374 if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2375 const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
2376 if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
2377 return T->getOperand()->getType();
2378 }
2379 return nullptr;
2380 };
2381 if (auto *SrcType = FindTruncSrcType()) {
2382 SmallVector<const SCEV *, 8> LargeOps;
2383 bool Ok = true;
2384 // Check all the operands to see if they can be represented in the
2385 // source type of the truncate.
2386 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
2387 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
2388 if (T->getOperand()->getType() != SrcType) {
2389 Ok = false;
2390 break;
2391 }
2392 LargeOps.push_back(T->getOperand());
2393 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2394 LargeOps.push_back(getAnyExtendExpr(C, SrcType));
2395 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2396 SmallVector<const SCEV *, 8> LargeMulOps;
2397 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2398 if (const SCEVTruncateExpr *T =
2399 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2400 if (T->getOperand()->getType() != SrcType) {
2401 Ok = false;
2402 break;
2403 }
2404 LargeMulOps.push_back(T->getOperand());
2405 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2406 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
2407 } else {
2408 Ok = false;
2409 break;
2410 }
2411 }
2412 if (Ok)
2413 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
2414 } else {
2415 Ok = false;
2416 break;
2417 }
2418 }
2419 if (Ok) {
2420 // Evaluate the expression in the larger type.
2421 const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
2422 // If it folds to something simple, use it. Otherwise, don't.
2423 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2424 return getTruncateExpr(Fold, Ty);
2425 }
2426 }
2427
2428 // Skip past any other cast SCEVs.
2429 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2430 ++Idx;
2431
2432 // If there are add operands they would be next.
2433 if (Idx < Ops.size()) {
2434 bool DeletedAdd = false;
2435 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2436 if (Ops.size() > AddOpsInlineThreshold ||
2437 Add->getNumOperands() > AddOpsInlineThreshold)
2438 break;
2439 // If we have an add, expand the add operands onto the end of the operands
2440 // list.
2441 Ops.erase(Ops.begin()+Idx);
2442 Ops.append(Add->op_begin(), Add->op_end());
2443 DeletedAdd = true;
2444 }
2445
2446 // If we deleted at least one add, we added operands to the end of the list,
2447 // and they are not necessarily sorted. Recurse to resort and resimplify
2448 // any operands we just acquired.
2449 if (DeletedAdd)
2450 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2451 }
2452
2453 // Skip over the add expression until we get to a multiply.
2454 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2455 ++Idx;
2456
2457 // Check to see if there are any folding opportunities present with
2458 // operands multiplied by constant values.
2459 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2460 uint64_t BitWidth = getTypeSizeInBits(Ty);
2461 DenseMap<const SCEV *, APInt> M;
2462 SmallVector<const SCEV *, 8> NewOps;
2463 APInt AccumulatedConstant(BitWidth, 0);
2464 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2465 Ops.data(), Ops.size(),
2466 APInt(BitWidth, 1), *this)) {
2467 struct APIntCompare {
2468 bool operator()(const APInt &LHS, const APInt &RHS) const {
2469 return LHS.ult(RHS);
2470 }
2471 };
2472
2473 // Some interesting folding opportunity is present, so it's worthwhile to
2474 // re-generate the operands list. Group the operands by constant scale,
2475 // to avoid multiplying by the same constant scale multiple times.
2476 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2477 for (const SCEV *NewOp : NewOps)
2478 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2479 // Re-generate the operands list.
2480 Ops.clear();
2481 if (AccumulatedConstant != 0)
2482 Ops.push_back(getConstant(AccumulatedConstant));
2483 for (auto &MulOp : MulOpLists)
2484 if (MulOp.first != 0)
2485 Ops.push_back(getMulExpr(
2486 getConstant(MulOp.first),
2487 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
2488 SCEV::FlagAnyWrap, Depth + 1));
2489 if (Ops.empty())
2490 return getZero(Ty);
2491 if (Ops.size() == 1)
2492 return Ops[0];
2493 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2494 }
2495 }
2496
2497 // If we are adding something to a multiply expression, make sure the
2498 // something is not already an operand of the multiply. If so, merge it into
2499 // the multiply.
2500 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2501 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2502 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
2503 const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
2504 if (isa<SCEVConstant>(MulOpSCEV))
2505 continue;
2506 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2507 if (MulOpSCEV == Ops[AddOp]) {
2508 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
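// Note: getOperand(MulOp == 0) picks the multiply's *other* operand --
// operand 1 when MulOp is 0, operand 0 otherwise; multiplies with more
// than two operands are rebuilt just below.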
2509 const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
2510 if (Mul->getNumOperands() != 2) {
2511 // If the multiply has more than two operands, we must get the
2512 // Y*Z term.
2513 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2514 Mul->op_begin()+MulOp);
2515 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2516 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2517 }
2518 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
2519 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2520 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
2521 SCEV::FlagAnyWrap, Depth + 1);
2522 if (Ops.size() == 2) return OuterMul;
2523 if (AddOp < Idx) {
2524 Ops.erase(Ops.begin()+AddOp);
2525 Ops.erase(Ops.begin()+Idx-1);
2526 } else {
2527 Ops.erase(Ops.begin()+Idx);
2528 Ops.erase(Ops.begin()+AddOp-1);
2529 }
2530 Ops.push_back(OuterMul);
2531 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2532 }
2533
2534 // Check this multiply against other multiplies being added together.
2535 for (unsigned OtherMulIdx = Idx+1;
2536 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
2537 ++OtherMulIdx) {
2538 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
2539 // If MulOp occurs in OtherMul, we can fold the two multiplies
2540 // together.
2541 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
2542 OMulOp != e; ++OMulOp)
2543 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
2544 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
2545 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
2546 if (Mul->getNumOperands() != 2) {
2547 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2548 Mul->op_begin()+MulOp);
2549 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2550 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2551 }
2552 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
2553 if (OtherMul->getNumOperands() != 2) {
2554 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
2555 OtherMul->op_begin()+OMulOp);
2556 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
2557 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2558 }
2559 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
2560 const SCEV *InnerMulSum =
2561 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2562 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
2563 SCEV::FlagAnyWrap, Depth + 1);
2564 if (Ops.size() == 2) return OuterMul;
2565 Ops.erase(Ops.begin()+Idx);
2566 Ops.erase(Ops.begin()+OtherMulIdx-1);
2567 Ops.push_back(OuterMul);
2568 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2569 }
2570 }
2571 }
2572 }
2573
2574 // If there are any add recurrences in the operands list, see if any other
2575 // added values are loop invariant. If so, we can fold them into the
2576 // recurrence.
2577 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2578 ++Idx;
2579
2580 // Scan over all recurrences, trying to fold loop invariants into them.
2581 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2582 // Scan all of the other operands to this add and add them to the vector if
2583 // they are loop invariant w.r.t. the recurrence.
2584 SmallVector<const SCEV *, 8> LIOps;
2585 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2586 const Loop *AddRecLoop = AddRec->getLoop();
2587 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2588 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
2589 LIOps.push_back(Ops[i]);
2590 Ops.erase(Ops.begin()+i);
2591 --i; --e;
2592 }
2593
2594 // If we found some loop invariants, fold them into the recurrence.
2595 if (!LIOps.empty()) {
2596 // Compute nowrap flags for the addition of the loop-invariant ops and
2597 // the addrec. Temporarily push it as an operand for that purpose.
2598 LIOps.push_back(AddRec);
2599 SCEV::NoWrapFlags Flags = ComputeFlags(LIOps);
2600 LIOps.pop_back();
2601
2602 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
2603 LIOps.push_back(AddRec->getStart());
2604
2605 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
2606 // This follows from the fact that the no-wrap flags on the outer add
2607 // expression are applicable on the 0th iteration, when the add recurrence
2608 // will be equal to its start value.
2609 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1);
2610
2611 // Build the new addrec. Propagate the NUW and NSW flags if both the
2612 // outer add and the inner addrec are guaranteed to have no overflow.
2613 // Always propagate NW.
2614 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
2615 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
2616
2617 // If all of the other operands were loop invariant, we are done.
2618 if (Ops.size() == 1) return NewRec;
2619
2620 // Otherwise, add the folded AddRec by the non-invariant parts.
2621 for (unsigned i = 0;; ++i)
2622 if (Ops[i] == AddRec) {
2623 Ops[i] = NewRec;
2624 break;
2625 }
2626 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2627 }
2628
2629 // Okay, if there weren't any loop invariants to be folded, check to see if
2630 // there are multiple AddRec's with the same loop induction variable being
2631 // added together. If so, we can fold them.
2632 for (unsigned OtherIdx = Idx+1;
2633 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2634 ++OtherIdx) {
2635 // We expect the AddRecExprs to be sorted in reverse dominance order,
2636 // so that the 1st found AddRecExpr is dominated by all others.
2637 assert(DT.dominates(
2638 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2639 AddRec->getLoop()->getHeader()) &&
2640 "AddRecExprs are not sorted in reverse dominance order?");
2641 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2642 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
2643 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
2644 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2645 ++OtherIdx) {
2646 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2647 if (OtherAddRec->getLoop() == AddRecLoop) {
2648 for (unsigned i = 0, e = OtherAddRec->getNumOperands();
2649 i != e; ++i) {
2650 if (i >= AddRecOps.size()) {
2651 AddRecOps.append(OtherAddRec->op_begin()+i,
2652 OtherAddRec->op_end());
2653 break;
2654 }
2655 SmallVector<const SCEV *, 2> TwoOps = {
2656 AddRecOps[i], OtherAddRec->getOperand(i)};
2657 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2658 }
2659 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2660 }
2661 }
2662 // Step size has changed, so we cannot guarantee no self-wraparound.
2663 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
2664 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2665 }
2666 }
2667
2668 // Otherwise couldn't fold anything into this recurrence. Move onto the
2669 // next one.
2670 }
2671
2672 // Okay, it looks like we really DO need an add expr. Check to see if we
2673 // already have one, otherwise create a new one.
2674 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2675 }
2676
2677 const SCEV *
2678 ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
2679 SCEV::NoWrapFlags Flags) {
2680 FoldingSetNodeID ID;
2681 ID.AddInteger(scAddExpr);
2682 for (const SCEV *Op : Ops)
2683 ID.AddPointer(Op);
2684 void *IP = nullptr;
2685 SCEVAddExpr *S =
2686 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2687 if (!S) {
2688 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2689 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2690 S = new (SCEVAllocator)
2691 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
2692 UniqueSCEVs.InsertNode(S, IP);
2693 addToLoopUseLists(S);
2694 }
2695 S->setNoWrapFlags(Flags);
2696 return S;
2697 }
2698
2699 const SCEV *
2700 ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
2701 const Loop *L, SCEV::NoWrapFlags Flags) {
2702 FoldingSetNodeID ID;
2703 ID.AddInteger(scAddRecExpr);
2704 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2705 ID.AddPointer(Ops[i]);
2706 ID.AddPointer(L);
2707 void *IP = nullptr;
2708 SCEVAddRecExpr *S =
2709 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2710 if (!S) {
2711 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2712 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2713 S = new (SCEVAllocator)
2714 SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
2715 UniqueSCEVs.InsertNode(S, IP);
2716 addToLoopUseLists(S);
2717 }
2718 setNoWrapFlags(S, Flags);
2719 return S;
2720 }
2721
2722 const SCEV *
2723 ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
2724 SCEV::NoWrapFlags Flags) {
2725 FoldingSetNodeID ID;
2726 ID.AddInteger(scMulExpr);
2727 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2728 ID.AddPointer(Ops[i]);
2729 void *IP = nullptr;
2730 SCEVMulExpr *S =
2731 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2732 if (!S) {
2733 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2734 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2735 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2736 O, Ops.size());
2737 UniqueSCEVs.InsertNode(S, IP);
2738 addToLoopUseLists(S);
2739 }
2740 S->setNoWrapFlags(Flags);
2741 return S;
2742 }
2743
2744 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
2745 uint64_t k = i*j;
2746 if (j > 1 && k / j != i) Overflow = true;
2747 return k;
2748 }
2749
2750 /// Compute the result of "n choose k", the binomial coefficient. If an
2751 /// intermediate computation overflows, Overflow will be set and the return will
2752 /// be garbage. Overflow is not cleared on absence of overflow.
2753 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
2754 // We use the multiplicative formula:
2755 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2756 // At each iteration, we take the i-th term of the numerator and divide by
2757 // the i-th term of the denominator. This division will always produce an
2758 // integral result, and helps reduce the chance of overflow in the
2759 // intermediate computations. However, we can still overflow even when the
2760 // final result would fit.
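// For example, Choose(6, 2) runs two iterations:
// r = (1 * 6) / 1 = 6, then r = (6 * 5) / 2 = 15 == C(6, 2).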
2761
2762 if (n == 0 || n == k) return 1;
2763 if (k > n) return 0;
2764
2765 if (k > n/2)
2766 k = n-k;
2767
2768 uint64_t r = 1;
2769 for (uint64_t i = 1; i <= k; ++i) {
2770 r = umul_ov(r, n-(i-1), Overflow);
2771 r /= i;
2772 }
2773 return r;
2774 }
2775
2776 /// Determine if any of the operands in this SCEV are a constant or if
2777 /// any of the add or multiply expressions in this SCEV contain a constant.
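/// E.g., this returns true for (3 + (2 * x)) but false for ((x /u 2) + y):
/// the traversal only descends into add and mul nodes, so the constant
/// buried inside the udiv is never visited.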
2778 static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
2779 struct FindConstantInAddMulChain {
2780 bool FoundConstant = false;
2781
2782 bool follow(const SCEV *S) {
2783 FoundConstant |= isa<SCEVConstant>(S);
2784 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
2785 }
2786
2787 bool isDone() const {
2788 return FoundConstant;
2789 }
2790 };
2791
2792 FindConstantInAddMulChain F;
2793 SCEVTraversal<FindConstantInAddMulChain> ST(F);
2794 ST.visitAll(StartExpr);
2795 return F.FoundConstant;
2796 }
2797
2798 /// Get a canonical multiply expression, or something simpler if possible.
2799 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
2800 SCEV::NoWrapFlags OrigFlags,
2801 unsigned Depth) {
2802 assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
2803 "only nuw or nsw allowed");
2804 assert(!Ops.empty() && "Cannot get empty mul!");
2805 if (Ops.size() == 1) return Ops[0];
2806 #ifndef NDEBUG
2807 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2808 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2809 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2810 "SCEVMulExpr operand types don't match!");
2811 #endif
2812
2813 // Sort by complexity; this groups all similar expression types together.
2814 GroupByComplexity(Ops, &LI, DT);
2815
2816 // If there are any constants, fold them together.
2817 unsigned Idx = 0;
2818 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2819 ++Idx;
2820 assert(Idx < Ops.size());
2821 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2822 // We found two constants, fold them together!
2823 Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt());
2824 if (Ops.size() == 2) return Ops[0];
2825 Ops.erase(Ops.begin()+1); // Erase the folded element
2826 LHSC = cast<SCEVConstant>(Ops[0]);
2827 }
2828
2829 // If we have a multiply of zero, it will always be zero.
2830 if (LHSC->getValue()->isZero())
2831 return LHSC;
2832
2833 // If we are left with a constant one being multiplied, strip it off.
2834 if (LHSC->getValue()->isOne()) {
2835 Ops.erase(Ops.begin());
2836 --Idx;
2837 }
2838
2839 if (Ops.size() == 1)
2840 return Ops[0];
2841 }
2842
2843 // Delay expensive flag strengthening until necessary.
2844 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
2845 return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
2846 };
2847
2848 // Limit the recursion depth.
2849 if (Depth > MaxArithDepth || hasHugeExpression(Ops))
2850 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
2851
2852 if (SCEV *S = std::get<0>(findExistingSCEVInCache(scMulExpr, Ops))) {
2853 // Don't strengthen flags if we have no new information.
2854 SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S);
2855 if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
2856 Mul->setNoWrapFlags(ComputeFlags(Ops));
2857 return S;
2858 }
2859
2860 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2861 if (Ops.size() == 2) {
2862 // C1*(C2+V) -> C1*C2 + C1*V
2863 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
2864 // If any of Add's ops are Adds or Muls with a constant, apply this
2865 // transformation as well.
2866 //
2867 // TODO: There are some cases where this transformation is not
2868 // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
2869 // this transformation should be narrowed down.
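// For example, 2 * (3 + %x) distributes to 6 + 2*%x here.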
2870 if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add))
2871 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
2872 SCEV::FlagAnyWrap, Depth + 1),
2873 getMulExpr(LHSC, Add->getOperand(1),
2874 SCEV::FlagAnyWrap, Depth + 1),
2875 SCEV::FlagAnyWrap, Depth + 1);
2876
2877 if (Ops[0]->isAllOnesValue()) {
2878 // If we have a mul by -1 of an add, try distributing the -1 among the
2879 // add operands.
2880 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
2881 SmallVector<const SCEV *, 4> NewOps;
2882 bool AnyFolded = false;
2883 for (const SCEV *AddOp : Add->operands()) {
2884 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
2885 Depth + 1);
2886 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
2887 NewOps.push_back(Mul);
2888 }
2889 if (AnyFolded)
2890 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
2891 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
2892 // Negation preserves a recurrence's no self-wrap property.
2893 SmallVector<const SCEV *, 4> Operands;
2894 for (const SCEV *AddRecOp : AddRec->operands())
2895 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
2896 Depth + 1));
2897
2898 return getAddRecExpr(Operands, AddRec->getLoop(),
2899 AddRec->getNoWrapFlags(SCEV::FlagNW));
2900 }
2901 }
2902 }
2903 }
2904
2905 // Skip over the add expressions until we get to a multiply.
2906 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2907 ++Idx;
2908
2909 // If there are mul operands, inline them all into this expression.
2910 if (Idx < Ops.size()) {
2911 bool DeletedMul = false;
2912 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2913 if (Ops.size() > MulOpsInlineThreshold)
2914 break;
2915 // If we have a mul, expand the mul operands onto the end of the
2916 // operands list.
2917 Ops.erase(Ops.begin()+Idx);
2918 Ops.append(Mul->op_begin(), Mul->op_end());
2919 DeletedMul = true;
2920 }
2921
2922 // If we deleted at least one mul, we added operands to the end of the
2923 // list, and they are not necessarily sorted. Recurse to resort and
2924 // resimplify any operands we just acquired.
2925 if (DeletedMul)
2926 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2927 }
2928
2929 // If there are any add recurrences in the operands list, see if any of the
2930 // other operands are loop invariant. If so, we can fold them into the
2931 // recurrence.
2932 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2933 ++Idx;
2934
2935 // Scan over all recurrences, trying to fold loop invariants into them.
2936 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2937 // Scan all of the other operands to this mul and add them to the vector
2938 // if they are loop invariant w.r.t. the recurrence.
2939 SmallVector<const SCEV *, 8> LIOps;
2940 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2941 const Loop *AddRecLoop = AddRec->getLoop();
2942 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2943 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
2944 LIOps.push_back(Ops[i]);
2945 Ops.erase(Ops.begin()+i);
2946 --i; --e;
2947 }
2948
2949 // If we found some loop invariants, fold them into the recurrence.
2950 if (!LIOps.empty()) {
2951 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
2952 SmallVector<const SCEV *, 4> NewOps;
2953 NewOps.reserve(AddRec->getNumOperands());
2954 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
2955 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
2956 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
2957 SCEV::FlagAnyWrap, Depth + 1));
2958
2959 // Build the new addrec. Propagate the NUW and NSW flags if both the
2960 // outer mul and the inner addrec are guaranteed to have no overflow.
2961 //
2962 // No self-wrap cannot be guaranteed after changing the step size, but
2963 // will be inferred if either NUW or NSW is true.
2964 SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec});
2965 const SCEV *NewRec = getAddRecExpr(
2966 NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags));
2967
2968 // If all of the other operands were loop invariant, we are done.
2969 if (Ops.size() == 1) return NewRec;
2970
2971 // Otherwise, multiply the folded AddRec by the non-invariant parts.
2972 for (unsigned i = 0;; ++i)
2973 if (Ops[i] == AddRec) {
2974 Ops[i] = NewRec;
2975 break;
2976 }
2977 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2978 }
2979
2980 // Okay, if there weren't any loop invariants to be folded, check to see
2981 // if there are multiple AddRec's with the same loop induction variable
2982 // being multiplied together. If so, we can fold them.
2983
2984 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
2985 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
2986 //       choose(x, 2x-y)*choose(2x-y, x-z)*A_{y-z}*B_z
2987 // ]]],+,...up to x=2n}.
2988 // Note that the arguments to choose() are always integers with values
2989 // known at compile time, never SCEV objects.
2990 //
2991 // The implementation avoids pointless extra computations when the two
2992 // addrec's are of different length (mathematically, it's equivalent to
2993 // an infinite stream of zeros on the right).
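// For example, {1,+,1}<L> * {1,+,1}<L>, i.e. (n+1)^2, folds to
// {1,+,3,+,2}<L>.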
2994 bool OpsModified = false;
2995 for (unsigned OtherIdx = Idx+1;
2996 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2997 ++OtherIdx) {
2998 const SCEVAddRecExpr *OtherAddRec =
2999 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3000 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
3001 continue;
3002
3003 // Limit max number of arguments to avoid creation of unreasonably big
3004 // SCEVAddRecs with very complex operands.
3005 if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
3006 MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
3007 continue;
3008
3009 bool Overflow = false;
3010 Type *Ty = AddRec->getType();
3011 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
3012 SmallVector<const SCEV*, 7> AddRecOps;
3013 for (int x = 0, xe = AddRec->getNumOperands() +
3014 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
3015 SmallVector <const SCEV *, 7> SumOps;
3016 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3017 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
3018 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
3019 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
3020 z < ze && !Overflow; ++z) {
3021 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
3022 uint64_t Coeff;
3023 if (LargerThan64Bits)
3024 Coeff = umul_ov(Coeff1, Coeff2, Overflow);
3025 else
3026 Coeff = Coeff1*Coeff2;
3027 const SCEV *CoeffTerm = getConstant(Ty, Coeff);
3028 const SCEV *Term1 = AddRec->getOperand(y-z);
3029 const SCEV *Term2 = OtherAddRec->getOperand(z);
3030 SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
3031 SCEV::FlagAnyWrap, Depth + 1));
3032 }
3033 }
3034 if (SumOps.empty())
3035 SumOps.push_back(getZero(Ty));
3036 AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
3037 }
3038 if (!Overflow) {
3039 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
3040 SCEV::FlagAnyWrap);
3041 if (Ops.size() == 2) return NewAddRec;
3042 Ops[Idx] = NewAddRec;
3043 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
3044 OpsModified = true;
3045 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3046 if (!AddRec)
3047 break;
3048 }
3049 }
3050 if (OpsModified)
3051 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3052
3053 // Otherwise couldn't fold anything into this recurrence. Move onto the
3054 // next one.
3055 }
3056
3057 // Okay, it looks like we really DO need a mul expr. Check to see if we
3058 // already have one, otherwise create a new one.
3059 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3060 }
3061
3062 /// Represents an unsigned remainder expression based on unsigned division.
3063 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
3064 const SCEV *RHS) {
3065 assert(getEffectiveSCEVType(LHS->getType()) ==
3066 getEffectiveSCEVType(RHS->getType()) &&
3067 "SCEVURemExpr operand types don't match!");
3068
3069 // Short-circuit easy cases
3070 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3071 // If constant is one, the result is trivial
3072 if (RHSC->getValue()->isOne())
3073 return getZero(LHS->getType()); // X urem 1 --> 0
3074
3075 // If constant is a power of two, fold into a zext(trunc(LHS)).
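// For example, for i32 %x, (%x urem 8) becomes zext(trunc %x to i3) to i32,
// which keeps exactly the low three bits.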
3076 if (RHSC->getAPInt().isPowerOf2()) {
3077 Type *FullTy = LHS->getType();
3078 Type *TruncTy =
3079 IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
3080 return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
3081 }
3082 }
3083
3084 // Fall back to the identity: %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
3085 const SCEV *UDiv = getUDivExpr(LHS, RHS);
3086 const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
3087 return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
3088 }
3089
3090 /// Get a canonical unsigned division expression, or something simpler if
3091 /// possible.
3092 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
3093 const SCEV *RHS) {
3094 assert(getEffectiveSCEVType(LHS->getType()) ==
3095 getEffectiveSCEVType(RHS->getType()) &&
3096 "SCEVUDivExpr operand types don't match!");
3097
3098 FoldingSetNodeID ID;
3099 ID.AddInteger(scUDivExpr);
3100 ID.AddPointer(LHS);
3101 ID.AddPointer(RHS);
3102 void *IP = nullptr;
3103 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3104 return S;
3105
3106 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3107 if (RHSC->getValue()->isOne())
3108 return LHS; // X udiv 1 --> x
3109 // If the denominator is zero, the result of the udiv is undefined. Don't
3110 // try to analyze it, because the resolution chosen here may differ from
3111 // the resolution chosen in other parts of the compiler.
3112 if (!RHSC->getValue()->isZero()) {
3113 // Determine if the division can be folded into the operands of the LHS.
3115 // TODO: Generalize this to non-constants by using known-bits information.
3116 Type *Ty = LHS->getType();
3117 unsigned LZ = RHSC->getAPInt().countLeadingZeros();
3118 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
3119 // For non-power-of-two values, effectively round the value up to the
3120 // nearest power of two.
3121 if (!RHSC->getAPInt().isPowerOf2())
3122 ++MaxShiftAmt;
3123 IntegerType *ExtTy =
3124 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
3125 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
3126 if (const SCEVConstant *Step =
3127 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
3128 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
3129 const APInt &StepInt = Step->getAPInt();
3130 const APInt &DivInt = RHSC->getAPInt();
3131 if (!StepInt.urem(DivInt) &&
3132 getZeroExtendExpr(AR, ExtTy) ==
3133 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3134 getZeroExtendExpr(Step, ExtTy),
3135 AR->getLoop(), SCEV::FlagAnyWrap)) {
3136 SmallVector<const SCEV *, 4> Operands;
3137 for (const SCEV *Op : AR->operands())
3138 Operands.push_back(getUDivExpr(Op, RHS));
3139 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
3140 }
3141 // Get a canonical UDivExpr for a recurrence.
3142 // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
3143 // We can currently only fold X%N if X is constant.
3144 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3145 if (StartC && !DivInt.urem(StepInt) &&
3146 getZeroExtendExpr(AR, ExtTy) ==
3147 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3148 getZeroExtendExpr(Step, ExtTy),
3149 AR->getLoop(), SCEV::FlagAnyWrap)) {
3150 const APInt &StartInt = StartC->getAPInt();
3151 const APInt &StartRem = StartInt.urem(StepInt);
3152 if (StartRem != 0) {
3153 const SCEV *NewLHS =
3154 getAddRecExpr(getConstant(StartInt - StartRem), Step,
3155 AR->getLoop(), SCEV::FlagNW);
3156 if (LHS != NewLHS) {
3157 LHS = NewLHS;
3158
3159 // Reset the ID to include the new LHS, and check if it is
3160 // already cached.
3161 ID.clear();
3162 ID.AddInteger(scUDivExpr);
3163 ID.AddPointer(LHS);
3164 ID.AddPointer(RHS);
3165 IP = nullptr;
3166 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3167 return S;
3168 }
3169 }
3170 }
3171 }
3172 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
3173 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
3174 SmallVector<const SCEV *, 4> Operands;
3175 for (const SCEV *Op : M->operands())
3176 Operands.push_back(getZeroExtendExpr(Op, ExtTy));
3177 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
3178 // Find an operand that's safely divisible.
3179 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3180 const SCEV *Op = M->getOperand(i);
3181 const SCEV *Div = getUDivExpr(Op, RHSC);
3182 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
3183 Operands = SmallVector<const SCEV *, 4>(M->operands());
3184 Operands[i] = Div;
3185 return getMulExpr(Operands);
3186 }
3187 }
3188 }
3189
3190 // (A/B)/C --> A/(B*C) if safe and B*C can be folded.
3191 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) {
3192 if (auto *DivisorConstant =
3193 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
3194 bool Overflow = false;
3195 APInt NewRHS =
3196 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow);
3197 if (Overflow) {
3198 return getConstant(RHSC->getType(), 0, false);
3199 }
3200 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS));
3201 }
3202 }
3203
3204 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
3205 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
3206 SmallVector<const SCEV *, 4> Operands;
3207 for (const SCEV *Op : A->operands())
3208 Operands.push_back(getZeroExtendExpr(Op, ExtTy));
3209 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
3210 Operands.clear();
3211 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
3212 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
3213 if (isa<SCEVUDivExpr>(Op) ||
3214 getMulExpr(Op, RHS) != A->getOperand(i))
3215 break;
3216 Operands.push_back(Op);
3217 }
3218 if (Operands.size() == A->getNumOperands())
3219 return getAddExpr(Operands);
3220 }
3221 }
3222
3223 // Fold if both operands are constant.
3224 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
3225 Constant *LHSCV = LHSC->getValue();
3226 Constant *RHSCV = RHSC->getValue();
3227 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
3228 RHSCV)));
3229 }
3230 }
3231 }
3232
3233 // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs
3234 // changes). Make sure we get a new one.
3235 IP = nullptr;
3236 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
3237 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
3238 LHS, RHS);
3239 UniqueSCEVs.InsertNode(S, IP);
3240 addToLoopUseLists(S);
3241 return S;
3242 }
3243
3244 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
3245 APInt A = C1->getAPInt().abs();
3246 APInt B = C2->getAPInt().abs();
3247 uint32_t ABW = A.getBitWidth();
3248 uint32_t BBW = B.getBitWidth();
3249
3250 if (ABW > BBW)
3251 B = B.zext(ABW);
3252 else if (ABW < BBW)
3253 A = A.zext(BBW);
3254
3255 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
3256 }
3257
3258 /// Get a canonical unsigned division expression, or something simpler if
3259 /// possible. There is no representation for an exact udiv in SCEV IR, but we
3260 /// can attempt to remove factors from the LHS and RHS. We can't do this when
3261 /// it's not exact because the udiv may be clearing bits.
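/// For example, if (%x * 8) is known not to wrap unsigned, then
/// (%x * 8) /u 4 simplifies to %x * 2 by cancelling the common factor of 4.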
3262 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
3263 const SCEV *RHS) {
3264 // TODO: we could try to find factors in all sorts of things, but for now we
3265 // just deal with u/exact (multiply, constant). See SCEVDivision towards the
3266 // end of this file for inspiration.
3267
3268 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
3269 if (!Mul || !Mul->hasNoUnsignedWrap())
3270 return getUDivExpr(LHS, RHS);
3271
3272 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
3273 // If the mulexpr multiplies by a constant, then that constant must be the
3274 // first element of the mulexpr.
3275 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
3276 if (LHSCst == RHSCst) {
3277 SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands()));
3278 return getMulExpr(Operands);
3279 }
3280
3281 // We can't just assume that LHSCst divides RHSCst cleanly; it could be
3282 // that there's a factor provided by one of the other terms. We need to
3283 // check.
3284 APInt Factor = gcd(LHSCst, RHSCst);
3285 if (!Factor.isIntN(1)) {
3286 LHSCst =
3287 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
3288 RHSCst =
3289 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
3290 SmallVector<const SCEV *, 2> Operands;
3291 Operands.push_back(LHSCst);
3292 Operands.append(Mul->op_begin() + 1, Mul->op_end());
3293 LHS = getMulExpr(Operands);
3294 RHS = RHSCst;
3295 Mul = dyn_cast<SCEVMulExpr>(LHS);
3296 if (!Mul)
3297 return getUDivExactExpr(LHS, RHS);
3298 }
3299 }
3300 }
3301
3302 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
3303 if (Mul->getOperand(i) == RHS) {
3304 SmallVector<const SCEV *, 2> Operands;
3305 Operands.append(Mul->op_begin(), Mul->op_begin() + i);
3306 Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
3307 return getMulExpr(Operands);
3308 }
3309 }
3310
3311 return getUDivExpr(LHS, RHS);
3312 }
3313
3314 /// Get an add recurrence expression for the specified loop. Simplify the
3315 /// expression as much as possible.
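/// A step that is itself an addrec on the same loop is flattened, so
/// {a,+,{b,+,c}<L>}<L> becomes {a,+,b,+,c}<L>.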
3316 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
3317 const Loop *L,
3318 SCEV::NoWrapFlags Flags) {
3319 SmallVector<const SCEV *, 4> Operands;
3320 Operands.push_back(Start);
3321 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3322 if (StepChrec->getLoop() == L) {
3323 Operands.append(StepChrec->op_begin(), StepChrec->op_end());
3324 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
3325 }
3326
3327 Operands.push_back(Step);
3328 return getAddRecExpr(Operands, L, Flags);
3329 }
3330
3331 /// Get an add recurrence expression for the specified loop. Simplify the
3332 /// expression as much as possible.
3333 const SCEV *
3334 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
3335 const Loop *L, SCEV::NoWrapFlags Flags) {
3336 if (Operands.size() == 1) return Operands[0];
3337 #ifndef NDEBUG
3338 Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
3339 for (unsigned i = 1, e = Operands.size(); i != e; ++i)
3340 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
3341 "SCEVAddRecExpr operand types don't match!");
3342 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
3343 assert(isLoopInvariant(Operands[i], L) &&
3344 "SCEVAddRecExpr operand is not loop-invariant!");
3345 #endif
3346
3347 if (Operands.back()->isZero()) {
3348 Operands.pop_back();
3349 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
3350 }
3351
3352 // It's tempting to call getConstantMaxBackedgeTakenCount here and
3353 // use that information to infer NUW and NSW flags. However, computing a
3354 // BE count requires calling getAddRecExpr, so we may not yet have a
3355 // meaningful BE count at this point (and if we don't, we'd be stuck
3356 // with a SCEVCouldNotCompute as the cached BE count).
3357
3358 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3359
3360 // Canonicalize nested AddRecs by nesting them in order of loop depth.
3361 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3362 const Loop *NestedLoop = NestedAR->getLoop();
3363 if (L->contains(NestedLoop)
3364 ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
3365 : (!NestedLoop->contains(L) &&
3366 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
3367 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
3368 Operands[0] = NestedAR->getStart();
3369 // AddRecs require their operands be loop-invariant with respect to their
3370 // loops. Don't perform this transformation if it would break this
3371 // requirement.
3372 bool AllInvariant = all_of(
3373 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
3374
3375 if (AllInvariant) {
3376 // Create a recurrence for the outer loop with the same step size.
3377 //
3378 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
3379 // inner recurrence has the same property.
3380 SCEV::NoWrapFlags OuterFlags =
3381 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
3382
3383 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
3384 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
3385 return isLoopInvariant(Op, NestedLoop);
3386 });
3387
3388 if (AllInvariant) {
3389 // Ok, both add recurrences are valid after the transformation.
3390 //
3391 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
3392 // the outer recurrence has the same property.
3393 SCEV::NoWrapFlags InnerFlags =
3394 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
3395 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3396 }
3397 }
3398 // Reset Operands to its original state.
3399 Operands[0] = NestedAR;
3400 }
3401 }
3402
3403 // Okay, it looks like we really DO need an addrec expr. Check to see if we
3404 // already have one, otherwise create a new one.
3405 return getOrCreateAddRecExpr(Operands, L, Flags);
3406 }
3407
3408 const SCEV *
3409 ScalarEvolution::getGEPExpr(GEPOperator *GEP,
3410 const SmallVectorImpl<const SCEV *> &IndexExprs) {
3411 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
3412 // getSCEV(Base)->getType() has the same address space as Base->getType()
3413 // because SCEV::getType() preserves the address space.
3414 Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
3415 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
3416 // instruction to its SCEV, because the Instruction may be guarded by control
3417 // flow and the no-overflow bits may not be valid for the expression in any
3418 // context. This can be fixed similarly to how these flags are handled for
3419 // adds.
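// For example, with 4-byte i32 elements, an inbounds GEP into [10 x i32]* %p
// with indices (%i, %j) is modeled as %p + 40*%i + 4*%j.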
3420 SCEV::NoWrapFlags OffsetWrap =
3421 GEP->isInBounds() ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
3422
3423 Type *CurTy = GEP->getType();
3424 bool FirstIter = true;
3425 SmallVector<const SCEV *, 4> Offsets;
3426 for (const SCEV *IndexExpr : IndexExprs) {
3427 // Compute the (potentially symbolic) offset in bytes for this index.
3428 if (StructType *STy = dyn_cast<StructType>(CurTy)) {
3429 // For a struct, add the member offset.
3430 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
3431 unsigned FieldNo = Index->getZExtValue();
3432 const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo);
3433 Offsets.push_back(FieldOffset);
3434
3435 // Update CurTy to the type of the field at Index.
3436 CurTy = STy->getTypeAtIndex(Index);
3437 } else {
3438 // Update CurTy to its element type.
3439 if (FirstIter) {
3440 assert(isa<PointerType>(CurTy) &&
3441 "The first index of a GEP indexes a pointer");
3442 CurTy = GEP->getSourceElementType();
3443 FirstIter = false;
3444 } else {
3445 CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0);
3446 }
3447 // For an array, add the element offset, explicitly scaled.
3448 const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy);
3449 // Getelementptr indices are signed.
3450 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy);
3451
3452 // Multiply the index by the element size to compute the element offset.
3453 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3454 Offsets.push_back(LocalOffset);
3455 }
3456 }
3457
3458 // Handle degenerate case of GEP without offsets.
3459 if (Offsets.empty())
3460 return BaseExpr;
3461
3462 // Add the offsets together, assuming nsw if inbounds.
3463 const SCEV *Offset = getAddExpr(Offsets, OffsetWrap);
3464 // Add the base address and the offset. We cannot use the nsw flag, as the
3465 // base address is unsigned. However, if we know that the offset is
3466 // non-negative, we can use nuw.
3467 SCEV::NoWrapFlags BaseWrap = GEP->isInBounds() && isKnownNonNegative(Offset)
3468 ? SCEV::FlagNUW : SCEV::FlagAnyWrap;
3469 return getAddExpr(BaseExpr, Offset, BaseWrap);
3470 }
3471
3472 std::tuple<SCEV *, FoldingSetNodeID, void *>
3473 ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
3474 ArrayRef<const SCEV *> Ops) {
3475 FoldingSetNodeID ID;
3476 void *IP = nullptr;
3477 ID.AddInteger(SCEVType);
3478 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3479 ID.AddPointer(Ops[i]);
3480 return std::tuple<SCEV *, FoldingSetNodeID, void *>(
3481 UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP);
3482 }
3483
3484 const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) {
3485 SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
3486 return getSMaxExpr(Op, getNegativeSCEV(Op, Flags));
3487 }
3488
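/// Build signum(Op) as smin(smax(Op, -1), 1): negative values clamp to -1,
/// zero maps to 0, and positive values clamp to 1.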
3489 const SCEV *ScalarEvolution::getSignumExpr(const SCEV *Op) {
3490 Type *Ty = Op->getType();
3491 return getSMinExpr(getSMaxExpr(Op, getMinusOne(Ty)), getOne(Ty));
3492 }
3493
3494 const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
3495 SmallVectorImpl<const SCEV *> &Ops) {
3496 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
3497 if (Ops.size() == 1) return Ops[0];
3498 #ifndef NDEBUG
3499 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
3500 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
3501 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
3502 "Operand types don't match!");
3503 #endif
3504
3505 bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr;
3506 bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr;
3507
3508 // Sort by complexity; this groups all similar expression types together.
3509 GroupByComplexity(Ops, &LI, DT);
3510
3511 // Check if we have created the same expression before.
3512 if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) {
3513 return S;
3514 }
3515
3516 // If there are any constants, fold them together.
3517 unsigned Idx = 0;
3518 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3519 ++Idx;
3520 assert(Idx < Ops.size());
3521 auto FoldOp = [&](const APInt &LHS, const APInt &RHS) {
3522 if (Kind == scSMaxExpr)
3523 return APIntOps::smax(LHS, RHS);
3524 else if (Kind == scSMinExpr)
3525 return APIntOps::smin(LHS, RHS);
3526 else if (Kind == scUMaxExpr)
3527 return APIntOps::umax(LHS, RHS);
3528 else if (Kind == scUMinExpr)
3529 return APIntOps::umin(LHS, RHS);
3530 llvm_unreachable("Unknown SCEV min/max opcode");
3531 };
3532
3533 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3534 // We found two constants, fold them together!
3535 ConstantInt *Fold = ConstantInt::get(
3536 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt()));
3537 Ops[0] = getConstant(Fold);
3538 Ops.erase(Ops.begin()+1); // Erase the folded element
3539 if (Ops.size() == 1) return Ops[0];
3540 LHSC = cast<SCEVConstant>(Ops[0]);
3541 }
3542
3543 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned);
3544 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned);
3545
3546 if (IsMax ? IsMinV : IsMaxV) {
3547 // If we are left with a constant minimum(/maximum)-int, strip it off.
3548 Ops.erase(Ops.begin());
3549 --Idx;
3550 } else if (IsMax ? IsMaxV : IsMinV) {
3551 // If we have a max(/min) with a constant maximum(/minimum)-int,
3552 // it will always be the extremum.
3553 return LHSC;
3554 }
3555
3556 if (Ops.size() == 1) return Ops[0];
3557 }
3558
3559 // Find the first operation of the same kind
3560 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind)
3561 ++Idx;
3562
3563 // Check to see if one of the operands is of the same kind. If so, expand its
3564 // operands onto our operand list, and recurse to simplify.
3565 if (Idx < Ops.size()) {
3566 bool DeletedAny = false;
3567 while (Ops[Idx]->getSCEVType() == Kind) {
3568 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]);
3569 Ops.erase(Ops.begin()+Idx);
3570 Ops.append(SMME->op_begin(), SMME->op_end());
3571 DeletedAny = true;
3572 }
3573
3574 if (DeletedAny)
3575 return getMinMaxExpr(Kind, Ops);
3576 }
3577
3578 // Okay, check to see if the same value occurs in the operand list twice. If
3579 // so, delete one. Since we sorted the list, these values are required to
3580 // be adjacent.
3581 llvm::CmpInst::Predicate GEPred =
3582 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
3583 llvm::CmpInst::Predicate LEPred =
3584 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
3585 llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred;
3586 llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred;
3587 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
3588 if (Ops[i] == Ops[i + 1] ||
3589 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
3590 // X op Y op Y --> X op Y
3591 // X op Y --> X, if we know X, Y are ordered appropriately
3592 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
3593 --i;
3594 --e;
3595 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
3596 Ops[i + 1])) {
3597 // X op Y --> Y, if we know X, Y are ordered appropriately
3598 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
3599 --i;
3600 --e;
3601 }
3602 }
3603
3604 if (Ops.size() == 1) return Ops[0];
3605
3606 assert(!Ops.empty() && "Reduced smax down to nothing!");
3607
3608 // Okay, it looks like we really DO need an expr. Check to see if we
3609 // already have one, otherwise create a new one.
3610 const SCEV *ExistingSCEV;
3611 FoldingSetNodeID ID;
3612 void *IP;
3613 std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops);
3614 if (ExistingSCEV)
3615 return ExistingSCEV;
3616 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3617 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3618 SCEV *S = new (SCEVAllocator)
3619 SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
3620
3621 UniqueSCEVs.InsertNode(S, IP);
3622 addToLoopUseLists(S);
3623 return S;
3624 }
3625
3626 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
3627 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3628 return getSMaxExpr(Ops);
3629 }
3630
3631 const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3632 return getMinMaxExpr(scSMaxExpr, Ops);
3633 }
3634
3635 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
3636 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3637 return getUMaxExpr(Ops);
3638 }
3639
3640 const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3641 return getMinMaxExpr(scUMaxExpr, Ops);
3642 }
3643
3644 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
3645 const SCEV *RHS) {
3646 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
3647 return getSMinExpr(Ops);
3648 }
3649
3650 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
3651 return getMinMaxExpr(scSMinExpr, Ops);
3652 }
3653
3654 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
3655 const SCEV *RHS) {
3656 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
3657 return getUMinExpr(Ops);
3658 }
3659
3660 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
3661 return getMinMaxExpr(scUMinExpr, Ops);
3662 }
3663
3664 const SCEV *
3665 ScalarEvolution::getSizeOfScalableVectorExpr(Type *IntTy,
3666 ScalableVectorType *ScalableTy) {
3667 Constant *NullPtr = Constant::getNullValue(ScalableTy->getPointerTo());
3668 Constant *One = ConstantInt::get(IntTy, 1);
3669 Constant *GEP = ConstantExpr::getGetElementPtr(ScalableTy, NullPtr, One);
3670 // Note that the expression we created is the final expression; we don't
3671 // want to simplify it any further. Also, if we call a normal getSCEV(),
3672 // we'll end up in an endless recursion. So just create an SCEVUnknown.
3673 return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy));
3674 }
3675
3676 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
3677 if (auto *ScalableAllocTy = dyn_cast<ScalableVectorType>(AllocTy))
3678 return getSizeOfScalableVectorExpr(IntTy, ScalableAllocTy);
3679 // We can bypass creating a target-independent constant expression and then
3680 // folding it back into a ConstantInt. This is just a compile-time
3681 // optimization.
3682 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
3683 }
3684
3685 const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) {
3686 if (auto *ScalableStoreTy = dyn_cast<ScalableVectorType>(StoreTy))
3687 return getSizeOfScalableVectorExpr(IntTy, ScalableStoreTy);
3688 // We can bypass creating a target-independent constant expression and then
3689 // folding it back into a ConstantInt. This is just a compile-time
3690 // optimization.
3691 return getConstant(IntTy, getDataLayout().getTypeStoreSize(StoreTy));
3692 }
3693
3694 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
3695 StructType *STy,
3696 unsigned FieldNo) {
3697 // We can bypass creating a target-independent constant expression and then
3698 // folding it back into a ConstantInt. This is just a compile-time
3699 // optimization.
3700 return getConstant(
3701 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
3702 }
3703
3704 const SCEV *ScalarEvolution::getUnknown(Value *V) {
3705 // Don't attempt to do anything other than create a SCEVUnknown object
3706 // here. createSCEV only calls getUnknown after checking for all other
3707 // interesting possibilities, and any other code that calls getUnknown
3708 // is doing so in order to hide a value from SCEV canonicalization.
3709
3710 FoldingSetNodeID ID;
3711 ID.AddInteger(scUnknown);
3712 ID.AddPointer(V);
3713 void *IP = nullptr;
3714 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
3715 assert(cast<SCEVUnknown>(S)->getValue() == V &&
3716 "Stale SCEVUnknown in uniquing map!");
3717 return S;
3718 }
3719 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
3720 FirstUnknown);
3721 FirstUnknown = cast<SCEVUnknown>(S);
3722 UniqueSCEVs.InsertNode(S, IP);
3723 return S;
3724 }
3725
3726 //===----------------------------------------------------------------------===//
3727 // Basic SCEV Analysis and PHI Idiom Recognition Code
3728 //
3729
3730 /// Test if values of the given type are analyzable within the SCEV
3731 /// framework. This primarily includes integer types, and it can optionally
3732 /// include pointer types if the ScalarEvolution class has access to
3733 /// target-specific information.
3734 bool ScalarEvolution::isSCEVable(Type *Ty) const {
3735 // Integers and pointers are always SCEVable.
3736 return Ty->isIntOrPtrTy();
3737 }
3738
3739 /// Return the size in bits of the specified type, for which isSCEVable must
3740 /// return true.
3741 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
3742 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3743 if (Ty->isPointerTy())
3744 return getDataLayout().getIndexTypeSizeInBits(Ty);
3745 return getDataLayout().getTypeSizeInBits(Ty);
3746 }
3747
3748 /// Return a type with the same bitwidth as the given type and which represents
3749 /// how SCEV will treat the given type, for which isSCEVable must return
3750 /// true. For pointer types, this is the pointer index sized integer type.
3751 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
3752 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3753
3754 if (Ty->isIntegerTy())
3755 return Ty;
3756
3757 // The only other supported type is pointer.
3758 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
3759 return getDataLayout().getIndexType(Ty);
3760 }
3761
3762 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
3763 return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
3764 }
3765
3766 const SCEV *ScalarEvolution::getCouldNotCompute() {
3767 return CouldNotCompute.get();
3768 }
3769
3770 bool ScalarEvolution::checkValidity(const SCEV *S) const {
3771 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
3772 auto *SU = dyn_cast<SCEVUnknown>(S);
3773 return SU && SU->getValue() == nullptr;
3774 });
3775
3776 return !ContainsNulls;
3777 }
3778
3779 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
3780 HasRecMapType::iterator I = HasRecMap.find(S);
3781 if (I != HasRecMap.end())
3782 return I->second;
3783
3784 bool FoundAddRec =
3785 SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); });
3786 HasRecMap.insert({S, FoundAddRec});
3787 return FoundAddRec;
3788 }
3789
3790 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
3791 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
3792 /// offset I, then return {S', I}, else return {\p S, nullptr}.
3793 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
3794 const auto *Add = dyn_cast<SCEVAddExpr>(S);
3795 if (!Add)
3796 return {S, nullptr};
3797
3798 if (Add->getNumOperands() != 2)
3799 return {S, nullptr};
3800
3801 auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
3802 if (!ConstOp)
3803 return {S, nullptr};
3804
3805 return {Add->getOperand(1), ConstOp->getValue()};
3806 }
3807
3808 /// Return the ValueOffsetPair set for \p S. \p S can be represented
3809 /// by the value and offset from any ValueOffsetPair in the set.
3810 SetVector<ScalarEvolution::ValueOffsetPair> *
3811 ScalarEvolution::getSCEVValues(const SCEV *S) {
3812 ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
3813 if (SI == ExprValueMap.end())
3814 return nullptr;
3815 #ifndef NDEBUG
3816 if (VerifySCEVMap) {
3817 // Check there is no dangling Value in the set returned.
3818 for (const auto &VE : SI->second)
3819 assert(ValueExprMap.count(VE.first));
3820 }
3821 #endif
3822 return &SI->second;
3823 }
3824
3825 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
3826 /// cannot be used separately. eraseValueFromMap should be used to remove
3827 /// V from ValueExprMap and ExprValueMap at the same time.
3828 void ScalarEvolution::eraseValueFromMap(Value *V) {
3829 ValueExprMapType::iterator I = ValueExprMap.find_as(V);
3830 if (I != ValueExprMap.end()) {
3831 const SCEV *S = I->second;
3832 // Remove {V, 0} from the set of ExprValueMap[S]
3833 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
3834 SV->remove({V, nullptr});
3835
3836 // Remove {V, Offset} from the set of ExprValueMap[Stripped]
3837 const SCEV *Stripped;
3838 ConstantInt *Offset;
3839 std::tie(Stripped, Offset) = splitAddExpr(S);
3840 if (Offset != nullptr) {
3841 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
3842 SV->remove({V, Offset});
3843 }
3844 ValueExprMap.erase(V);
3845 }
3846 }
3847
3848 /// Check whether value has nuw/nsw/exact set but SCEV does not.
3849 /// TODO: Ideally we would check for poison recursively,
3850 /// but this is better than nothing.
3851 static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
3852 if (auto *I = dyn_cast<Instruction>(V)) {
3853 if (isa<OverflowingBinaryOperator>(I)) {
3854 if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
3855 if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
3856 return true;
3857 if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
3858 return true;
3859 }
3860 } else if (isa<PossiblyExactOperator>(I) && I->isExact())
3861 return true;
3862 }
3863 return false;
3864 }
3865
3866 /// Return an existing SCEV if it exists, otherwise analyze the expression and
3867 /// create a new one.
3868 const SCEV *ScalarEvolution::getSCEV(Value *V) {
3869 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
3870
3871 const SCEV *S = getExistingSCEV(V);
3872 if (S == nullptr) {
3873 S = createSCEV(V);
3874 // During PHI resolution, it is possible to create two SCEVs for the same
3875 // V, so we need to double-check whether V->S has already been inserted
3876 // into ValueExprMap before inserting S->{V, 0} into ExprValueMap.
3877 std::pair<ValueExprMapType::iterator, bool> Pair =
3878 ValueExprMap.insert({SCEVCallbackVH(V, this), S});
3879 if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
3880 ExprValueMap[S].insert({V, nullptr});
3881
3882 // If S == Stripped + Offset, add Stripped -> {V, Offset} into
3883 // ExprValueMap.
3884 const SCEV *Stripped = S;
3885 ConstantInt *Offset = nullptr;
3886 std::tie(Stripped, Offset) = splitAddExpr(S);
3887 // If Stripped is a SCEVUnknown, don't bother to save
3888 // Stripped -> {V, offset}. It doesn't simplify and sometimes even
3889 // increases the complexity of the expansion code.
3890 // If V is GetElementPtrInst, don't save Stripped -> {V, offset}
3891 // because it may generate add/sub instead of GEP in SCEV expansion.
3892 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) &&
3893 !isa<GetElementPtrInst>(V))
3894 ExprValueMap[Stripped].insert({V, Offset});
3895 }
3896 }
3897 return S;
3898 }
3899
3900 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
3901 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
3902
3903 ValueExprMapType::iterator I = ValueExprMap.find_as(V);
3904 if (I != ValueExprMap.end()) {
3905 const SCEV *S = I->second;
3906 if (checkValidity(S))
3907 return S;
3908 eraseValueFromMap(V);
3909 forgetMemoizedResults(S);
3910 }
3911 return nullptr;
3912 }
3913
3914 /// Return a SCEV corresponding to -V = -1*V
3915 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
3916 SCEV::NoWrapFlags Flags) {
3917 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
3918 return getConstant(
3919 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
3920
3921 Type *Ty = V->getType();
3922 Ty = getEffectiveSCEVType(Ty);
3923 return getMulExpr(V, getMinusOne(Ty), Flags);
3924 }
3925
3926 /// If Expr computes ~A, return A; otherwise return nullptr.
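/// SCEV has no dedicated "not" node; ~A is canonicalized as the add
/// (-1) + (-1)*A, which is the shape matched below.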
3927 static const SCEV *MatchNotExpr(const SCEV *Expr) {
3928 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
3929 if (!Add || Add->getNumOperands() != 2 ||
3930 !Add->getOperand(0)->isAllOnesValue())
3931 return nullptr;
3932
3933 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
3934 if (!AddRHS || AddRHS->getNumOperands() != 2 ||
3935 !AddRHS->getOperand(0)->isAllOnesValue())
3936 return nullptr;
3937
3938 return AddRHS->getOperand(1);
3939 }
3940
3941 /// Return a SCEV corresponding to ~V = -1-V
3942 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
3943 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
3944 return getConstant(
3945 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
3946
3947 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y)
3948 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) {
3949 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) {
3950 SmallVector<const SCEV *, 2> MatchedOperands;
3951 for (const SCEV *Operand : MME->operands()) {
3952 const SCEV *Matched = MatchNotExpr(Operand);
3953 if (!Matched)
3954 return (const SCEV *)nullptr;
3955 MatchedOperands.push_back(Matched);
3956 }
3957 return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()),
3958 MatchedOperands);
3959 };
3960 if (const SCEV *Replaced = MatchMinMaxNegation(MME))
3961 return Replaced;
3962 }
3963
3964 Type *Ty = V->getType();
3965 Ty = getEffectiveSCEVType(Ty);
3966 return getMinusSCEV(getMinusOne(Ty), V);
3967 }
3968
3969 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
3970 SCEV::NoWrapFlags Flags,
3971 unsigned Depth) {
3972 // Fast path: X - X --> 0.
3973 if (LHS == RHS)
3974 return getZero(LHS->getType());
3975
3976 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
3977 // makes it so that we cannot make much use of NUW.
3978 auto AddFlags = SCEV::FlagAnyWrap;
3979 const bool RHSIsNotMinSigned =
3980 !getSignedRangeMin(RHS).isMinSignedValue();
3981 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) {
3982 // Let M be the minimum representable signed value. Then (-1)*RHS
3983 // signed-wraps if and only if RHS is M. That can happen even for
3984 // a NSW subtraction because e.g. (-1)*M signed-wraps even though
3985 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
3986 // (-1)*RHS, we need to prove that RHS != M.
3987 //
3988 // If LHS is non-negative and we know that LHS - RHS does not
3989 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
3990 // either by proving that RHS > M or that LHS >= 0.
3991 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
3992 AddFlags = SCEV::FlagNSW;
3993 }
3994 }
3995
3996 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
3997 // RHS is NSW and LHS >= 0.
3998 //
3999 // The difficulty here is that the NSW flag may have been proven
4000 // relative to a loop that is to be found in a recurrence in LHS and
4001 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
4002 // larger scope than intended.
4003 auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
4004
4005 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
4006 }
4007
4008 const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
4009 unsigned Depth) {
4010 Type *SrcTy = V->getType();
4011 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4012 "Cannot truncate or zero extend with non-integer arguments!");
4013 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4014 return V; // No conversion
4015 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4016 return getTruncateExpr(V, Ty, Depth);
4017 return getZeroExtendExpr(V, Ty, Depth);
4018 }
4019
4020 const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
4021 unsigned Depth) {
4022 Type *SrcTy = V->getType();
4023 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4024 "Cannot truncate or zero extend with non-integer arguments!");
4025 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4026 return V; // No conversion
4027 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4028 return getTruncateExpr(V, Ty, Depth);
4029 return getSignExtendExpr(V, Ty, Depth);
4030 }
4031
4032 const SCEV *
4033 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
4034 Type *SrcTy = V->getType();
4035 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4036 "Cannot noop or zero extend with non-integer arguments!");
4037 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4038 "getNoopOrZeroExtend cannot truncate!");
4039 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4040 return V; // No conversion
4041 return getZeroExtendExpr(V, Ty);
4042 }
4043
4044 const SCEV *
4045 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
4046 Type *SrcTy = V->getType();
4047 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4048 "Cannot noop or sign extend with non-integer arguments!");
4049 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4050 "getNoopOrSignExtend cannot truncate!");
4051 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4052 return V; // No conversion
4053 return getSignExtendExpr(V, Ty);
4054 }
4055
4056 const SCEV *
4057 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
4058 Type *SrcTy = V->getType();
4059 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4060 "Cannot noop or any extend with non-integer arguments!");
4061 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4062 "getNoopOrAnyExtend cannot truncate!");
4063 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4064 return V; // No conversion
4065 return getAnyExtendExpr(V, Ty);
4066 }
4067
4068 const SCEV *
4069 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
4070 Type *SrcTy = V->getType();
4071 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4072 "Cannot truncate or noop with non-integer arguments!");
4073 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
4074 "getTruncateOrNoop cannot extend!");
4075 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4076 return V; // No conversion
4077 return getTruncateExpr(V, Ty);
4078 }
4079
4080 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
4081 const SCEV *RHS) {
4082 const SCEV *PromotedLHS = LHS;
4083 const SCEV *PromotedRHS = RHS;
4084
4085 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
4086 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
4087 else
4088 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
4089
4090 return getUMaxExpr(PromotedLHS, PromotedRHS);
4091 }
4092
4093 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
4094 const SCEV *RHS) {
4095 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
4096 return getUMinFromMismatchedTypes(Ops);
4097 }
4098
4099 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(
4100 SmallVectorImpl<const SCEV *> &Ops) {
4101 assert(!Ops.empty() && "At least one operand must be!");
4102 // Trivial case.
4103 if (Ops.size() == 1)
4104 return Ops[0];
4105
4106 // Find the max type first.
4107 Type *MaxType = nullptr;
4108 for (auto *S : Ops)
4109 if (MaxType)
4110 MaxType = getWiderType(MaxType, S->getType());
4111 else
4112 MaxType = S->getType();
4113 assert(MaxType && "Failed to find maximum type!");
4114
4115 // Extend all ops to max type.
4116 SmallVector<const SCEV *, 2> PromotedOps;
4117 for (auto *S : Ops)
4118 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));
4119
4120 // Generate umin.
4121 return getUMinExpr(PromotedOps);
4122 }
4123
4124 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
4125 // A pointer operand may evaluate to a nonpointer expression, such as null.
4126 if (!V->getType()->isPointerTy())
4127 return V;
4128
4129 while (true) {
4130 if (const SCEVIntegralCastExpr *Cast = dyn_cast<SCEVIntegralCastExpr>(V)) {
4131 V = Cast->getOperand();
4132 } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
4133 const SCEV *PtrOp = nullptr;
4134 for (const SCEV *NAryOp : NAry->operands()) {
4135 if (NAryOp->getType()->isPointerTy()) {
4136 // Cannot find the base of an expression with multiple pointer ops.
4137 if (PtrOp)
4138 return V;
4139 PtrOp = NAryOp;
4140 }
4141 }
4142 if (!PtrOp) // All operands were non-pointer.
4143 return V;
4144 V = PtrOp;
4145 } else // Not something we can look further into.
4146 return V;
4147 }
4148 }
4149
4150 /// Push users of the given Instruction onto the given Worklist.
4151 static void
4152 PushDefUseChildren(Instruction *I,
4153 SmallVectorImpl<Instruction *> &Worklist) {
4154 // Push the def-use children onto the Worklist stack.
4155 for (User *U : I->users())
4156 Worklist.push_back(cast<Instruction>(U));
4157 }
4158
4159 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
4160 SmallVector<Instruction *, 16> Worklist;
4161 PushDefUseChildren(PN, Worklist);
4162
4163 SmallPtrSet<Instruction *, 8> Visited;
4164 Visited.insert(PN);
4165 while (!Worklist.empty()) {
4166 Instruction *I = Worklist.pop_back_val();
4167 if (!Visited.insert(I).second)
4168 continue;
4169
4170 auto It = ValueExprMap.find_as(static_cast<Value *>(I));
4171 if (It != ValueExprMap.end()) {
4172 const SCEV *Old = It->second;
4173
4174 // Short-circuit the def-use traversal if the symbolic name
4175 // ceases to appear in expressions.
4176 if (Old != SymName && !hasOperand(Old, SymName))
4177 continue;
4178
4179 // SCEVUnknown for a PHI either means that it has an unrecognized
4180 // structure, it's a PHI that's in the process of being computed
4181 // by createNodeForPHI, or it's a single-value PHI. In the first case,
4182 // additional loop trip count information isn't going to change anything.
4183 // In the second case, createNodeForPHI will perform the necessary
4184 // updates on its own when it gets to that point. In the third, we do
4185 // want to forget the SCEVUnknown.
4186 if (!isa<PHINode>(I) ||
4187 !isa<SCEVUnknown>(Old) ||
4188 (I != PN && Old == SymName)) {
4189 eraseValueFromMap(It->first);
4190 forgetMemoizedResults(Old);
4191 }
4192 }
4193
4194 PushDefUseChildren(I, Worklist);
4195 }
4196 }
4197
4198 namespace {
4199
4200 /// Takes SCEV S and Loop L. For each AddRec sub-expression whose loop is L,
4201 /// use its start expression. If the sub-expression's loop is not L, then:
4202 /// if IgnoreOtherLoops is true, use the AddRec itself;
4203 /// otherwise the rewrite cannot be done.
4204 /// If the SCEV contains a non-invariant unknown SCEV, the rewrite cannot be done.
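/// For example (illustrative), rewriting {%start,+,1}<L> with respect to L
/// yields %start; an AddRec of another loop is kept as-is when
/// IgnoreOtherLoops is true and otherwise makes the rewrite fail with
/// SCEVCouldNotCompute.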
4205 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
4206 public:
4207 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
4208 bool IgnoreOtherLoops = true) {
4209 SCEVInitRewriter Rewriter(L, SE);
4210 const SCEV *Result = Rewriter.visit(S);
4211 if (Rewriter.hasSeenLoopVariantSCEVUnknown())
4212 return SE.getCouldNotCompute();
4213 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4214 ? SE.getCouldNotCompute()
4215 : Result;
4216 }
4217
4218 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4219 if (!SE.isLoopInvariant(Expr, L))
4220 SeenLoopVariantSCEVUnknown = true;
4221 return Expr;
4222 }
4223
4224 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4225 // Only re-write AddRecExprs for this loop.
4226 if (Expr->getLoop() == L)
4227 return Expr->getStart();
4228 SeenOtherLoops = true;
4229 return Expr;
4230 }
4231
4232 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }
4233
4234 bool hasSeenOtherLoops() { return SeenOtherLoops; }
4235
4236 private:
4237 explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
4238 : SCEVRewriteVisitor(SE), L(L) {}
4239
4240 const Loop *L;
4241 bool SeenLoopVariantSCEVUnknown = false;
4242 bool SeenOtherLoops = false;
4243 };
4244
4245 /// Takes SCEV S and Loop L. For each AddRec sub-expression whose loop is L,
4246 /// use its post-increment expression. If the sub-expression's loop is not L,
4247 /// use the AddRec itself.
4248 /// If the SCEV contains a non-invariant unknown SCEV, the rewrite cannot be done.
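/// For example (illustrative), rewriting {%start,+,%step}<L> with respect to
/// L yields {%start + %step,+,%step}<L>, i.e. the value after one increment.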
4249 class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
4250 public:
4251 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
4252 SCEVPostIncRewriter Rewriter(L, SE);
4253 const SCEV *Result = Rewriter.visit(S);
4254 return Rewriter.hasSeenLoopVariantSCEVUnknown()
4255 ? SE.getCouldNotCompute()
4256 : Result;
4257 }
4258
4259 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4260 if (!SE.isLoopInvariant(Expr, L))
4261 SeenLoopVariantSCEVUnknown = true;
4262 return Expr;
4263 }
4264
4265 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4266 // Only re-write AddRecExprs for this loop.
4267 if (Expr->getLoop() == L)
4268 return Expr->getPostIncExpr(SE);
4269 SeenOtherLoops = true;
4270 return Expr;
4271 }
4272
4273 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }
4274
4275 bool hasSeenOtherLoops() { return SeenOtherLoops; }
4276
4277 private:
4278 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
4279 : SCEVRewriteVisitor(SE), L(L) {}
4280
4281 const Loop *L;
4282 bool SeenLoopVariantSCEVUnknown = false;
4283 bool SeenOtherLoops = false;
4284 };
4285
4286 /// This class evaluates the compare condition by matching it against the
4287 /// condition of loop latch. If there is a match we assume a true value
4288 /// for the condition while building SCEV nodes.
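/// For example (illustrative), when the latch branches back to the header on
/// %cond, a select "%x = select %cond, %a, %b" inside the loop folds to %a,
/// because %cond must be true on every iteration that takes the backedge.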
4289 class SCEVBackedgeConditionFolder
4290 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
4291 public:
4292 static const SCEV *rewrite(const SCEV *S, const Loop *L,
4293 ScalarEvolution &SE) {
4294 bool IsPosBECond = false;
4295 Value *BECond = nullptr;
4296 if (BasicBlock *Latch = L->getLoopLatch()) {
4297 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
4298 if (BI && BI->isConditional()) {
4299 assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
4300 "Both outgoing branches should not target same header!");
4301 BECond = BI->getCondition();
4302 IsPosBECond = BI->getSuccessor(0) == L->getHeader();
4303 } else {
4304 return S;
4305 }
4306 }
4307 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
4308 return Rewriter.visit(S);
4309 }
4310
4311 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4312 const SCEV *Result = Expr;
4313 bool InvariantF = SE.isLoopInvariant(Expr, L);
4314
4315 if (!InvariantF) {
4316 Instruction *I = cast<Instruction>(Expr->getValue());
4317 switch (I->getOpcode()) {
4318 case Instruction::Select: {
4319 SelectInst *SI = cast<SelectInst>(I);
4320 Optional<const SCEV *> Res =
4321 compareWithBackedgeCondition(SI->getCondition());
4322 if (Res.hasValue()) {
4323 bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
4324 Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
4325 }
4326 break;
4327 }
4328 default: {
4329 Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
4330 if (Res.hasValue())
4331 Result = Res.getValue();
4332 break;
4333 }
4334 }
4335 }
4336 return Result;
4337 }
4338
4339 private:
4340 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
4341 bool IsPosBECond, ScalarEvolution &SE)
4342 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
4343 IsPositiveBECond(IsPosBECond) {}
4344
4345 Optional<const SCEV *> compareWithBackedgeCondition(Value *IC);
4346
4347 const Loop *L;
4348 /// Loop back condition.
4349 Value *BackedgeCond = nullptr;
4350 /// Set to true if loop back is on positive branch condition.
4351 bool IsPositiveBECond;
4352 };
4353
4354 Optional<const SCEV *>
4355 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {
4356
4357 // If the value matches the backedge condition of the loop latch,
4358 // then return a constant evolution node (true/false) based on the
4359 // branch on which the loop back-edge is taken.
4360 if (BackedgeCond == IC)
4361 return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
4362 : SE.getZero(Type::getInt1Ty(SE.getContext()));
4363 return None;
4364 }
4365
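/// Rewrites affine AddRecs of loop L one iteration "back": {A,+,B}<L> becomes
/// {A-B,+,B}<L>. Any loop-variant SCEVUnknown, or an AddRec of another loop,
/// invalidates the rewrite.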
4366 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
4367 public:
4368 static const SCEV *rewrite(const SCEV *S, const Loop *L,
4369 ScalarEvolution &SE) {
4370 SCEVShiftRewriter Rewriter(L, SE);
4371 const SCEV *Result = Rewriter.visit(S);
4372 return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
4373 }
4374
4375 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4376 // Only allow AddRecExprs for this loop.
4377 if (!SE.isLoopInvariant(Expr, L))
4378 Valid = false;
4379 return Expr;
4380 }
4381
4382 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4383 if (Expr->getLoop() == L && Expr->isAffine())
4384 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
4385 Valid = false;
4386 return Expr;
4387 }
4388
4389 bool isValid() { return Valid; }
4390
4391 private:
4392 explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
4393 : SCEVRewriteVisitor(SE), L(L) {}
4394
4395 const Loop *L;
4396 bool Valid = true;
4397 };
4398
4399 } // end anonymous namespace
4400
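// Prove no-wrap facts for an affine AddRec purely from constant ranges: if
// every value the AddRec can take lies inside the guaranteed no-wrap region
// of an "add" with the step's range, the increment can never wrap in the
// corresponding (signed or unsigned) sense.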
4401 SCEV::NoWrapFlags
4402 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
4403 if (!AR->isAffine())
4404 return SCEV::FlagAnyWrap;
4405
4406 using OBO = OverflowingBinaryOperator;
4407
4408 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;
4409
4410 if (!AR->hasNoSignedWrap()) {
4411 ConstantRange AddRecRange = getSignedRange(AR);
4412 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));
4413
4414 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4415 Instruction::Add, IncRange, OBO::NoSignedWrap);
4416 if (NSWRegion.contains(AddRecRange))
4417 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
4418 }
4419
4420 if (!AR->hasNoUnsignedWrap()) {
4421 ConstantRange AddRecRange = getUnsignedRange(AR);
4422 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));
4423
4424 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4425 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
4426 if (NUWRegion.contains(AddRecRange))
4427 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
4428 }
4429
4430 return Result;
4431 }
4432
4433 SCEV::NoWrapFlags
4434 ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
4435 SCEV::NoWrapFlags Result = AR->getNoWrapFlags();
4436
4437 if (AR->hasNoSignedWrap())
4438 return Result;
4439
4440 if (!AR->isAffine())
4441 return Result;
4442
4443 const SCEV *Step = AR->getStepRecurrence(*this);
4444 const Loop *L = AR->getLoop();
4445
4446 // Check whether the backedge-taken count is SCEVCouldNotCompute.
4447 // Note that this serves two purposes: It filters out loops that are
4448 // simply not analyzable, and it covers the case where this code is
4449 // being called from within backedge-taken count analysis, such that
4450 // attempting to ask for the backedge-taken count would likely result
4451 // in infinite recursion. In the latter case, the analysis code will
4452 // cope with a conservative value, and it will take care to purge
4453 // that value once it has finished.
4454 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
4455
4456 // Normally, in the cases we can prove no-overflow via a
4457 // backedge guarding condition, we can also compute a backedge
4458 // taken count for the loop. The exceptions are assumptions and
4459 // guards present in the loop -- SCEV is not great at exploiting
4460 // these to compute max backedge taken counts, but can still use
4461 // these to prove lack of overflow. Use this fact to avoid
4462 // doing extra work that may not pay off.
4463
4464 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
4465 AC.assumptions().empty())
4466 return Result;
4467
4468 // If the backedge is guarded by a comparison with the pre-inc value the
4469 // addrec is safe. Also, if the entry is guarded by a comparison with the
4470 // start value and the backedge is guarded by a comparison with the post-inc
4471 // value, the addrec is safe.
4472 ICmpInst::Predicate Pred;
4473 const SCEV *OverflowLimit =
4474 getSignedOverflowLimitForStep(Step, &Pred, this);
4475 if (OverflowLimit &&
4476 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
4477 isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
4478 Result = setFlags(Result, SCEV::FlagNSW);
4479 }
4480 return Result;
4481 }

4482 SCEV::NoWrapFlags
4483 ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) {
4484 SCEV::NoWrapFlags Result = AR->getNoWrapFlags();
4485
4486 if (AR->hasNoUnsignedWrap())
4487 return Result;
4488
4489 if (!AR->isAffine())
4490 return Result;
4491
4492 const SCEV *Step = AR->getStepRecurrence(*this);
4493 unsigned BitWidth = getTypeSizeInBits(AR->getType());
4494 const Loop *L = AR->getLoop();
4495
4496 // Check whether the backedge-taken count is SCEVCouldNotCompute.
4497 // Note that this serves two purposes: It filters out loops that are
4498 // simply not analyzable, and it covers the case where this code is
4499 // being called from within backedge-taken count analysis, such that
4500 // attempting to ask for the backedge-taken count would likely result
4501 // in infinite recursion. In the latter case, the analysis code will
4502 // cope with a conservative value, and it will take care to purge
4503 // that value once it has finished.
4504 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
4505
4506 // Normally, in the cases we can prove no-overflow via a
4507 // backedge guarding condition, we can also compute a backedge
4508 // taken count for the loop. The exceptions are assumptions and
4509 // guards present in the loop -- SCEV is not great at exploiting
4510 // these to compute max backedge taken counts, but can still use
4511 // these to prove lack of overflow. Use this fact to avoid
4512 // doing extra work that may not pay off.
4513
4514 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
4515 AC.assumptions().empty())
4516 return Result;
4517
4518 // If the backedge is guarded by a comparison with the pre-inc value the
4519 // addrec is safe. Also, if the entry is guarded by a comparison with the
4520 // start value and the backedge is guarded by a comparison with the post-inc
4521 // value, the addrec is safe.
4522 if (isKnownPositive(Step)) {
4523 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
4524 getUnsignedRangeMax(Step));
4525 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
4526 isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
4527 Result = setFlags(Result, SCEV::FlagNUW);
4528 }
4529 }
4530
4531 return Result;
4532 }
4533
4534 namespace {
4535
4536 /// Represents an abstract binary operation. This may exist as a
4537 /// normal instruction or constant expression, or may have been
4538 /// derived from an expression tree.
4539 struct BinaryOp {
4540 unsigned Opcode;
4541 Value *LHS;
4542 Value *RHS;
4543 bool IsNSW = false;
4544 bool IsNUW = false;
4545 bool IsExact = false;
4546
4547 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
4548 /// constant expression.
4549 Operator *Op = nullptr;
4550
4551 explicit BinaryOp(Operator *Op)
4552 : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
4553 Op(Op) {
4554 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
4555 IsNSW = OBO->hasNoSignedWrap();
4556 IsNUW = OBO->hasNoUnsignedWrap();
4557 }
4558 if (auto *PEO = dyn_cast<PossiblyExactOperator>(Op))
4559 IsExact = PEO->isExact();
4560 }
4561
4562 explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
4563 bool IsNUW = false, bool IsExact = false)
4564 : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW),
4565 IsExact(IsExact) {}
4566 };
4567
4568 } // end anonymous namespace
4569
4570 /// Try to map \p V into a BinaryOp, and return \c None on failure.
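/// (Illustrative examples: "shl i32 %a, %b" maps to BinaryOp(Shl, %a, %b);
/// "lshr i32 %a, 3" is canonicalized below to an unsigned divide of %a by 8.)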
4571 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
4572 auto *Op = dyn_cast<Operator>(V);
4573 if (!Op)
4574 return None;
4575
4576 // Implementation detail: all the cleverness here should happen without
4577 // creating new SCEV expressions -- our caller knows tricks to avoid creating
4578 // SCEV expressions when possible, and we should not break that.
4579
4580 switch (Op->getOpcode()) {
4581 case Instruction::Add:
4582 case Instruction::Sub:
4583 case Instruction::Mul:
4584 case Instruction::UDiv:
4585 case Instruction::URem:
4586 case Instruction::And:
4587 case Instruction::Or:
4588 case Instruction::AShr:
4589 case Instruction::Shl:
4590 return BinaryOp(Op);
4591
4592 case Instruction::Xor:
4593 if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
4594 // If the RHS of the xor is a signmask, then this is just an add.
4595 // Instcombine turns add of signmask into xor as a strength reduction step.
4596 if (RHSC->getValue().isSignMask())
4597 return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
4598 return BinaryOp(Op);
4599
4600 case Instruction::LShr:
4601 // Turn logical shift right of a constant into an unsigned divide.
4602 if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
4603 uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();
4604
4605 // If the shift count is not less than the bitwidth, the result of
4606 // the shift is undefined. Don't try to analyze it, because the
4607 // resolution chosen here may differ from the resolution chosen in
4608 // other parts of the compiler.
4609 if (SA->getValue().ult(BitWidth)) {
4610 Constant *X =
4611 ConstantInt::get(SA->getContext(),
4612 APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
4613 return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
4614 }
4615 }
4616 return BinaryOp(Op);
4617
4618 case Instruction::ExtractValue: {
4619 auto *EVI = cast<ExtractValueInst>(Op);
4620 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
4621 break;
4622
4623 auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
4624 if (!WO)
4625 break;
4626
4627 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4628 bool Signed = WO->isSigned();
4629 // TODO: Should add nuw/nsw flags for mul as well.
4630 if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
4631 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
4632
4633 // Now that we know that all uses of the arithmetic-result component of
4634 // CI are guarded by the overflow check, we can go ahead and pretend
4635 // that the arithmetic is non-overflowing.
4636 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
4637 /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
4638 }
4639
4640 default:
4641 break;
4642 }
4643
4644 // Recognise the intrinsic loop.decrement.reg; since it has exactly the same
4645 // semantics as a Sub, return a binary sub expression.
4646 if (auto *II = dyn_cast<IntrinsicInst>(V))
4647 if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
4648 return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1));
4649
4650 return None;
4651 }
4652
4653 /// Helper function to createAddRecFromPHIWithCasts. We have a phi
4654 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
4655 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
4656 /// way. This function checks if \p Op, an operand of this SCEVAddExpr,
4657 /// follows one of the following patterns:
4658 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
4659 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
4660 /// If the SCEV expression of \p Op conforms with one of the expected patterns
4661 /// we return the type of the truncation operation, and indicate whether the
4662 /// truncated type should be treated as signed/unsigned by setting
4663 /// \p Signed to true/false, respectively.
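/// For example (illustrative), with an i64 %SymbolicPHI, the operand
/// (sext i32 (trunc i64 %SymbolicPHI to i32) to i64) makes this function
/// return the i32 type and set Signed to true.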
4664 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
4665 bool &Signed, ScalarEvolution &SE) {
4666 // The case where Op == SymbolicPHI (that is, with no type conversions on
4667 // the way) is handled by the regular add recurrence creating logic and
4668 // would have already been triggered in createAddRecForPHI. Reaching it here
4669 // means that createAddRecFromPHI had failed for this PHI before (e.g.,
4670 // because one of the other operands of the SCEVAddExpr updating this PHI is
4671 // not invariant).
4672 //
4673 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
4674 // this case predicates that allow us to prove that Op == SymbolicPHI will
4675 // be added.
4676 if (Op == SymbolicPHI)
4677 return nullptr;
4678
4679 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
4680 unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
4681 if (SourceBits != NewBits)
4682 return nullptr;
4683
4684 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
4685 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
4686 if (!SExt && !ZExt)
4687 return nullptr;
4688 const SCEVTruncateExpr *Trunc =
4689 SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
4690 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
4691 if (!Trunc)
4692 return nullptr;
4693 const SCEV *X = Trunc->getOperand();
4694 if (X != SymbolicPHI)
4695 return nullptr;
4696 Signed = SExt != nullptr;
4697 return Trunc->getType();
4698 }
4699
4700 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
4701 if (!PN->getType()->isIntegerTy())
4702 return nullptr;
4703 const Loop *L = LI.getLoopFor(PN->getParent());
4704 if (!L || L->getHeader() != PN->getParent())
4705 return nullptr;
4706 return L;
4707 }
4708
4709 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
4710 // computation that updates the phi follows the following pattern:
4711 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
4712 // which corresponds to a phi->trunc->sext/zext->add->phi update chain.
4713 // If so, try to see if it can be rewritten as an AddRecExpr under some
4714 // Predicates. If successful, return them as a pair. Also cache the results
4715 // of the analysis.
4716 //
4717 // Example usage scenario:
4718 // Say the Rewriter is called for the following SCEV:
4719 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4720 // where:
4721 // %X = phi i64 (%Start, %BEValue)
4722 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
4723 // and call this function with %SymbolicPHI = %X.
4724 //
4725 // The analysis will find that the value coming around the backedge has
4726 // the following SCEV:
4727 // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4728 // Upon concluding that this matches the desired pattern, the function
4729 // will return the pair {NewAddRec, SmallPredsVec} where:
4730 // NewAddRec = {%Start,+,%Step}
4731 // SmallPredsVec = {P1, P2, P3} as follows:
4732 // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
4733 // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
4734 // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
4735 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec
4736 // under the predicates {P1,P2,P3}.
4737 // This predicated rewrite will be cached in PredicatedSCEVRewrites:
4738 // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
4739 //
4740 // TODO's:
4741 //
4742 // 1) Extend the Induction descriptor to also support inductions that involve
4743 // casts: When needed (namely, when we are called in the context of the
4744 // vectorizer induction analysis), a Set of cast instructions will be
4745 // populated by this method, and provided back to isInductionPHI. This is
4746 // needed to allow the vectorizer to properly record them to be ignored by
4747 // the cost model and to avoid vectorizing them (otherwise these casts,
4748 // which are redundant under the runtime overflow checks, will be
4749 // vectorized, which can be costly).
4750 //
4751 // 2) Support additional induction/PHISCEV patterns: We also want to support
4752 // inductions where the sext-trunc / zext-trunc operations (partly) occur
4753 // after the induction update operation (the induction increment):
4754 //
4755 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
4756 // which corresponds to a phi->add->trunc->sext/zext->phi update chain.
4757 //
4758 // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
4759 // which corresponds to a phi->trunc->add->sext/zext->phi update chain.
4760 //
4761 // 3) Outline common code with createAddRecFromPHI to avoid duplication.
4762 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4763 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
4764 SmallVector<const SCEVPredicate *, 3> Predicates;
4765
4766 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
4767 // return an AddRec expression under some predicate.
4768
4769 auto *PN = cast<PHINode>(SymbolicPHI->getValue());
4770 const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
4771 assert(L && "Expecting an integer loop header phi");
4772
4773 // The loop may have multiple entrances or multiple exits; we can analyze
4774 // this phi as an addrec if it has a unique entry value and a unique
4775 // backedge value.
4776 Value *BEValueV = nullptr, *StartValueV = nullptr;
4777 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
4778 Value *V = PN->getIncomingValue(i);
4779 if (L->contains(PN->getIncomingBlock(i))) {
4780 if (!BEValueV) {
4781 BEValueV = V;
4782 } else if (BEValueV != V) {
4783 BEValueV = nullptr;
4784 break;
4785 }
4786 } else if (!StartValueV) {
4787 StartValueV = V;
4788 } else if (StartValueV != V) {
4789 StartValueV = nullptr;
4790 break;
4791 }
4792 }
4793 if (!BEValueV || !StartValueV)
4794 return None;
4795
4796 const SCEV *BEValue = getSCEV(BEValueV);
4797
4798 // If the value coming around the backedge is an add with the symbolic
4799 // value we just inserted, possibly with casts that we can ignore under
4800 // an appropriate runtime guard, then we found a simple induction variable!
4801 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue);
4802 if (!Add)
4803 return None;
4804
4805 // If there is a single occurrence of the symbolic value, possibly
4806 // casted, replace it with a recurrence.
4807 unsigned FoundIndex = Add->getNumOperands();
4808 Type *TruncTy = nullptr;
4809 bool Signed;
4810 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
4811 if ((TruncTy =
4812 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this)))
4813 if (FoundIndex == e) {
4814 FoundIndex = i;
4815 break;
4816 }
4817
4818 if (FoundIndex == Add->getNumOperands())
4819 return None;
4820
4821 // Create an add with everything but the specified operand.
4822 SmallVector<const SCEV *, 8> Ops;
4823 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
4824 if (i != FoundIndex)
4825 Ops.push_back(Add->getOperand(i));
4826 const SCEV *Accum = getAddExpr(Ops);
4827
4828 // The runtime checks will not be valid if the step amount is
4829 // varying inside the loop.
4830 if (!isLoopInvariant(Accum, L))
4831 return None;
4832
4833 // *** Part2: Create the predicates
4834
4835 // Analysis was successful: we have a phi-with-cast pattern for which we
4836 // can return an AddRec expression under the following predicates:
4837 //
4838 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum)
4839 // fits within the truncated type (does not overflow) for i = 0 to n-1.
4840 // P2: An Equal predicate that guarantees that
4841 // Start = (Ext ix (Trunc iy (Start) to ix) to iy)
4842 // P3: An Equal predicate that guarantees that
4843 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
4844 //
4845 // As we next prove, the above predicates guarantee that:
4846 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
4847 //
4848 //
4849 // More formally, we want to prove that:
4850 // Expr(i+1) = Start + (i+1) * Accum
4851 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
4852 //
4853 // Given that:
4854 // 1) Expr(0) = Start
4855 // 2) Expr(1) = Start + Accum
4856 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
4857 // 3) Induction hypothesis (step i):
4858 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
4859 //
4860 // Proof:
4861 // Expr(i+1) =
4862 // = Start + (i+1)*Accum
4863 // = (Start + i*Accum) + Accum
4864 // = Expr(i) + Accum
4865 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
4866 // :: from step i
4867 //
4868 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum
4869 //
4870 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
4871 // + (Ext ix (Trunc iy (Accum) to ix) to iy)
4872 // + Accum :: from P3
4873 //
4874 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy)
4875 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
4876 //
4877 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
4878 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
4879 //
4880 // By induction, the same applies to all iterations 1<=i<n.
4881 //
4882
4883 // Create a truncated addrec for which we will add a no overflow check (P1).
4884 const SCEV *StartVal = getSCEV(StartValueV);
4885 const SCEV *PHISCEV =
4886 getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
4887 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);
4888
4889 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr.
4890 // e.g.: If the truncated Accum is 0 and StartVal is a constant, then PHISCEV
4891 // will be constant.
4892 //
4893 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't
4894 // add P1.
4895 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
4896 SCEVWrapPredicate::IncrementWrapFlags AddedFlags =
4897 Signed ? SCEVWrapPredicate::IncrementNSSW
4898 : SCEVWrapPredicate::IncrementNUSW;
4899 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
4900 Predicates.push_back(AddRecPred);
4901 }
4902
4903 // Create the Equal Predicates P2,P3:
4904
4905 // It is possible that the predicates P2 and/or P3 are computable at
4906 // compile time due to StartVal and/or Accum being constants.
4907 // If either one is, then we can check it now and bail out early if either
4908 // P2 or P3 is known false.
4909
4910 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
4911 // for each of StartVal and Accum
4912 auto getExtendedExpr = [&](const SCEV *Expr,
4913 bool CreateSignExtend) -> const SCEV * {
4914 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
4915 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
4916 const SCEV *ExtendedExpr =
4917 CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType())
4918 : getZeroExtendExpr(TruncatedExpr, Expr->getType());
4919 return ExtendedExpr;
4920 };
4921
4922 // Given:
4923 // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy
4924 // = getExtendedExpr(Expr)
4925 // Determine whether the predicate P: Expr == ExtendedExpr
4926 // is known to be false at compile time
4927 auto PredIsKnownFalse = [&](const SCEV *Expr,
4928 const SCEV *ExtendedExpr) -> bool {
4929 return Expr != ExtendedExpr &&
4930 isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
4931 };
4932
4933 const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
4934 if (PredIsKnownFalse(StartVal, StartExtended)) {
4935 LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
4936 return None;
4937 }
4938
4939 // The Step is always Signed (because the overflow checks are either
4940 // NSSW or NUSW)
4941 const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
4942 if (PredIsKnownFalse(Accum, AccumExtended)) {
4943 LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
4944 return None;
4945 }
4946
4947 auto AppendPredicate = [&](const SCEV *Expr,
4948 const SCEV *ExtendedExpr) -> void {
4949 if (Expr != ExtendedExpr &&
4950 !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
4951 const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
4952 LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
4953 Predicates.push_back(Pred);
4954 }
4955 };
4956
4957 AppendPredicate(StartVal, StartExtended);
4958 AppendPredicate(Accum, AccumExtended);
4959
4960 // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
4961 // which the casts had been folded away. The caller can rewrite SymbolicPHI
4962 // into NewAR if it will also add the runtime overflow checks specified in
4963 // Predicates.
4964 auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
4965
4966 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
4967 std::make_pair(NewAR, Predicates);
4968 // Remember the result of the analysis for this SCEV at this location.
4969 PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
4970 return PredRewrite;
4971 }
4972
4973 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4974 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
4975 auto *PN = cast<PHINode>(SymbolicPHI->getValue());
4976 const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
4977 if (!L)
4978 return None;
4979
4980 // Check to see if we already analyzed this PHI.
4981 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
4982 if (I != PredicatedSCEVRewrites.end()) {
4983 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
4984 I->second;
4985 // Analysis was done before and failed to create an AddRec:
4986 if (Rewrite.first == SymbolicPHI)
4987 return None;
4988 // Analysis was done before and succeeded in creating an AddRec under
4989 // a predicate:
4990 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
4991 assert(!(Rewrite.second).empty() && "Expected to find Predicates");
4992 return Rewrite;
4993 }
4994
4995 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4996 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
4997
4998 // Record in the cache that the analysis failed
4999 if (!Rewrite) {
5000 SmallVector<const SCEVPredicate *, 3> Predicates;
5001 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5002 return None;
5003 }
5004
5005 return Rewrite;
5006 }
5007
5008 // FIXME: This utility is currently required because the Rewriter currently
5009 // does not rewrite this expression:
5010 // {0, +, (sext ix (trunc iy to ix) to iy)}
5011 // into {0, +, %step},
5012 // even when the following Equal predicate exists:
5013 // "%step == (sext ix (trunc iy to ix) to iy)".
5014 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
5015 const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
5016 if (AR1 == AR2)
5017 return true;
5018
5019 auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
5020 if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
5021 !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
5022 return false;
5023 return true;
5024 };
5025
5026 if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
5027 !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
5028 return false;
5029 return true;
5030 }
5031
5032 /// A helper function for createAddRecFromPHI to handle simple cases.
5033 ///
5034 /// This function tries to find an AddRec expression for the simplest (yet most
5035 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
5036 /// If it fails, createAddRecFromPHI will use a more general, but slow,
5037 /// technique for finding the AddRec expression.
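/// For example (illustrative), given
///   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
///   %iv.next = add nuw i64 %iv, 4
/// this returns the AddRec {0,+,4}<nuw><%loop>.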
5038 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
5039 Value *BEValueV,
5040 Value *StartValueV) {
5041 const Loop *L = LI.getLoopFor(PN->getParent());
5042 assert(L && L->getHeader() == PN->getParent());
5043 assert(BEValueV && StartValueV);
5044
5045 auto BO = MatchBinaryOp(BEValueV, DT);
5046 if (!BO)
5047 return nullptr;
5048
5049 if (BO->Opcode != Instruction::Add)
5050 return nullptr;
5051
5052 const SCEV *Accum = nullptr;
5053 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5054 Accum = getSCEV(BO->RHS);
5055 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5056 Accum = getSCEV(BO->LHS);
5057
5058 if (!Accum)
5059 return nullptr;
5060
5061 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5062 if (BO->IsNUW)
5063 Flags = setFlags(Flags, SCEV::FlagNUW);
5064 if (BO->IsNSW)
5065 Flags = setFlags(Flags, SCEV::FlagNSW);
5066
5067 const SCEV *StartVal = getSCEV(StartValueV);
5068 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5069
5070 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
5071
5072 // We can add Flags to the post-inc expression only if we
5073 // know that it is *undefined behavior* for BEValueV to
5074 // overflow.
5075 if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
5076 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
5077 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5078
5079 return PHISCEV;
5080 }
5081
5082 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
5083 const Loop *L = LI.getLoopFor(PN->getParent());
5084 if (!L || L->getHeader() != PN->getParent())
5085 return nullptr;
5086
5087 // The loop may have multiple entrances or multiple exits; we can analyze
5088 // this phi as an addrec if it has a unique entry value and a unique
5089 // backedge value.
5090 Value *BEValueV = nullptr, *StartValueV = nullptr;
5091 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5092 Value *V = PN->getIncomingValue(i);
5093 if (L->contains(PN->getIncomingBlock(i))) {
5094 if (!BEValueV) {
5095 BEValueV = V;
5096 } else if (BEValueV != V) {
5097 BEValueV = nullptr;
5098 break;
5099 }
5100 } else if (!StartValueV) {
5101 StartValueV = V;
5102 } else if (StartValueV != V) {
5103 StartValueV = nullptr;
5104 break;
5105 }
5106 }
5107 if (!BEValueV || !StartValueV)
5108 return nullptr;
5109
5110 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5111 "PHI node already processed?");
5112
5113 // First, try to find an AddRec expression without creating a fictitious
5114 // symbolic value for PN.
5115 if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5116 return S;
5117
5118 // Handle PHI node value symbolically.
5119 const SCEV *SymbolicName = getUnknown(PN);
5120 ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
5121
5122 // Using this symbolic name for the PHI, analyze the value coming around
5123 // the back-edge.
5124 const SCEV *BEValue = getSCEV(BEValueV);
5125
5126 // NOTE: If BEValue is loop invariant, we know that the PHI node just
5127 // has a special value for the first iteration of the loop.
5128
5129 // If the value coming around the backedge is an add with the symbolic
5130 // value we just inserted, then we found a simple induction variable!
5131 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
5132 // If there is a single occurrence of the symbolic value, replace it
5133 // with a recurrence.
5134 unsigned FoundIndex = Add->getNumOperands();
5135 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5136 if (Add->getOperand(i) == SymbolicName)
5137 if (FoundIndex == e) {
5138 FoundIndex = i;
5139 break;
5140 }
5141
5142 if (FoundIndex != Add->getNumOperands()) {
5143 // Create an add with everything but the specified operand.
5144 SmallVector<const SCEV *, 8> Ops;
5145 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5146 if (i != FoundIndex)
5147 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
5148 L, *this));
5149 const SCEV *Accum = getAddExpr(Ops);
5150
5151 // This is not a valid addrec if the step amount is varying each
5152 // loop iteration, but is not itself an addrec in this loop.
5153 if (isLoopInvariant(Accum, L) ||
5154 (isa<SCEVAddRecExpr>(Accum) &&
5155 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
5156 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5157
5158 if (auto BO = MatchBinaryOp(BEValueV, DT)) {
5159 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5160 if (BO->IsNUW)
5161 Flags = setFlags(Flags, SCEV::FlagNUW);
5162 if (BO->IsNSW)
5163 Flags = setFlags(Flags, SCEV::FlagNSW);
5164 }
5165 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
5166 // If the increment is an inbounds GEP, then we know the address
5167 // space cannot be wrapped around. We cannot make any guarantee
5168 // about signed or unsigned overflow because pointers are
5169 // unsigned but we may have a negative index from the base
5170 // pointer. We can guarantee that no unsigned wrap occurs if the
5171 // indices form a positive value.
5172 if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
5173 Flags = setFlags(Flags, SCEV::FlagNW);
5174
5175 const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
5176 if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
5177 Flags = setFlags(Flags, SCEV::FlagNUW);
5178 }
5179
5180 // We cannot transfer nuw and nsw flags from subtraction
5181 // operations -- sub nuw X, Y is not the same as add nuw X, -Y
5182 // for instance.
5183 }
5184
5185 const SCEV *StartVal = getSCEV(StartValueV);
5186 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5187
5188 // Okay, for the entire analysis of this edge we assumed the PHI
5189 // to be symbolic. We now need to go back and purge all of the
5190 // entries for the scalars that use the symbolic expression.
5191 forgetSymbolicName(PN, SymbolicName);
5192 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
5193
5194 // We can add Flags to the post-inc expression only if we
5195 // know that it is *undefined behavior* for BEValueV to
5196 // overflow.
5197 if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
5198 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
5199 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5200
5201 return PHISCEV;
5202 }
5203 }
5204 } else {
5205 // Otherwise, this could be a loop like this:
5206 // i = 0; for (j = 1; ..; ++j) { .... i = j; }
5207 // In this case, j = {1,+,1} and BEValue is j.
5208 // Because the other in-value of i (0) fits the evolution of BEValue,
5209 // i really is an addrec evolution.
5210 //
5211 // We can generalize this by saying that i is the shifted value of BEValue
5212 // by one iteration:
5213 // PHI(f(0), f({1,+,1})) --> f({0,+,1})
5214 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
5215 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
5216 if (Shifted != getCouldNotCompute() &&
5217 Start != getCouldNotCompute()) {
5218 const SCEV *StartVal = getSCEV(StartValueV);
5219 if (Start == StartVal) {
5220 // Okay, for the entire analysis of this edge we assumed the PHI
5221 // to be symbolic. We now need to go back and purge all of the
5222 // entries for the scalars that use the symbolic expression.
5223 forgetSymbolicName(PN, SymbolicName);
5224 ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
5225 return Shifted;
5226 }
5227 }
5228 }
5229
5230 // Remove the temporary PHI node SCEV that has been inserted while intending
5231 // to create an AddRecExpr for this PHI node. We cannot keep this temporary
5232 // as it will prevent later (possibly simpler) SCEV expressions from being
5233 // added to the ValueExprMap.
5234 eraseValueFromMap(PN);
5235
5236 return nullptr;
5237 }
5238
5239 // Checks if the SCEV S is available at BB. S is considered available at BB
5240 // if S can be materialized at BB without introducing a fault.
5241 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
5242 BasicBlock *BB) {
5243 struct CheckAvailable {
5244 bool TraversalDone = false;
5245 bool Available = true;
5246
5247 const Loop *L = nullptr; // The loop BB is in (can be nullptr)
5248 BasicBlock *BB = nullptr;
5249 DominatorTree &DT;
5250
5251 CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
5252 : L(L), BB(BB), DT(DT) {}
5253
5254 bool setUnavailable() {
5255 TraversalDone = true;
5256 Available = false;
5257 return false;
5258 }
5259
5260 bool follow(const SCEV *S) {
5261 switch (S->getSCEVType()) {
5262 case scConstant:
5263 case scPtrToInt:
5264 case scTruncate:
5265 case scZeroExtend:
5266 case scSignExtend:
5267 case scAddExpr:
5268 case scMulExpr:
5269 case scUMaxExpr:
5270 case scSMaxExpr:
5271 case scUMinExpr:
5272 case scSMinExpr:
5273 // These expressions are available if their operand(s) is/are.
5274 return true;
5275
5276 case scAddRecExpr: {
5277 // We allow add recurrences that are on the loop BB is in, or some
5278 // outer loop. This guarantees availability because the value of the
5279 // add recurrence at BB is simply the "current" value of the induction
5280 // variable. We can relax this in the future; for instance an add
5281 // recurrence on a sibling dominating loop is also available at BB.
5282 const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
5283 if (L && (ARLoop == L || ARLoop->contains(L)))
5284 return true;
5285
5286 return setUnavailable();
5287 }
5288
5289 case scUnknown: {
5290 // For SCEVUnknown, we check for simple dominance.
5291 const auto *SU = cast<SCEVUnknown>(S);
5292 Value *V = SU->getValue();
5293
5294 if (isa<Argument>(V))
5295 return false;
5296
5297 if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
5298 return false;
5299
5300 return setUnavailable();
5301 }
5302
5303 case scUDivExpr:
5304 case scCouldNotCompute:
5305 // We do not try to be smart about these at all.
5306 return setUnavailable();
5307 }
5308 llvm_unreachable("Unknown SCEV kind!");
5309 }
5310
5311 bool isDone() { return TraversalDone; }
5312 };
5313
5314 CheckAvailable CA(L, BB, DT);
5315 SCEVTraversal<CheckAvailable> ST(CA);
5316
5317 ST.visitAll(S);
5318 return CA.Available;
5319 }
5320
5321 // Try to match a control flow sequence that branches out at BI and merges back
5322 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
5323 // match.
5324 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
5325 Value *&C, Value *&LHS, Value *&RHS) {
5326 C = BI->getCondition();
5327
5328 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
5329 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));
5330
5331 if (!LeftEdge.isSingleEdge())
5332 return false;
5333
5334 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()");
5335
5336 Use &LeftUse = Merge->getOperandUse(0);
5337 Use &RightUse = Merge->getOperandUse(1);
5338
5339 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
5340 LHS = LeftUse;
5341 RHS = RightUse;
5342 return true;
5343 }
5344
5345 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
5346 LHS = RightUse;
5347 RHS = LeftUse;
5348 return true;
5349 }
5350
5351 return false;
5352 }
5353
5354 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
5355 auto IsReachable =
5356 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
5357 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
5358 const Loop *L = LI.getLoopFor(PN->getParent());
5359
5360 // We don't want to break LCSSA, even in a SCEV expression tree.
5361 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
5362 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L)
5363 return nullptr;
5364
5365 // Try to match
5366 //
5367 // br %cond, label %left, label %right
5368 // left:
5369 // br label %merge
5370 // right:
5371 // br label %merge
5372 // merge:
5373 // V = phi [ %x, %left ], [ %y, %right ]
5374 //
5375 // as "select %cond, %x, %y"
5376
5377 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
5378 assert(IDom && "At least the entry block should dominate PN");
5379
5380 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
5381 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;
5382
5383 if (BI && BI->isConditional() &&
5384 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
5385 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) &&
5386 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent()))
5387 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
5388 }
5389
5390 return nullptr;
5391 }
5392
5393 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
5394 if (const SCEV *S = createAddRecFromPHI(PN))
5395 return S;
5396
5397 if (const SCEV *S = createNodeFromSelectLikePHI(PN))
5398 return S;
5399
5400 // If the PHI has a single incoming value, follow that value, unless the
5401 // PHI's incoming blocks are in a different loop, in which case doing so
5402 // risks breaking LCSSA form. Instcombine would normally zap these, but
5403 // it doesn't have DominatorTree information, so it may miss cases.
5404 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
5405 if (LI.replacementPreservesLCSSAForm(PN, V))
5406 return getSCEV(V);
5407
5408 // If it's not a loop phi, we can't handle it yet.
5409 return getUnknown(PN);
5410 }
5411
5412 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I,
5413 Value *Cond,
5414 Value *TrueVal,
5415 Value *FalseVal) {
5416 // Handle "constant" branch or select. This can occur for instance when a
5417 // loop pass transforms an inner loop and moves on to process the outer loop.
5418 if (auto *CI = dyn_cast<ConstantInt>(Cond))
5419 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
5420
5421 // Try to match some simple smax or umax patterns.
5422 auto *ICI = dyn_cast<ICmpInst>(Cond);
5423 if (!ICI)
5424 return getUnknown(I);
5425
5426 Value *LHS = ICI->getOperand(0);
5427 Value *RHS = ICI->getOperand(1);
5428
5429 switch (ICI->getPredicate()) {
5430 case ICmpInst::ICMP_SLT:
5431 case ICmpInst::ICMP_SLE:
5432 std::swap(LHS, RHS);
5433 LLVM_FALLTHROUGH;
5434 case ICmpInst::ICMP_SGT:
5435 case ICmpInst::ICMP_SGE:
5436 // a >s b ? a+x : b+x -> smax(a, b)+x
5437 // a >s b ? b+x : a+x -> smin(a, b)+x
5438 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
5439 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType());
5440 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType());
5441 const SCEV *LA = getSCEV(TrueVal);
5442 const SCEV *RA = getSCEV(FalseVal);
5443 const SCEV *LDiff = getMinusSCEV(LA, LS);
5444 const SCEV *RDiff = getMinusSCEV(RA, RS);
5445 if (LDiff == RDiff)
5446 return getAddExpr(getSMaxExpr(LS, RS), LDiff);
5447 LDiff = getMinusSCEV(LA, RS);
5448 RDiff = getMinusSCEV(RA, LS);
5449 if (LDiff == RDiff)
5450 return getAddExpr(getSMinExpr(LS, RS), LDiff);
5451 }
5452 break;
5453 case ICmpInst::ICMP_ULT:
5454 case ICmpInst::ICMP_ULE:
5455 std::swap(LHS, RHS);
5456 LLVM_FALLTHROUGH;
5457 case ICmpInst::ICMP_UGT:
5458 case ICmpInst::ICMP_UGE:
5459 // a >u b ? a+x : b+x -> umax(a, b)+x
5460 // a >u b ? b+x : a+x -> umin(a, b)+x
5461 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
5462 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
5463 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType());
5464 const SCEV *LA = getSCEV(TrueVal);
5465 const SCEV *RA = getSCEV(FalseVal);
5466 const SCEV *LDiff = getMinusSCEV(LA, LS);
5467 const SCEV *RDiff = getMinusSCEV(RA, RS);
5468 if (LDiff == RDiff)
5469 return getAddExpr(getUMaxExpr(LS, RS), LDiff);
5470 LDiff = getMinusSCEV(LA, RS);
5471 RDiff = getMinusSCEV(RA, LS);
5472 if (LDiff == RDiff)
5473 return getAddExpr(getUMinExpr(LS, RS), LDiff);
5474 }
5475 break;
5476 case ICmpInst::ICMP_NE:
5477 // n != 0 ? n+x : 1+x -> umax(n, 1)+x
5478 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
5479 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
5480 const SCEV *One = getOne(I->getType());
5481 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
5482 const SCEV *LA = getSCEV(TrueVal);
5483 const SCEV *RA = getSCEV(FalseVal);
5484 const SCEV *LDiff = getMinusSCEV(LA, LS);
5485 const SCEV *RDiff = getMinusSCEV(RA, One);
5486 if (LDiff == RDiff)
5487 return getAddExpr(getUMaxExpr(One, LS), LDiff);
5488 }
5489 break;
5490 case ICmpInst::ICMP_EQ:
5491 // n == 0 ? 1+x : n+x -> umax(n, 1)+x
5492 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
5493 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
5494 const SCEV *One = getOne(I->getType());
5495 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
5496 const SCEV *LA = getSCEV(TrueVal);
5497 const SCEV *RA = getSCEV(FalseVal);
5498 const SCEV *LDiff = getMinusSCEV(LA, One);
5499 const SCEV *RDiff = getMinusSCEV(RA, LS);
5500 if (LDiff == RDiff)
5501 return getAddExpr(getUMaxExpr(One, LS), LDiff);
5502 }
5503 break;
5504 default:
5505 break;
5506 }
5507
5508 return getUnknown(I);
5509 }
5510
5511 /// Expand GEP instructions into add and multiply operations. This allows them
5512 /// to be analyzed by regular SCEV code.
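/// For example (illustrative), "getelementptr i32, i32* %p, i64 %i" becomes
/// (%p + 4 * %i) on a target where i32 occupies 4 bytes.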
5513 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
5514 // Don't attempt to analyze GEPs over unsized objects.
5515 if (!GEP->getSourceElementType()->isSized())
5516 return getUnknown(GEP);
5517
5518 SmallVector<const SCEV *, 4> IndexExprs;
5519 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
5520 IndexExprs.push_back(getSCEV(*Index));
5521 return getGEPExpr(GEP, IndexExprs);
5522 }
5523
5524 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
5525 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
5526 return C->getAPInt().countTrailingZeros();
5527
5528 if (const SCEVPtrToIntExpr *I = dyn_cast<SCEVPtrToIntExpr>(S))
5529 return GetMinTrailingZeros(I->getOperand());
5530
5531 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
5532 return std::min(GetMinTrailingZeros(T->getOperand()),
5533 (uint32_t)getTypeSizeInBits(T->getType()));
5534
5535 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
5536 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
5537 return OpRes == getTypeSizeInBits(E->getOperand()->getType())
5538 ? getTypeSizeInBits(E->getType())
5539 : OpRes;
5540 }
5541
5542 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
5543 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
5544 return OpRes == getTypeSizeInBits(E->getOperand()->getType())
5545 ? getTypeSizeInBits(E->getType())
5546 : OpRes;
5547 }
5548
5549 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
5550 // The result is the min of all operands' results.
5551 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
5552 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
5553 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
5554 return MinOpRes;
5555 }
5556
5557 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
5558 // The result is the sum of all operands' results.
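// (Illustrative: multiplying a value divisible by 4 by one divisible by 2
// yields a value divisible by 8 -- 2 + 1 = 3 trailing zeros, capped at
// the bit width.)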
5559 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
5560 uint32_t BitWidth = getTypeSizeInBits(M->getType());
5561 for (unsigned i = 1, e = M->getNumOperands();
5562 SumOpRes != BitWidth && i != e; ++i)
5563 SumOpRes =
5564 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
5565 return SumOpRes;
5566 }
5567
5568 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
5569 // The result is the min of all operands' results.
5570 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
5571 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
5572 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
5573 return MinOpRes;
5574 }
5575
5576 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
5577 // The result is the min of all operands' results.
5578 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
5579 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
5580 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
5581 return MinOpRes;
5582 }
5583
5584 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
5585 // The result is the min of all operands results.
5586 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
5587 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
5588 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
5589 return MinOpRes;
5590 }
5591
5592 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
5593 // For a SCEVUnknown, ask ValueTracking.
5594 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
5595 return Known.countMinTrailingZeros();
5596 }
5597
5598 // SCEVUDivExpr
5599 return 0;
5600 }

uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  auto I = MinTrailingZerosCache.find(S);
  if (I != MinTrailingZerosCache.end())
    return I->second;

  uint32_t Result = GetMinTrailingZerosImpl(S);
  auto InsertPair = MinTrailingZerosCache.insert({S, Result});
  assert(InsertPair.second && "Should insert a new key");
  return InsertPair.first->second;
}

/// Helper method to assign a range to V from metadata present in the IR.
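/// Range metadata encodes half-open intervals, so e.g. !range !{i32 0, i32 10}
/// on an instruction yields the ConstantRange [0, 10).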
static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
      return getConstantRangeFromMetadata(*MD);

  return None;
}

void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec,
                                     SCEV::NoWrapFlags Flags) {
  if (AddRec->getNoWrapFlags(Flags) != Flags) {
    AddRec->setNoWrapFlags(Flags);
    UnsignedRanges.erase(AddRec);
    SignedRanges.erase(AddRec);
  }
}

/// Determine the range for a particular SCEV. If SignHint is
/// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
/// with a "cleaner" unsigned (resp. signed) representation.
const ConstantRange &
ScalarEvolution::getRangeRef(const SCEV *S,
                             ScalarEvolution::RangeSignHint SignHint) {
  DenseMap<const SCEV *, ConstantRange> &Cache =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                       : SignedRanges;
  ConstantRange::PreferredRangeType RangeType =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED
          ? ConstantRange::Unsigned : ConstantRange::Signed;

  // See if we've computed this range already.
  DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
  if (I != Cache.end())
    return I->second;

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setRange(C, SignHint, ConstantRange(C->getAPInt()));

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
  using OBO = OverflowingBinaryOperator;

  // If the value has known zeros, the maximum value will have those known
  // zeros as well.
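  // e.g. with BitWidth == 8 and TZ == 2, the unsigned case below yields
  // [0, 0b11111100 + 1): 252 is the largest i8 value with two trailing zeros.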
  uint32_t TZ = GetMinTrailingZeros(S);
  if (TZ != 0) {
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
      ConservativeResult =
          ConstantRange(APInt::getMinValue(BitWidth),
                        APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
    else
      ConservativeResult = ConstantRange(
          APInt::getSignedMinValue(BitWidth),
          APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
  }

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getRangeRef(Add->getOperand(0), SignHint);
    unsigned WrapType = OBO::AnyWrap;
    if (Add->hasNoSignedWrap())
      WrapType |= OBO::NoSignedWrap;
    if (Add->hasNoUnsignedWrap())
      WrapType |= OBO::NoUnsignedWrap;
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint),
                          WrapType, RangeType);
    return setRange(Add, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint);
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint));
    return setRange(Mul, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getRangeRef(SMax->getOperand(i), SignHint));
    return setRange(SMax, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getRangeRef(UMax->getOperand(i), SignHint));
    return setRange(UMax, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) {
    ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint);
    for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i)
      X = X.smin(getRangeRef(SMin->getOperand(i), SignHint));
    return setRange(SMin, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) {
    ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint);
    for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i)
      X = X.umin(getRangeRef(UMin->getOperand(i), SignHint));
    return setRange(UMin, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint);
    ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint);
    return setRange(UDiv, SignHint,
                    ConservativeResult.intersectWith(X.udiv(Y), RangeType));
  }

  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
    ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint);
    return setRange(ZExt, SignHint,
                    ConservativeResult.intersectWith(X.zeroExtend(BitWidth),
                                                     RangeType));
  }

  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
    ConstantRange X = getRangeRef(SExt->getOperand(), SignHint);
    return setRange(SExt, SignHint,
                    ConservativeResult.intersectWith(X.signExtend(BitWidth),
                                                     RangeType));
  }

  if (const SCEVPtrToIntExpr *PtrToInt = dyn_cast<SCEVPtrToIntExpr>(S)) {
    ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint);
    return setRange(PtrToInt, SignHint, X);
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint);
    return setRange(Trunc, SignHint,
                    ConservativeResult.intersectWith(X.truncate(BitWidth),
                                                     RangeType));
  }

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
    if (AddRec->hasNoUnsignedWrap()) {
      APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart());
      if (!UnsignedMinValue.isNullValue())
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType);
    }

    // If there's no signed wrap, and all the operands other than the initial
    // value have the same sign or are zero, the value will never be:
    //   1. smaller than the initial value if the operands are non-negative, or
    //   2. bigger than the initial value if the operands are non-positive.
    // In either case, the value cannot cross the signed min/max boundary.
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i)))
          AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i)))
          AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()),
                                       APInt::getSignedMinValue(BitWidth)),
            RangeType);
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(
                APInt::getSignedMinValue(BitWidth),
                getSignedRangeMax(AddRec->getStart()) + 1),
            RangeType);
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const SCEV *MaxBECount =
          getConstantMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        auto RangeFromAffine = getRangeForAffineAR(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromAffine, RangeType);

        auto RangeFromFactoring = getRangeViaFactoring(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
      }

      // Now try symbolic BE count and more powerful methods.
      if (UseExpensiveRangeSharpening) {
        const SCEV *SymbolicMaxBECount =
            getSymbolicMaxBackedgeTakenCount(AddRec->getLoop());
        if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) &&
            getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
            AddRec->hasNoSelfWrap()) {
          auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
              AddRec, SymbolicMaxBECount, BitWidth, SignHint);
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
        }
      }
    }

    return setRange(AddRec, SignHint, std::move(ConservativeResult));
  }
5829
5830 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
5831 // Check if the IR explicitly contains !range metadata.
5832 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
5833 if (MDRange.hasValue())
5834 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
5835 RangeType);
5836
5837 // Split here to avoid paying the compile-time cost of calling both
5838 // computeKnownBits and ComputeNumSignBits. This restriction can be lifted
5839 // if needed.
5840 const DataLayout &DL = getDataLayout();
5841 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
5842 // For a SCEVUnknown, ask ValueTracking.
5843 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
5844 if (Known.getBitWidth() != BitWidth)
5845 Known = Known.zextOrTrunc(BitWidth);
5846 // If Known does not result in full-set, intersect with it.
5847 if (Known.getMinValue() != Known.getMaxValue() + 1)
5848 ConservativeResult = ConservativeResult.intersectWith(
5849 ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
5850 RangeType);
5851 } else {
5852 assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
5853 "generalize as needed!");
5854 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
5855 // If the pointer size is larger than the index size type, this can cause
5856 // NS to be larger than BitWidth. So compensate for this.
5857 if (U->getType()->isPointerTy()) {
5858 unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
5859 int ptrIdxDiff = ptrSize - BitWidth;
5860 if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
5861 NS -= ptrIdxDiff;
5862 }
5863
5864 if (NS > 1)
5865 ConservativeResult = ConservativeResult.intersectWith(
5866 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
5867 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
5868 RangeType);
5869 }
5870
5871 // A range of Phi is a subset of union of all ranges of its input.
5872 if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
5873 // Make sure that we do not run over cycled Phis.
5874 if (PendingPhiRanges.insert(Phi).second) {
5875 ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
5876 for (auto &Op : Phi->operands()) {
5877 auto OpRange = getRangeRef(getSCEV(Op), SignHint);
5878 RangeFromOps = RangeFromOps.unionWith(OpRange);
5879 // No point to continue if we already have a full set.
5880 if (RangeFromOps.isFullSet())
5881 break;
5882 }
5883 ConservativeResult =
5884 ConservativeResult.intersectWith(RangeFromOps, RangeType);
5885 bool Erased = PendingPhiRanges.erase(Phi);
5886 assert(Erased && "Failed to erase Phi properly?");
5887 (void) Erased;
5888 }
5889 }
5890
5891 return setRange(U, SignHint, std::move(ConservativeResult));
5892 }
5893
5894 return setRange(S, SignHint, std::move(ConservativeResult));
5895 }
5896
5897 // Given a StartRange, Step and MaxBECount for an expression compute a range of
5898 // values that the expression can take. Initially, the expression has a value
5899 // from StartRange and then is changed by Step up to MaxBECount times. Signed
5900 // argument defines if we treat Step as signed or unsigned.
getRangeForAffineARHelper(APInt Step,const ConstantRange & StartRange,const APInt & MaxBECount,unsigned BitWidth,bool Signed)5901 static ConstantRange getRangeForAffineARHelper(APInt Step,
5902 const ConstantRange &StartRange,
5903 const APInt &MaxBECount,
5904 unsigned BitWidth, bool Signed) {
5905 // If either Step or MaxBECount is 0, then the expression won't change, and we
5906 // just need to return the initial range.
5907 if (Step == 0 || MaxBECount == 0)
5908 return StartRange;
5909
5910 // If we don't know anything about the initial value (i.e. StartRange is
5911 // FullRange), then we don't know anything about the final range either.
5912 // Return FullRange.
5913 if (StartRange.isFullSet())
5914 return ConstantRange::getFull(BitWidth);
5915
5916 // If Step is signed and negative, then we use its absolute value, but we also
5917 // note that we're moving in the opposite direction.
5918 bool Descending = Signed && Step.isNegative();
5919
5920 if (Signed)
5921 // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
5922 // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
5923 // This equations hold true due to the well-defined wrap-around behavior of
5924 // APInt.
5925 Step = Step.abs();
5926
5927 // Check if Offset is more than full span of BitWidth. If it is, the
5928 // expression is guaranteed to overflow.
5929 if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
5930 return ConstantRange::getFull(BitWidth);
5931
5932 // Offset is by how much the expression can change. Checks above guarantee no
5933 // overflow here.
5934 APInt Offset = Step * MaxBECount;
5935
5936 // Minimum value of the final range will match the minimal value of StartRange
5937 // if the expression is increasing and will be decreased by Offset otherwise.
5938 // Maximum value of the final range will match the maximal value of StartRange
5939 // if the expression is decreasing and will be increased by Offset otherwise.
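  // e.g. for StartRange == [5, 8), Step == 2 and MaxBECount == 10 (ascending),
  // Offset == 20 and the result below is [5, 7 + 20 + 1) == [5, 28).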
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the
  // initial range (due to wrap-around). This means that the expression can
  // take any value in this bitwidth, and we have to return the full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange::getFull(BitWidth);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
  return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
}

ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const SCEV *MaxBECount,
                                                   unsigned BitWidth) {
  assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
         getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
         "Precondition!");

  MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
  APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);

  // First, consider step signed.
  ConstantRange StartSRange = getSignedRange(Start);
  ConstantRange StepSRange = getSignedRange(Step);

  // If Step can be both positive and negative, we need to find ranges for the
  // maximum absolute step values in both directions and union them.
  ConstantRange SR =
      getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
                                MaxBECountValue, BitWidth, /* Signed = */ true);
  SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
                                              StartSRange, MaxBECountValue,
                                              BitWidth, /* Signed = */ true));

  // Next, consider step unsigned.
  ConstantRange UR = getRangeForAffineARHelper(
      getUnsignedRangeMax(Step), getUnsignedRange(Start),
      MaxBECountValue, BitWidth, /* Signed = */ false);

  // Finally, intersect signed and unsigned ranges.
  return SR.intersectWith(UR, ConstantRange::Smallest);
}

ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
    const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
    ScalarEvolution::RangeSignHint SignHint) {
  assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!");
  assert(AddRec->hasNoSelfWrap() &&
         "This only works for non-self-wrapping AddRecs!");
  const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
  const SCEV *Step = AddRec->getStepRecurrence(*this);
  // Only deal with constant step to save compile time.
  if (!isa<SCEVConstant>(Step))
    return ConstantRange::getFull(BitWidth);
  // Let's make sure that we can prove that we do not self-wrap during
  // MaxBECount iterations. We need this because MaxBECount is a maximum
  // iteration count estimate, and we might infer nw from some exit for which
  // we do not know the max exit count (or any other side reasoning).
  // TODO: Turn into assert at some point.
  if (getTypeSizeInBits(MaxBECount->getType()) >
      getTypeSizeInBits(AddRec->getType()))
    return ConstantRange::getFull(BitWidth);
  MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
  const SCEV *RangeWidth = getMinusOne(AddRec->getType());
  const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
  const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
  if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
                                         MaxItersWithoutWrap))
    return ConstantRange::getFull(BitWidth);

  ICmpInst::Predicate LEPred =
      IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  ICmpInst::Predicate GEPred =
      IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
  const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);

  // We know that there is no self-wrap. Let's take Start and End values and
  // look at all intermediate values V1, V2, ..., Vn that the IndVar takes
  // during the iteration. They either lie inside the range [Min(Start, End),
  // Max(Start, End)] or outside it:
  //
  // Case 1: RangeMin ... Start V1 ... VN End ... RangeMax;
  // Case 2: RangeMin Vk ... V1 Start ... End Vn ... Vk + 1 RangeMax;
  //
  // The no-self-wrap flag guarantees that the intermediate values cannot be
  // BOTH outside and inside the range [Min(Start, End), Max(Start, End)].
  // Using that knowledge, let's try to prove that we are dealing with Case 1.
  // It is so if Start <= End and the step is positive, or Start >= End and
  // the step is negative.
  const SCEV *Start = AddRec->getStart();
  ConstantRange StartRange = getRangeRef(Start, SignHint);
  ConstantRange EndRange = getRangeRef(End, SignHint);
  ConstantRange RangeBetween = StartRange.unionWith(EndRange);
  // If they already cover the full iteration space, we will know nothing
  // useful even if we prove what we want to prove.
  if (RangeBetween.isFullSet())
    return RangeBetween;
  // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax).
  bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet()
                               : RangeBetween.isWrappedSet();
  if (IsWrappedSet)
    return ConstantRange::getFull(BitWidth);

  if (isKnownPositive(Step) &&
      isKnownPredicateViaConstantRanges(LEPred, Start, End))
    return RangeBetween;
  else if (isKnownNegative(Step) &&
           isKnownPredicateViaConstantRanges(GEPred, Start, End))
    return RangeBetween;
  return ConstantRange::getFull(BitWidth);
}

ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
                                                    const SCEV *Step,
                                                    const SCEV *MaxBECount,
                                                    unsigned BitWidth) {
  // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
  //                          == RangeOf({A,+,P}) union RangeOf({B,+,Q})
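  // e.g. for Start == %c ? 0 : 8 and Step == %c ? 1 : 2, this computes
  // RangeOf({0,+,1}) union RangeOf({8,+,2}).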

  struct SelectPattern {
    Value *Condition = nullptr;
    APInt TrueValue;
    APInt FalseValue;

    explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
                           const SCEV *S) {
      Optional<unsigned> CastOp;
      APInt Offset(BitWidth, 0);

      assert(SE.getTypeSizeInBits(S->getType()) == BitWidth &&
             "Should be!");

      // Peel off a constant offset:
      if (auto *SA = dyn_cast<SCEVAddExpr>(S)) {
        // In the future we could consider being smarter here and handle
        // {Start+Step,+,Step} too.
        if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0)))
          return;

        Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt();
        S = SA->getOperand(1);
      }

      // Peel off a cast operation
      if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) {
        CastOp = SCast->getSCEVType();
        S = SCast->getOperand();
      }

      using namespace llvm::PatternMatch;

      auto *SU = dyn_cast<SCEVUnknown>(S);
      const APInt *TrueVal, *FalseVal;
      if (!SU ||
          !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal),
                                          m_APInt(FalseVal)))) {
        Condition = nullptr;
        return;
      }

      TrueValue = *TrueVal;
      FalseValue = *FalseVal;

      // Re-apply the cast we peeled off earlier
      if (CastOp.hasValue())
        switch (*CastOp) {
        default:
          llvm_unreachable("Unknown SCEV cast type!");

        case scTruncate:
          TrueValue = TrueValue.trunc(BitWidth);
          FalseValue = FalseValue.trunc(BitWidth);
          break;
        case scZeroExtend:
          TrueValue = TrueValue.zext(BitWidth);
          FalseValue = FalseValue.zext(BitWidth);
          break;
        case scSignExtend:
          TrueValue = TrueValue.sext(BitWidth);
          FalseValue = FalseValue.sext(BitWidth);
          break;
        }

      // Re-apply the constant offset we peeled off earlier
      TrueValue += Offset;
      FalseValue += Offset;
    }

    bool isRecognized() { return Condition != nullptr; }
  };

  SelectPattern StartPattern(*this, BitWidth, Start);
  if (!StartPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  SelectPattern StepPattern(*this, BitWidth, Step);
  if (!StepPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  if (StartPattern.Condition != StepPattern.Condition) {
    // We don't handle this case today; but we could, by considering four
    // possibilities below instead of two. I'm not sure if there are cases
    // where that will help over what getRange already does, though.
    return ConstantRange::getFull(BitWidth);
  }

  // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to
  // construct arbitrary general SCEV expressions here. This function is called
  // from deep in the call stack, and calling getSCEV (on a sext instruction,
  // say) can end up caching a suboptimal value.

  // FIXME: without the explicit `this` receiver below, MSVC errors out with
  // C2352 and C2512 (otherwise it isn't needed).

  const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue);
  const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue);
  const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue);
  const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue);

  ConstantRange TrueRange =
      this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth);
  ConstantRange FalseRange =
      this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth);

  return TrueRange.unionWith(FalseRange);
}

SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
  if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap;
  const BinaryOperator *BinOp = cast<BinaryOperator>(V);

  // Return early if there are no flags to propagate to the SCEV.
  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BinOp->hasNoUnsignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
  if (BinOp->hasNoSignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
  if (Flags == SCEV::FlagAnyWrap)
    return SCEV::FlagAnyWrap;

  return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
}

bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
  // Here we check that I is in the header of the innermost loop containing I,
  // since we only deal with instructions in the loop header. The actual loop
  // we need to check later will come from an add recurrence, but getting that
  // requires computing the SCEV of the operands, which can be expensive. This
  // check we can do cheaply to rule out some cases early.
  Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent());
  if (InnermostContainingLoop == nullptr ||
      InnermostContainingLoop->getHeader() != I->getParent())
    return false;

  // Only proceed if we can prove that I does not yield poison.
  if (!programUndefinedIfPoison(I))
    return false;

  // At this point we know that if I is executed, then it does not wrap
  // according to at least one of NSW or NUW. If I is not executed, then we do
  // not know if the calculation that I represents would wrap. Multiple
  // instructions can map to the same SCEV. If we apply NSW or NUW from I to
  // the SCEV, we must guarantee no wrapping for that SCEV also when it is
  // derived from other instructions that map to the same SCEV. We cannot make
  // that guarantee for cases where I is not executed. So we need to find the
  // loop that I is considered in relation to and prove that I is executed for
  // every iteration of that loop. That implies that the value that I
  // calculates does not wrap anywhere in the loop, so then we can apply the
  // flags to the SCEV.
  //
  // We check isLoopInvariant to disambiguate in case we are adding recurrences
  // from different loops, so that we know which loop to prove that I is
  // executed in.
  for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
    // I could be an extractvalue from a call to an overflow intrinsic.
    // TODO: We can do better here in some cases.
    if (!isSCEVable(I->getOperand(OpIndex)->getType()))
      return false;
    const SCEV *Op = getSCEV(I->getOperand(OpIndex));
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
      bool AllOtherOpsLoopInvariant = true;
      for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
           ++OtherOpIndex) {
        if (OtherOpIndex != OpIndex) {
          const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
          if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
            AllOtherOpsLoopInvariant = false;
            break;
          }
        }
      }
      if (AllOtherOpsLoopInvariant &&
          isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
        return true;
    }
  }
  return false;
}

bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
  // If we know that \c I can never be poison period, then that's enough.
  if (isSCEVExprNeverPoison(I))
    return true;

  // For an add recurrence specifically, we assume that infinite loops without
  // side effects are undefined behavior, and then reason as follows:
  //
  // If the add recurrence is poison in any iteration, it is poison on all
  // future iterations (since incrementing poison yields poison). If the result
  // of the add recurrence is fed into the loop latch condition and the loop
  // does not contain any throws or exiting blocks other than the latch, we now
  // have the ability to "choose" whether the backedge is taken or not (by
  // choosing a sufficiently evil value for the poison feeding into the branch)
  // for every iteration including and after the one in which \p I first became
  // poison. There are two possibilities (let's call the iteration in which \p
  // I first became poison as K):
  //
  // 1. In the set of iterations including and after K, the loop body executes
  //    no side effects. In this case executing the backedge an infinite
  //    number of times will yield undefined behavior.
  //
  // 2. In the set of iterations including and after K, the loop body executes
  //    at least one side effect. In this case, that specific instance of side
  //    effect is control dependent on poison, which also yields undefined
  //    behavior.

  auto *ExitingBB = L->getExitingBlock();
  auto *LatchBB = L->getLoopLatch();
  if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
    return false;

  SmallPtrSet<const Instruction *, 16> Pushed;
  SmallVector<const Instruction *, 8> PoisonStack;

  // We start by assuming \c I, the post-inc add recurrence, is poison. Only
  // things that are known to be poison under that assumption go on the
  // PoisonStack.
  Pushed.insert(I);
  PoisonStack.push_back(I);

  bool LatchControlDependentOnPoison = false;
  while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
    const Instruction *Poison = PoisonStack.pop_back_val();

    for (auto *PoisonUser : Poison->users()) {
      if (propagatesPoison(cast<Operator>(PoisonUser))) {
        if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
          PoisonStack.push_back(cast<Instruction>(PoisonUser));
      } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
        assert(BI->isConditional() && "Only possibility!");
        if (BI->getParent() == LatchBB) {
          LatchControlDependentOnPoison = true;
          break;
        }
      }
    }
  }

  return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
}

ScalarEvolution::LoopProperties
ScalarEvolution::getLoopProperties(const Loop *L) {
  using LoopProperties = ScalarEvolution::LoopProperties;

  auto Itr = LoopPropertiesCache.find(L);
  if (Itr == LoopPropertiesCache.end()) {
    auto HasSideEffects = [](Instruction *I) {
      if (auto *SI = dyn_cast<StoreInst>(I))
        return !SI->isSimple();

      return I->mayHaveSideEffects();
    };

    LoopProperties LP = {/* HasNoAbnormalExits */ true,
                         /*HasNoSideEffects*/ true};

    for (auto *BB : L->getBlocks())
      for (auto &I : *BB) {
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          LP.HasNoAbnormalExits = false;
        if (HasSideEffects(&I))
          LP.HasNoSideEffects = false;
        if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
          break; // We're already as pessimistic as we can get.
      }

    auto InsertPair = LoopPropertiesCache.insert({L, LP});
    assert(InsertPair.second && "We just checked!");
    Itr = InsertPair.first;
  }

  return Itr->second;
}

const SCEV *ScalarEvolution::createSCEV(Value *V) {
  if (!isSCEVable(V->getType()))
    return getUnknown(V);

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // Don't attempt to analyze instructions in blocks that aren't
    // reachable. Such instructions don't matter, and they aren't required
    // to obey basic rules for definitions dominating uses which this
    // analysis depends on.
    if (!DT.isReachableFromEntry(I->getParent()))
      return getUnknown(UndefValue::get(V->getType()));
  } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
    return getConstant(CI);
  else if (isa<ConstantPointerNull>(V))
    // FIXME: we shouldn't special-case null pointer constant.
    return getZero(V->getType());
  else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee());
  else if (!isa<ConstantExpr>(V))
    return getUnknown(V);

  Operator *U = cast<Operator>(V);
  if (auto BO = MatchBinaryOp(U, DT)) {
    switch (BO->Opcode) {
    case Instruction::Add: {
      // The simple thing to do would be to just call getSCEV on both operands
      // and call getAddExpr with the result. However if we're looking at a
      // bunch of things all added together, this can be quite inefficient,
      // because it leads to N-1 getAddExpr calls for N ultimate operands.
      // Instead, gather up all the operands and make a single getAddExpr call.
      // LLVM IR canonical form means we need only traverse the left operands.
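      // e.g. (((%a + %b) + %c) + %d) becomes a single getAddExpr(%a, %b, %c,
      // %d) call instead of three nested two-operand calls.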
      SmallVector<const SCEV *, 4> AddOps;
      do {
        if (BO->Op) {
          if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
            AddOps.push_back(OpSCEV);
            break;
          }

          // If a NUW or NSW flag can be applied to the SCEV for this
          // addition, then compute the SCEV for this addition by itself
          // with a separate call to getAddExpr. We need to do that
          // instead of pushing the operands of the addition onto AddOps,
          // since the flags are only known to apply to this particular
          // addition - they may not apply to other additions that can be
          // formed with operands from AddOps.
          const SCEV *RHS = getSCEV(BO->RHS);
          SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
          if (Flags != SCEV::FlagAnyWrap) {
            const SCEV *LHS = getSCEV(BO->LHS);
            if (BO->Opcode == Instruction::Sub)
              AddOps.push_back(getMinusSCEV(LHS, RHS, Flags));
            else
              AddOps.push_back(getAddExpr(LHS, RHS, Flags));
            break;
          }
        }

        if (BO->Opcode == Instruction::Sub)
          AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS)));
        else
          AddOps.push_back(getSCEV(BO->RHS));

        auto NewBO = MatchBinaryOp(BO->LHS, DT);
        if (!NewBO || (NewBO->Opcode != Instruction::Add &&
                       NewBO->Opcode != Instruction::Sub)) {
          AddOps.push_back(getSCEV(BO->LHS));
          break;
        }
        BO = NewBO;
      } while (true);

      return getAddExpr(AddOps);
    }

    case Instruction::Mul: {
      SmallVector<const SCEV *, 4> MulOps;
      do {
        if (BO->Op) {
          if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
            MulOps.push_back(OpSCEV);
            break;
          }

          SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
          if (Flags != SCEV::FlagAnyWrap) {
            MulOps.push_back(
                getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags));
            break;
          }
        }

        MulOps.push_back(getSCEV(BO->RHS));
        auto NewBO = MatchBinaryOp(BO->LHS, DT);
        if (!NewBO || NewBO->Opcode != Instruction::Mul) {
          MulOps.push_back(getSCEV(BO->LHS));
          break;
        }
        BO = NewBO;
      } while (true);

      return getMulExpr(MulOps);
    }
    case Instruction::UDiv:
      return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
    case Instruction::URem:
      return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
    case Instruction::Sub: {
      SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
      if (BO->Op)
        Flags = getNoWrapFlagsFromUB(BO->Op);
      return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags);
    }
    case Instruction::And:
      // For an expression like x&255 that merely masks off the high bits,
      // use zext(trunc(x)) as the SCEV expression.
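      // e.g. i32 %x & 255 becomes zext(trunc(%x) to i8) back to i32: the
      // truncation keeps the low 8 bits and the zero-extend clears the rest.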
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        if (CI->isZero())
          return getSCEV(BO->RHS);
        if (CI->isMinusOne())
          return getSCEV(BO->LHS);
        const APInt &A = CI->getValue();

        // Instcombine's ShrinkDemandedConstant may strip bits out of
        // constants, obscuring what would otherwise be a low-bits mask.
        // Use computeKnownBits to compute what ShrinkDemandedConstant
        // knew about to reconstruct a low-bits mask value.
        unsigned LZ = A.countLeadingZeros();
        unsigned TZ = A.countTrailingZeros();
        unsigned BitWidth = A.getBitWidth();
        KnownBits Known(BitWidth);
        computeKnownBits(BO->LHS, Known, getDataLayout(), 0, &AC, nullptr,
                         &DT);

        APInt EffectiveMask =
            APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
        if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) {
          const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ));
          const SCEV *LHS = getSCEV(BO->LHS);
          const SCEV *ShiftedLHS = nullptr;
          if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) {
            if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) {
              // For an expression like (x * 8) & 8, simplify the multiply.
              unsigned MulZeros = OpC->getAPInt().countTrailingZeros();
              unsigned GCD = std::min(MulZeros, TZ);
              APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD);
              SmallVector<const SCEV *, 4> MulOps;
              MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD)));
              MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end());
              auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags());
              ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt));
            }
          }
          if (!ShiftedLHS)
            ShiftedLHS = getUDivExpr(LHS, MulCount);
          return getMulExpr(
              getZeroExtendExpr(
                  getTruncateExpr(ShiftedLHS,
                      IntegerType::get(getContext(), BitWidth - LZ - TZ)),
                  BO->LHS->getType()),
              MulCount);
        }
      }
      break;

    case Instruction::Or:
      // If the RHS of the Or is a constant, we may have something like:
      // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
      // optimizations will transparently handle this case.
      //
      // In order for this transformation to be safe, the LHS must be of the
      // form X*(2^n) and the Or constant must be less than 2^n.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        const SCEV *LHS = getSCEV(BO->LHS);
        const APInt &CIVal = CI->getValue();
        if (GetMinTrailingZeros(LHS) >=
            (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
          // Build a plain add SCEV.
          return getAddExpr(LHS, getSCEV(CI),
                            (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW));
        }
      }
      break;

    case Instruction::Xor:
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        // If the RHS of xor is -1, then this is a not operation.
        if (CI->isMinusOne())
          return getNotSCEV(getSCEV(BO->LHS));

        // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
        // This is a variant of the check for xor with -1, and it handles
        // the case where instcombine has trimmed non-demanded bits out
        // of an xor with -1.
        if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS))
          if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1)))
            if (LBO->getOpcode() == Instruction::And &&
                LCI->getValue() == CI->getValue())
              if (const SCEVZeroExtendExpr *Z =
                      dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) {
                Type *UTy = BO->LHS->getType();
                const SCEV *Z0 = Z->getOperand();
                Type *Z0Ty = Z0->getType();
                unsigned Z0TySize = getTypeSizeInBits(Z0Ty);

                // If C is a low-bits mask, the zero extend is serving to
                // mask off the high bits. Complement the operand and
                // re-apply the zext.
                if (CI->getValue().isMask(Z0TySize))
                  return getZeroExtendExpr(getNotSCEV(Z0), UTy);

                // If C is a single bit, it may be in the sign-bit position
                // before the zero-extend. In this case, represent the xor
                // using an add, which is equivalent, and re-apply the zext.
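                // (Within Z0Ty, xor with the sign mask and add of the sign
                // mask both just flip the top bit, e.g. in i8,
                // 200 ^ 0x80 == (200 + 0x80) mod 256 == 72.)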
                APInt Trunc = CI->getValue().trunc(Z0TySize);
                if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
                    Trunc.isSignMask())
                  return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
                                           UTy);
              }
      }
      break;

    case Instruction::Shl:
      // Turn shift left of a constant amount into a multiply.
      if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
        uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();

        // If the shift count is not less than the bitwidth, the result of
        // the shift is undefined. Don't try to analyze it, because the
        // resolution chosen here may differ from the resolution chosen in
        // other parts of the compiler.
        if (SA->getValue().uge(BitWidth))
          break;

        // We can safely preserve the nuw flag in all cases. It's also safe to
        // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation
        // requires special handling. It can be preserved as long as we're not
        // left shifting by bitwidth - 1.
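        // e.g. in i8, shl nsw %x, 7 maps to mul %x, -128 (1 << 7 == 0x80).
        // For %x == -1 the shl is well-defined (-1 << 7 == -128), but the
        // signed multiplication -1 * -128 == 128 overflows i8, so nsw cannot
        // be transferred to the multiply.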
        auto Flags = SCEV::FlagAnyWrap;
        if (BO->Op) {
          auto MulFlags = getNoWrapFlagsFromUB(BO->Op);
          if ((MulFlags & SCEV::FlagNSW) &&
              ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1)))
            Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW);
          if (MulFlags & SCEV::FlagNUW)
            Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW);
        }

        Constant *X = ConstantInt::get(
            getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
      }
      break;

    case Instruction::AShr: {
      // AShr X, C, where C is a constant.
      ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
      if (!CI)
        break;

      Type *OuterTy = BO->LHS->getType();
      uint64_t BitWidth = getTypeSizeInBits(OuterTy);
      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (CI->getValue().uge(BitWidth))
        break;

      if (CI->isZero())
        return getSCEV(BO->LHS); // shift by zero --> noop

      uint64_t AShrAmt = CI->getZExtValue();
      Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);

      Operator *L = dyn_cast<Operator>(BO->LHS);
      if (L && L->getOpcode() == Instruction::Shl) {
        // X = Shl A, n
        // Y = AShr X, m
        // Both n and m are constant.

        const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
        if (L->getOperand(1) == BO->RHS)
          // For a two-shift sext-inreg, i.e. n = m,
          // use sext(trunc(x)) as the SCEV expression.
          return getSignExtendExpr(
              getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);

        ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
        if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
          uint64_t ShlAmt = ShlAmtCI->getZExtValue();
          if (ShlAmt > AShrAmt) {
            // When n > m, use sext(mul(trunc(x), 2^(n-m)))) as the SCEV
            // expression. We already checked that ShlAmt < BitWidth, so
            // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
            // ShlAmt - AShrAmt < BitWidth - AShrAmt.
            APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
                                            ShlAmt - AShrAmt);
            return getSignExtendExpr(
                getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
                           getConstant(Mul)), OuterTy);
          }
        }
      }
      if (BO->IsExact) {
        // Given exact arithmetic in-bounds right-shift by a constant,
        // we can lower it into:  (abs(x) EXACT/u (1<<C)) * signum(x)
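        // e.g. ashr exact i32 -16, 2 == (abs(-16) /u 4) * signum(-16)
        //                            == (16 /u 4) * -1 == -4.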
        const SCEV *X = getSCEV(BO->LHS);
        const SCEV *AbsX = getAbsExpr(X, /*IsNSW=*/false);
        APInt Mult = APInt::getOneBitSet(BitWidth, AShrAmt);
        const SCEV *Div = getUDivExactExpr(AbsX, getConstant(Mult));
        return getMulExpr(Div, getSignumExpr(X), SCEV::FlagNSW);
      }
      break;
    }
    }
  }

  switch (U->getOpcode()) {
  case Instruction::Trunc:
    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::ZExt:
    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::SExt:
    if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
      // The NSW flag of a subtract does not always survive the conversion to
      // A + (-1)*B. By pushing sign extension onto its operands we are much
      // more likely to preserve NSW and allow later AddRec optimisations.
      //
      // NOTE: This is effectively duplicating this logic from getSignExtend:
      //   sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
      // but by that point the NSW information has potentially been lost.
      if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
        Type *Ty = U->getType();
        auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
        auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
        return getMinusSCEV(V1, V2, SCEV::FlagNSW);
      }
    }
    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::BitCast:
    // BitCasts are no-op casts so we just eliminate the cast.
    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
      return getSCEV(U->getOperand(0));
    break;

  case Instruction::PtrToInt: {
    // Pointer to integer cast is straight-forward, so do model it.
    Value *Ptr = U->getOperand(0);
    const SCEV *Op = getSCEV(Ptr);
    Type *DstIntTy = U->getType();
    // SCEV doesn't have a constant pointer expression type, but it supports
    // the nullptr constant (and only that one), which is modelled in SCEV as
    // a zero integer constant. So just skip the ptrtoint cast for constants.
    if (isa<SCEVConstant>(Op))
      return getTruncateOrZeroExtend(Op, DstIntTy);
    Type *PtrTy = Ptr->getType();
    Type *IntPtrTy = getDataLayout().getIntPtrType(PtrTy);
    // But only if the effective SCEV (integer) type is wide enough to
    // represent all possible pointer values.
    if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(PtrTy)) !=
        getDataLayout().getTypeSizeInBits(IntPtrTy))
      return getUnknown(V);
    return getPtrToIntExpr(Op, DstIntTy);
  }
  case Instruction::IntToPtr:
    // Just don't deal with inttoptr casts.
    return getUnknown(V);

  case Instruction::SDiv:
    // If both operands are non-negative, this is just a udiv.
    if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
        isKnownNonNegative(getSCEV(U->getOperand(1))))
      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
    break;

  case Instruction::SRem:
    // If both operands are non-negative, this is just a urem.
    if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
        isKnownNonNegative(getSCEV(U->getOperand(1))))
      return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
    break;

  case Instruction::GetElementPtr:
    return createNodeForGEP(cast<GEPOperator>(U));

  case Instruction::PHI:
    return createNodeForPHI(cast<PHINode>(U));

  case Instruction::Select:
    // U can also be a select constant expr, which we let fall through. Since
    // createNodeForSelect only works for a condition that is an `ICmpInst`,
    // and constant expressions cannot have instructions as operands, we'd
    // have returned getUnknown for a select constant expression anyway.
    if (isa<Instruction>(U))
      return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
                                      U->getOperand(1), U->getOperand(2));
    break;

  case Instruction::Call:
  case Instruction::Invoke:
    if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
      return getSCEV(RV);

    if (auto *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::abs:
        return getAbsExpr(
            getSCEV(II->getArgOperand(0)),
            /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne());
      case Intrinsic::umax:
        return getUMaxExpr(getSCEV(II->getArgOperand(0)),
                           getSCEV(II->getArgOperand(1)));
      case Intrinsic::umin:
        return getUMinExpr(getSCEV(II->getArgOperand(0)),
                           getSCEV(II->getArgOperand(1)));
      case Intrinsic::smax:
        return getSMaxExpr(getSCEV(II->getArgOperand(0)),
                           getSCEV(II->getArgOperand(1)));
      case Intrinsic::smin:
        return getSMinExpr(getSCEV(II->getArgOperand(0)),
                           getSCEV(II->getArgOperand(1)));
      case Intrinsic::usub_sat: {
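        // usub.sat(X, Y) == X - umin(X, Y), and that subtraction can never
        // wrap, e.g. usub.sat(i8 5, 9) == 5 - umin(5, 9) == 0.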
        const SCEV *X = getSCEV(II->getArgOperand(0));
        const SCEV *Y = getSCEV(II->getArgOperand(1));
        const SCEV *ClampedY = getUMinExpr(X, Y);
        return getMinusSCEV(X, ClampedY, SCEV::FlagNUW);
      }
      case Intrinsic::uadd_sat: {
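        // uadd.sat(X, Y) == umin(X, ~Y) + Y, and that addition can never
        // wrap, e.g. uadd.sat(i8 250, 10) == umin(250, 245) + 10 == 255.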
        const SCEV *X = getSCEV(II->getArgOperand(0));
        const SCEV *Y = getSCEV(II->getArgOperand(1));
        const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y));
        return getAddExpr(ClampedX, Y, SCEV::FlagNUW);
      }
      case Intrinsic::start_loop_iterations:
        // A start_loop_iterations is just equivalent to the first operand for
        // SCEV purposes.
        return getSCEV(II->getArgOperand(0));
      default:
        break;
      }
    }
    break;
  }

  return getUnknown(V);
}

//===----------------------------------------------------------------------===//
//                   Iteration Count Computation Code
//

static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
  if (!ExitCount)
    return 0;

  ConstantInt *ExitConst = ExitCount->getValue();

  // Guard against huge trip counts.
  if (ExitConst->getValue().getActiveBits() > 32)
    return 0;

  // In case of integer overflow, this returns 0, which is correct.
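  // e.g. a backedge-taken count of UINT32_MAX means a trip count of 2^32,
  // which wraps to 0 in the unsigned addition below.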
  return ((unsigned)ExitConst->getZExtValue()) + 1;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripCount(L, ExitingBB);

  // No trip count information for multiple exits.
  return 0;
}

unsigned
ScalarEvolution::getSmallConstantTripCount(const Loop *L,
                                           const BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEVConstant *ExitCount =
      dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  return getConstantTripCount(ExitCount);
}

unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
  const auto *MaxExitCount =
      dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
  return getConstantTripCount(MaxExitCount);
}

unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripMultiple(L, ExitingBB);

  // No trip multiple information for multiple exits.
  return 0;
}

/// Returns the largest constant divisor of the trip count of this loop as a
/// normal unsigned value, if possible. This means that the actual trip count
/// is always a multiple of the returned value (don't forget the trip count
/// could very well be zero as well!).
///
/// Returns 1 if the trip count is unknown or not guaranteed to be a multiple
/// of a constant (which is also the case if the trip count is simply a
/// constant; use getSmallConstantTripCount for that case). It will also
/// return 1 if the trip count is very large (>= 2^32).
///
/// As explained in the comments for getSmallConstantTripCount, this assumes
/// that control exits the loop via ExitingBlock.
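///
/// For example, a trip count expression of 8 * %n yields 8: three trailing
/// zero bits can be proven via GetMinTrailingZeros even though the trip
/// count itself is unknown.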
unsigned
ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
                                              const BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEV *ExitCount = getExitCount(L, ExitingBlock);
  if (ExitCount == getCouldNotCompute())
    return 1;

  // Get the trip count from the BE count by adding 1.
  const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));

  const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
  if (!TC)
    // Attempt to factor more general cases. Returns the greatest power of
    // two divisor. If overflow happens, the trip count expression is still
    // divisible by the greatest power of 2 divisor returned.
    return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr));

  ConstantInt *Result = TC->getValue();

  // Guard against huge trip counts (this requires checking
  // for zero to handle the case where the trip count == -1 and the
  // addition wraps).
  if (!Result || Result->getValue().getActiveBits() > 32 ||
      Result->getValue().getActiveBits() == 0)
    return 1;

  return (unsigned)Result->getZExtValue();
}

const SCEV *ScalarEvolution::getExitCount(const Loop *L,
                                          const BasicBlock *ExitingBlock,
                                          ExitCountKind Kind) {
  switch (Kind) {
  case Exact:
  case SymbolicMaximum:
    return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
  case ConstantMaximum:
    return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this);
  }
  llvm_unreachable("Invalid ExitCountKind!");
}

const SCEV *
ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
                                                 SCEVUnionPredicate &Preds) {
  return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
}

const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
                                                   ExitCountKind Kind) {
  switch (Kind) {
  case Exact:
    return getBackedgeTakenInfo(L).getExact(L, this);
  case ConstantMaximum:
    return getBackedgeTakenInfo(L).getConstantMax(this);
  case SymbolicMaximum:
    return getBackedgeTakenInfo(L).getSymbolicMax(L, this);
  }
  llvm_unreachable("Invalid ExitCountKind!");
}

bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
  return getBackedgeTakenInfo(L).isConstantMaxOrZero(this);
}

/// Push PHI nodes in the header of the given loop onto the given Worklist.
static void
PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
  BasicBlock *Header = L->getHeader();

  // Push all Loop-header PHIs onto the Worklist stack.
  for (PHINode &PN : Header->phis())
    Worklist.push_back(&PN);
}
6937
6938 const ScalarEvolution::BackedgeTakenInfo &
6939 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
6940 auto &BTI = getBackedgeTakenInfo(L);
6941 if (BTI.hasFullInfo())
6942 return BTI;
6943
6944 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
6945
6946 if (!Pair.second)
6947 return Pair.first->second;
6948
6949 BackedgeTakenInfo Result =
6950 computeBackedgeTakenCount(L, /*AllowPredicates=*/true);
6951
6952 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
6953 }
6954
6955 ScalarEvolution::BackedgeTakenInfo &
6956 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
6957 // Initially insert an invalid entry for this loop. If the insertion
6958 // succeeds, proceed to actually compute a backedge-taken count and
6959 // update the value. The temporary CouldNotCompute value tells SCEV
6960 // code elsewhere that it shouldn't attempt to request a new
6961 // backedge-taken count, which could result in infinite recursion.
6962 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
6963 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
6964 if (!Pair.second)
6965 return Pair.first->second;
6966
6967 // computeBackedgeTakenCount may allocate memory for its result. Inserting it
6968 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
6969 // must be cleared in this scope.
6970 BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
6971
6972 // In a product build, these statistics are unused.
6973 (void)NumTripCountsComputed;
6974 (void)NumTripCountsNotComputed;
6975 #if LLVM_ENABLE_STATS || !defined(NDEBUG)
6976 const SCEV *BEExact = Result.getExact(L, this);
6977 if (BEExact != getCouldNotCompute()) {
6978 assert(isLoopInvariant(BEExact, L) &&
6979 isLoopInvariant(Result.getConstantMax(this), L) &&
6980 "Computed backedge-taken count isn't loop invariant for loop!");
6981 ++NumTripCountsComputed;
6982 } else if (Result.getConstantMax(this) == getCouldNotCompute() &&
6983 isa<PHINode>(L->getHeader()->begin())) {
6984 // Only count loops that have phi nodes as not being computable.
6985 ++NumTripCountsNotComputed;
6986 }
6987 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG)
6988
6989 // Now that we know more about the trip count for this loop, forget any
6990 // existing SCEV values for PHI nodes in this loop since they are only
6991 // conservative estimates made without the benefit of trip count
6992 // information. This is similar to the code in forgetLoop, except that
6993 // it handles SCEVUnknown PHI nodes specially.
6994 if (Result.hasAnyInfo()) {
6995 SmallVector<Instruction *, 16> Worklist;
6996 PushLoopPHIs(L, Worklist);
6997
6998 SmallPtrSet<Instruction *, 8> Discovered;
6999 while (!Worklist.empty()) {
7000 Instruction *I = Worklist.pop_back_val();
7001
7002 ValueExprMapType::iterator It =
7003 ValueExprMap.find_as(static_cast<Value *>(I));
7004 if (It != ValueExprMap.end()) {
7005 const SCEV *Old = It->second;
7006
7007 // SCEVUnknown for a PHI either means that it has an unrecognized
7008 // structure, or it's a PHI that's in the process of being computed
7009 // by createNodeForPHI. In the former case, additional loop trip
7010 // count information isn't going to change anything. In the latter
7011 // case, createNodeForPHI will perform the necessary updates on its
7012 // own when it gets to that point.
7013 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
7014 eraseValueFromMap(It->first);
7015 forgetMemoizedResults(Old);
7016 }
7017 if (PHINode *PN = dyn_cast<PHINode>(I))
7018 ConstantEvolutionLoopExitValue.erase(PN);
7019 }
7020
7021 // Since we don't need to invalidate anything for correctness and we're
7022 // only invalidating to make SCEV's results more precise, we get to stop
7023 // early to avoid invalidating too much. This is especially important in
7024 // cases like:
7025 //
7026 // %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
7027 // loop0:
7028 // %pn0 = phi
7029 // ...
7030 // loop1:
7031 // %pn1 = phi
7032 // ...
7033 //
7034 // where both loop0's and loop1's backedge-taken counts use the SCEV
7035 // expression for %v. If we don't have the early stop below then in cases
7036 // like the above, getBackedgeTakenInfo(loop1) will clear out the trip
7037 // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip
7038 // count for loop1, effectively nullifying SCEV's trip count cache.
7039 for (auto *U : I->users())
7040 if (auto *I = dyn_cast<Instruction>(U)) {
7041 auto *LoopForUser = LI.getLoopFor(I->getParent());
7042 if (LoopForUser && L->contains(LoopForUser) &&
7043 Discovered.insert(I).second)
7044 Worklist.push_back(I);
7045 }
7046 }
7047 }
7048
7049 // Re-lookup the insert position, since the call to
7050 // computeBackedgeTakenCount above could result in a
7051 // recursive call to getBackedgeTakenInfo (on a different
7052 // loop), which would invalidate the iterator computed
7053 // earlier.
7054 return BackedgeTakenCounts.find(L)->second = std::move(Result);
7055 }
7056
7057 void ScalarEvolution::forgetAllLoops() {
7058 // This method is intended to forget all info about loops. It should
7059 // invalidate caches as if the following happened:
7060 // - The trip counts of all loops have changed arbitrarily
7061 // - Every llvm::Value has been updated in place to produce a different
7062 // result.
7063 BackedgeTakenCounts.clear();
7064 PredicatedBackedgeTakenCounts.clear();
7065 LoopPropertiesCache.clear();
7066 ConstantEvolutionLoopExitValue.clear();
7067 ValueExprMap.clear();
7068 ValuesAtScopes.clear();
7069 LoopDispositions.clear();
7070 BlockDispositions.clear();
7071 UnsignedRanges.clear();
7072 SignedRanges.clear();
7073 ExprValueMap.clear();
7074 HasRecMap.clear();
7075 MinTrailingZerosCache.clear();
7076 PredicatedSCEVRewrites.clear();
7077 }
7078
7079 void ScalarEvolution::forgetLoop(const Loop *L) {
7080 // Drop any stored trip count value.
7081 auto RemoveLoopFromBackedgeMap =
7082 [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) {
7083 auto BTCPos = Map.find(L);
7084 if (BTCPos != Map.end()) {
7085 BTCPos->second.clear();
7086 Map.erase(BTCPos);
7087 }
7088 };
7089
7090 SmallVector<const Loop *, 16> LoopWorklist(1, L);
7091 SmallVector<Instruction *, 32> Worklist;
7092 SmallPtrSet<Instruction *, 16> Visited;
7093
7094 // Iterate over all the loops and sub-loops to drop SCEV information.
7095 while (!LoopWorklist.empty()) {
7096 auto *CurrL = LoopWorklist.pop_back_val();
7097
7098 RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL);
7099 RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL);
7100
7101 // Drop information about predicated SCEV rewrites for this loop.
7102 for (auto I = PredicatedSCEVRewrites.begin();
7103 I != PredicatedSCEVRewrites.end();) {
7104 std::pair<const SCEV *, const Loop *> Entry = I->first;
7105 if (Entry.second == CurrL)
7106 PredicatedSCEVRewrites.erase(I++);
7107 else
7108 ++I;
7109 }
7110
7111 auto LoopUsersItr = LoopUsers.find(CurrL);
7112 if (LoopUsersItr != LoopUsers.end()) {
7113 for (auto *S : LoopUsersItr->second)
7114 forgetMemoizedResults(S);
7115 LoopUsers.erase(LoopUsersItr);
7116 }
7117
7118 // Drop information about expressions based on loop-header PHIs.
7119 PushLoopPHIs(CurrL, Worklist);
7120
7121 while (!Worklist.empty()) {
7122 Instruction *I = Worklist.pop_back_val();
7123 if (!Visited.insert(I).second)
7124 continue;
7125
7126 ValueExprMapType::iterator It =
7127 ValueExprMap.find_as(static_cast<Value *>(I));
7128 if (It != ValueExprMap.end()) {
7129 eraseValueFromMap(It->first);
7130 forgetMemoizedResults(It->second);
7131 if (PHINode *PN = dyn_cast<PHINode>(I))
7132 ConstantEvolutionLoopExitValue.erase(PN);
7133 }
7134
7135 PushDefUseChildren(I, Worklist);
7136 }
7137
7138 LoopPropertiesCache.erase(CurrL);
7139 // Forget all contained loops too, to avoid dangling entries in the
7140 // ValuesAtScopes map.
7141 LoopWorklist.append(CurrL->begin(), CurrL->end());
7142 }
7143 }
7144
7145 void ScalarEvolution::forgetTopmostLoop(const Loop *L) {
7146 while (Loop *Parent = L->getParentLoop())
7147 L = Parent;
7148 forgetLoop(L);
7149 }
7150
7151 void ScalarEvolution::forgetValue(Value *V) {
7152 Instruction *I = dyn_cast<Instruction>(V);
7153 if (!I) return;
7154
7155 // Drop information about expressions based on loop-header PHIs.
7156 SmallVector<Instruction *, 16> Worklist;
7157 Worklist.push_back(I);
7158
7159 SmallPtrSet<Instruction *, 8> Visited;
7160 while (!Worklist.empty()) {
7161 I = Worklist.pop_back_val();
7162 if (!Visited.insert(I).second)
7163 continue;
7164
7165 ValueExprMapType::iterator It =
7166 ValueExprMap.find_as(static_cast<Value *>(I));
7167 if (It != ValueExprMap.end()) {
7168 eraseValueFromMap(It->first);
7169 forgetMemoizedResults(It->second);
7170 if (PHINode *PN = dyn_cast<PHINode>(I))
7171 ConstantEvolutionLoopExitValue.erase(PN);
7172 }
7173
7174 PushDefUseChildren(I, Worklist);
7175 }
7176 }
7177
7178 void ScalarEvolution::forgetLoopDispositions(const Loop *L) {
7179 LoopDispositions.clear();
7180 }
7181
7182 /// Get the exact loop backedge taken count considering all loop exits. A
7183 /// computable result can only be returned for loops with all exiting blocks
7184 /// dominating the latch. howFarToZero assumes that the limit of each loop test
7185 /// is never skipped. This is a valid assumption as long as the loop exits via
7186 /// that test. For precise results, it is the caller's responsibility to specify
7187 /// the relevant loop exiting block using getExact(ExitingBlock, SE).
7188 const SCEV *
7189 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE,
7190 SCEVUnionPredicate *Preds) const {
7191 // If any exits were not computable, the loop is not computable.
7192 if (!isComplete() || ExitNotTaken.empty())
7193 return SE->getCouldNotCompute();
7194
7195 const BasicBlock *Latch = L->getLoopLatch();
7196 // All exiting blocks we have collected must dominate the only backedge.
7197 if (!Latch)
7198 return SE->getCouldNotCompute();
7199
7200 // All exiting blocks we have gathered dominate the loop's latch, so the
7201 // exact trip count is simply the minimum of all the calculated exit counts.
7202 SmallVector<const SCEV *, 2> Ops;
7203 for (auto &ENT : ExitNotTaken) {
7204 const SCEV *BECount = ENT.ExactNotTaken;
7205 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
7206 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
7207 "We should only have known counts for exiting blocks that dominate "
7208 "latch!");
7209
7210 Ops.push_back(BECount);
7211
7212 if (Preds && !ENT.hasAlwaysTruePredicate())
7213 Preds->add(ENT.Predicate.get());
7214
7215 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
7216 "Predicate should be always true!");
7217 }
7218
7219 return SE->getUMinFromMismatchedTypes(Ops);
7220 }
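// For example, in a hypothetical loop with two computed exiting blocks, both
// dominating the latch, with exact not-taken counts %n and %m, the loop stops
// at whichever exit is reached first, so the function above returns
// umin(%n, %m) as the overall backedge-taken count.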
7221
7222 /// Get the exact not taken count for this loop exit.
7223 const SCEV *
7224 ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock,
7225 ScalarEvolution *SE) const {
7226 for (auto &ENT : ExitNotTaken)
7227 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
7228 return ENT.ExactNotTaken;
7229
7230 return SE->getCouldNotCompute();
7231 }
7232
7233 const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
7234 const BasicBlock *ExitingBlock, ScalarEvolution *SE) const {
7235 for (auto &ENT : ExitNotTaken)
7236 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
7237 return ENT.MaxNotTaken;
7238
7239 return SE->getCouldNotCompute();
7240 }
7241
7242 /// getConstantMax - Get the constant max backedge taken count for the loop.
7243 const SCEV *
7244 ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const {
7245 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
7246 return !ENT.hasAlwaysTruePredicate();
7247 };
7248
7249 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getConstantMax())
7250 return SE->getCouldNotCompute();
7251
7252 assert((isa<SCEVCouldNotCompute>(getConstantMax()) ||
7253 isa<SCEVConstant>(getConstantMax())) &&
7254 "No point in having a non-constant max backedge taken count!");
7255 return getConstantMax();
7256 }
7257
7258 const SCEV *
7259 ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L,
7260 ScalarEvolution *SE) {
7261 if (!SymbolicMax)
7262 SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L);
7263 return SymbolicMax;
7264 }
7265
7266 bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
7267 ScalarEvolution *SE) const {
7268 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
7269 return !ENT.hasAlwaysTruePredicate();
7270 };
7271 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue);
7272 }
7273
7274 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S,
7275 ScalarEvolution *SE) const {
7276 if (getConstantMax() && getConstantMax() != SE->getCouldNotCompute() &&
7277 SE->hasOperand(getConstantMax(), S))
7278 return true;
7279
7280 for (auto &ENT : ExitNotTaken)
7281 if (ENT.ExactNotTaken != SE->getCouldNotCompute() &&
7282 SE->hasOperand(ENT.ExactNotTaken, S))
7283 return true;
7284
7285 return false;
7286 }
7287
7288 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
7289 : ExactNotTaken(E), MaxNotTaken(E) {
7290 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
7291 isa<SCEVConstant>(MaxNotTaken)) &&
7292 "No point in having a non-constant max backedge taken count!");
7293 }
7294
7295 ScalarEvolution::ExitLimit::ExitLimit(
7296 const SCEV *E, const SCEV *M, bool MaxOrZero,
7297 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList)
7298 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) {
7299 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) ||
7300 !isa<SCEVCouldNotCompute>(MaxNotTaken)) &&
7301 "Exact is not allowed to be less precise than Max");
7302 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
7303 isa<SCEVConstant>(MaxNotTaken)) &&
7304 "No point in having a non-constant max backedge taken count!");
7305 for (auto *PredSet : PredSetList)
7306 for (auto *P : *PredSet)
7307 addPredicate(P);
7308 }
7309
7310 ScalarEvolution::ExitLimit::ExitLimit(
7311 const SCEV *E, const SCEV *M, bool MaxOrZero,
7312 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet)
7313 : ExitLimit(E, M, MaxOrZero, {&PredSet}) {
7314 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
7315 isa<SCEVConstant>(MaxNotTaken)) &&
7316 "No point in having a non-constant max backedge taken count!");
7317 }
7318
7319 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M,
7320 bool MaxOrZero)
7321 : ExitLimit(E, M, MaxOrZero, None) {
7322 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
7323 isa<SCEVConstant>(MaxNotTaken)) &&
7324 "No point in having a non-constant max backedge taken count!");
7325 }
7326
7327 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
7328 /// computable exit into a persistent ExitNotTakenInfo array.
7329 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
7330 ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts,
7331 bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero)
7332 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
7333 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
7334
7335 ExitNotTaken.reserve(ExitCounts.size());
7336 std::transform(
7337 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
7338 [&](const EdgeExitInfo &EEI) {
7339 BasicBlock *ExitBB = EEI.first;
7340 const ExitLimit &EL = EEI.second;
7341 if (EL.Predicates.empty())
7342 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
7343 nullptr);
7344
7345 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
7346 for (auto *Pred : EL.Predicates)
7347 Predicate->add(Pred);
7348
7349 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
7350 std::move(Predicate));
7351 });
7352 assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
7353 isa<SCEVConstant>(ConstantMax)) &&
7354 "No point in having a non-constant max backedge taken count!");
7355 }
7356
7357 /// Invalidate this result and free the ExitNotTakenInfo array.
7358 void ScalarEvolution::BackedgeTakenInfo::clear() {
7359 ExitNotTaken.clear();
7360 }
7361
7362 /// Compute the number of times the backedge of the specified loop will execute.
7363 ScalarEvolution::BackedgeTakenInfo
7364 ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
7365 bool AllowPredicates) {
7366 SmallVector<BasicBlock *, 8> ExitingBlocks;
7367 L->getExitingBlocks(ExitingBlocks);
7368
7369 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
7370
7371 SmallVector<EdgeExitInfo, 4> ExitCounts;
7372 bool CouldComputeBECount = true;
7373 BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
7374 const SCEV *MustExitMaxBECount = nullptr;
7375 const SCEV *MayExitMaxBECount = nullptr;
7376 bool MustExitMaxOrZero = false;
7377
7378 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
7379 // and compute maxBECount.
7380 // Do a union of all the predicates here.
7381 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
7382 BasicBlock *ExitBB = ExitingBlocks[i];
7383
7384 // We canonicalize untaken exits to br (constant); ignore them so that
7385 // proving an exit untaken doesn't negatively impact our ability to reason
7386 // about the loop as a whole.
7387 if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
7388 if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
7389 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
7390 if ((ExitIfTrue && CI->isZero()) || (!ExitIfTrue && CI->isOne()))
7391 continue;
7392 }
7393
7394 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);
7395
7396 assert((AllowPredicates || EL.Predicates.empty()) &&
7397 "Predicated exit limit when predicates are not allowed!");
7398
7399 // 1. For each exit that can be computed, add an entry to ExitCounts.
7400 // CouldComputeBECount is true only if all exits can be computed.
7401 if (EL.ExactNotTaken == getCouldNotCompute())
7402 // We couldn't compute an exact value for this exit, so
7403 // we won't be able to compute an exact value for the loop.
7404 CouldComputeBECount = false;
7405 else
7406 ExitCounts.emplace_back(ExitBB, EL);
7407
7408 // 2. Derive the loop's MaxBECount from each exit's max number of
7409 // non-exiting iterations. Partition the loop exits into two kinds:
7410 // LoopMustExits and LoopMayExits.
7411 //
7412 // If the exit dominates the loop latch, it is a LoopMustExit otherwise it
7413 // is a LoopMayExit. If any computable LoopMustExit is found, then
7414 // MaxBECount is the minimum EL.MaxNotTaken of computable
7415 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
7416 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
7417 // computable EL.MaxNotTaken.
7418 if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
7419 DT.dominates(ExitBB, Latch)) {
7420 if (!MustExitMaxBECount) {
7421 MustExitMaxBECount = EL.MaxNotTaken;
7422 MustExitMaxOrZero = EL.MaxOrZero;
7423 } else {
7424 MustExitMaxBECount =
7425 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
7426 }
7427 } else if (MayExitMaxBECount != getCouldNotCompute()) {
7428 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
7429 MayExitMaxBECount = EL.MaxNotTaken;
7430 else {
7431 MayExitMaxBECount =
7432 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
7433 }
7434 }
7435 }
7436 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
7437 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
7438 // The loop backedge will be taken the maximum or zero times if there's
7439 // a single exit that must be taken the maximum or zero times.
7440 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
7441 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
7442 MaxBECount, MaxOrZero);
7443 }
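// A worked instance of the must-exit/may-exit partition above (hypothetical
// counts): if exit A dominates the latch with MaxNotTaken == 10 and exit B
// does not dominate the latch with MaxNotTaken == 100, A is a LoopMustExit
// that is tested on every iteration, so MaxBECount is 10; B can only make
// the loop terminate earlier, never extend it past a must-exit's bound.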
7444
7445 ScalarEvolution::ExitLimit
7446 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
7447 bool AllowPredicates) {
7448 assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
7449 // If our exiting block does not dominate the latch, then its connection with
7450 // the loop's exit limit may be far from trivial.
7451 const BasicBlock *Latch = L->getLoopLatch();
7452 if (!Latch || !DT.dominates(ExitingBlock, Latch))
7453 return getCouldNotCompute();
7454
7455 bool IsOnlyExit = (L->getExitingBlock() != nullptr);
7456 Instruction *Term = ExitingBlock->getTerminator();
7457 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
7458 assert(BI->isConditional() && "If unconditional, it can't be in loop!");
7459 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
7460 assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
7461 "It should have one successor in loop and one exit block!");
7462 // Proceed to the next level to examine the exit condition expression.
7463 return computeExitLimitFromCond(
7464 L, BI->getCondition(), ExitIfTrue,
7465 /*ControlsExit=*/IsOnlyExit, AllowPredicates);
7466 }
7467
7468 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
7469 // For switch, make sure that there is a single exit from the loop.
7470 BasicBlock *Exit = nullptr;
7471 for (auto *SBB : successors(ExitingBlock))
7472 if (!L->contains(SBB)) {
7473 if (Exit) // Multiple exit successors.
7474 return getCouldNotCompute();
7475 Exit = SBB;
7476 }
7477 assert(Exit && "Exiting block must have at least one exit");
7478 return computeExitLimitFromSingleExitSwitch(L, SI, Exit,
7479 /*ControlsExit=*/IsOnlyExit);
7480 }
7481
7482 return getCouldNotCompute();
7483 }
7484
7485 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond(
7486 const Loop *L, Value *ExitCond, bool ExitIfTrue,
7487 bool ControlsExit, bool AllowPredicates) {
7488 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
7489 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
7490 ControlsExit, AllowPredicates);
7491 }
7492
7493 Optional<ScalarEvolution::ExitLimit>
7494 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond,
7495 bool ExitIfTrue, bool ControlsExit,
7496 bool AllowPredicates) {
7497 (void)this->L;
7498 (void)this->ExitIfTrue;
7499 (void)this->AllowPredicates;
7500
7501 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
7502 this->AllowPredicates == AllowPredicates &&
7503 "Variance in assumed invariant key components!");
7504 auto Itr = TripCountMap.find({ExitCond, ControlsExit});
7505 if (Itr == TripCountMap.end())
7506 return None;
7507 return Itr->second;
7508 }
7509
7510 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond,
7511 bool ExitIfTrue,
7512 bool ControlsExit,
7513 bool AllowPredicates,
7514 const ExitLimit &EL) {
7515 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
7516 this->AllowPredicates == AllowPredicates &&
7517 "Variance in assumed invariant key components!");
7518
7519 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL});
7520 assert(InsertResult.second && "Expected successful insertion!");
7521 (void)InsertResult;
7522 (void)ExitIfTrue;
7523 }
7524
7525 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
7526 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
7527 bool ControlsExit, bool AllowPredicates) {
7528
7529 if (auto MaybeEL =
7530 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
7531 return *MaybeEL;
7532
7533 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue,
7534 ControlsExit, AllowPredicates);
7535 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL);
7536 return EL;
7537 }
7538
7539 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
7540 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
7541 bool ControlsExit, bool AllowPredicates) {
7542 // Handle BinOp conditions (And, Or).
7543 if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
7544 Cache, L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
7545 return *LimitFromBinOp;
7546
7547 // With an icmp, it may be feasible to compute an exact backedge-taken count.
7548 // Proceed to the next level to examine the icmp.
7549 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
7550 ExitLimit EL =
7551 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit);
7552 if (EL.hasFullInfo() || !AllowPredicates)
7553 return EL;
7554
7555 // Try again, but use SCEV predicates this time.
7556 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit,
7557 /*AllowPredicates=*/true);
7558 }
7559
7560 // Check for a constant condition. These are normally stripped out by
7561 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
7562 // preserve the CFG and is temporarily leaving constant conditions
7563 // in place.
7564 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
7565 if (ExitIfTrue == !CI->getZExtValue())
7566 // The backedge is always taken.
7567 return getCouldNotCompute();
7568 else
7569 // The backedge is never taken.
7570 return getZero(CI->getType());
7571 }
7572
7573 // If it's not an integer or pointer comparison then compute it the hard way.
7574 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
7575 }
7576
7577 Optional<ScalarEvolution::ExitLimit>
7578 ScalarEvolution::computeExitLimitFromCondFromBinOp(
7579 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
7580 bool ControlsExit, bool AllowPredicates) {
7581 // Check if the controlling expression for this loop is an And or Or.
7582 Value *Op0, *Op1;
7583 bool IsAnd = false;
7584 if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1))))
7585 IsAnd = true;
7586 else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
7587 IsAnd = false;
7588 else
7589 return None;
7590
7591 // EitherMayExit is true in these two cases:
7592 // br (and Op0 Op1), loop, exit
7593 // br (or Op0 Op1), exit, loop
7594 bool EitherMayExit = IsAnd ^ ExitIfTrue;
7595 ExitLimit EL0 = computeExitLimitFromCondCached(Cache, L, Op0, ExitIfTrue,
7596 ControlsExit && !EitherMayExit,
7597 AllowPredicates);
7598 ExitLimit EL1 = computeExitLimitFromCondCached(Cache, L, Op1, ExitIfTrue,
7599 ControlsExit && !EitherMayExit,
7600 AllowPredicates);
7601
7602 // Be robust against unsimplified IR for the form "op i1 X, NeutralElement"
7603 const Constant *NeutralElement = ConstantInt::get(ExitCond->getType(), IsAnd);
7604 if (isa<ConstantInt>(Op1))
7605 return Op1 == NeutralElement ? EL0 : EL1;
7606 if (isa<ConstantInt>(Op0))
7607 return Op0 == NeutralElement ? EL1 : EL0;
7608
7609 const SCEV *BECount = getCouldNotCompute();
7610 const SCEV *MaxBECount = getCouldNotCompute();
7611 if (EitherMayExit) {
7612 // Both conditions must be the same for the loop to continue executing.
7613 // Choose the less conservative count.
7614 // If ExitCond is a short-circuit form (select), using
7615 // umin(EL0.ExactNotTaken, EL1.ExactNotTaken) is unsafe in general.
7616 // For detailed examples, see
7617 // test/Analysis/ScalarEvolution/exit-count-select.ll
7618 bool PoisonSafe = isa<BinaryOperator>(ExitCond);
7619 if (!PoisonSafe)
7620 // Even if ExitCond is select, we can safely derive BECount using both
7621 // EL0 and EL1 in these cases:
7622 // (1) EL0.ExactNotTaken is non-zero
7623 // (2) EL1.ExactNotTaken is non-poison
7624 // (3) EL0.ExactNotTaken is zero (BECount should be simply zero and
7625 // it cannot be umin(0, ..))
7626 // The PoisonSafe assignment below is simplified and the assertion after
7627 // BECount calculation fully guarantees the condition (3).
7628 PoisonSafe = isa<SCEVConstant>(EL0.ExactNotTaken) ||
7629 isa<SCEVConstant>(EL1.ExactNotTaken);
7630 if (EL0.ExactNotTaken != getCouldNotCompute() &&
7631 EL1.ExactNotTaken != getCouldNotCompute() && PoisonSafe) {
7632 BECount =
7633 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
7634
7635 // If EL0.ExactNotTaken was zero and ExitCond was a short-circuit form,
7636 // it should have been simplified to zero (see the condition (3) above)
7637 assert(!isa<BinaryOperator>(ExitCond) || !EL0.ExactNotTaken->isZero() ||
7638 BECount->isZero());
7639 }
7640 if (EL0.MaxNotTaken == getCouldNotCompute())
7641 MaxBECount = EL1.MaxNotTaken;
7642 else if (EL1.MaxNotTaken == getCouldNotCompute())
7643 MaxBECount = EL0.MaxNotTaken;
7644 else
7645 MaxBECount = getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
7646 } else {
7647 // Both conditions must be the same at the same time for the loop to exit.
7648 // For now, be conservative.
7649 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
7650 BECount = EL0.ExactNotTaken;
7651 }
7652
7653 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
7654 // to be more aggressive when computing BECount than when computing
7655 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and
7656 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken
7657 // to not.
7658 if (isa<SCEVCouldNotCompute>(MaxBECount) &&
7659 !isa<SCEVCouldNotCompute>(BECount))
7660 MaxBECount = getConstant(getUnsignedRangeMax(BECount));
7661
7662 return ExitLimit(BECount, MaxBECount, false,
7663 { &EL0.Predicates, &EL1.Predicates });
7664 }
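// Example of the combination logic above (hypothetical operands): for
// "br i1 (and i1 %c0, %c1), label %loop, label %exit", ExitIfTrue is false
// and EitherMayExit is true; if %c0 admits an exact not-taken count of 10
// and %c1 one of 20, the loop runs only while both conditions hold, so the
// combined backedge-taken count is umin(10, 20) == 10. If ExitCond were the
// select (short-circuit) form instead, the umin would only be formed under
// the poison-safety conditions documented above.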
7665
7666 ScalarEvolution::ExitLimit
7667 ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
7668 ICmpInst *ExitCond,
7669 bool ExitIfTrue,
7670 bool ControlsExit,
7671 bool AllowPredicates) {
7672 // If the condition was exit on true, convert the condition to exit on false
7673 ICmpInst::Predicate Pred;
7674 if (!ExitIfTrue)
7675 Pred = ExitCond->getPredicate();
7676 else
7677 Pred = ExitCond->getInversePredicate();
7678 const ICmpInst::Predicate OriginalPred = Pred;
7679
7680 // Handle common loops like: for (X = "string"; *X; ++X)
7681 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
7682 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
7683 ExitLimit ItCnt =
7684 computeLoadConstantCompareExitLimit(LI, RHS, L, Pred);
7685 if (ItCnt.hasAnyInfo())
7686 return ItCnt;
7687 }
7688
7689 const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
7690 const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
7691
7692 // Try to evaluate any dependencies out of the loop.
7693 LHS = getSCEVAtScope(LHS, L);
7694 RHS = getSCEVAtScope(RHS, L);
7695
7696 // At this point, we would like to compute how many iterations of the
7697 // loop the predicate will return true for these inputs.
7698 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
7699 // If there is a loop-invariant, force it into the RHS.
7700 std::swap(LHS, RHS);
7701 Pred = ICmpInst::getSwappedPredicate(Pred);
7702 }
7703
7704 // Simplify the operands before analyzing them.
7705 (void)SimplifyICmpOperands(Pred, LHS, RHS);
7706
7707 // If we have a comparison of a chrec against a constant, try to use value
7708 // ranges to answer this query.
7709 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
7710 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
7711 if (AddRec->getLoop() == L) {
7712 // Form the constant range.
7713 ConstantRange CompRange =
7714 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt());
7715
7716 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
7717 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
7718 }
7719
7720 switch (Pred) {
7721 case ICmpInst::ICMP_NE: { // while (X != Y)
7722 // Convert to: while (X-Y != 0)
7723 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit,
7724 AllowPredicates);
7725 if (EL.hasAnyInfo()) return EL;
7726 break;
7727 }
7728 case ICmpInst::ICMP_EQ: { // while (X == Y)
7729 // Convert to: while (X-Y == 0)
7730 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
7731 if (EL.hasAnyInfo()) return EL;
7732 break;
7733 }
7734 case ICmpInst::ICMP_SLT:
7735 case ICmpInst::ICMP_ULT: { // while (X < Y)
7736 bool IsSigned = Pred == ICmpInst::ICMP_SLT;
7737 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit,
7738 AllowPredicates);
7739 if (EL.hasAnyInfo()) return EL;
7740 break;
7741 }
7742 case ICmpInst::ICMP_SGT:
7743 case ICmpInst::ICMP_UGT: { // while (X > Y)
7744 bool IsSigned = Pred == ICmpInst::ICMP_SGT;
7745 ExitLimit EL =
7746 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit,
7747 AllowPredicates);
7748 if (EL.hasAnyInfo()) return EL;
7749 break;
7750 }
7751 default:
7752 break;
7753 }
7754
7755 auto *ExhaustiveCount =
7756 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
7757
7758 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
7759 return ExhaustiveCount;
7760
7761 return computeShiftCompareExitLimit(ExitCond->getOperand(0),
7762 ExitCond->getOperand(1), L, OriginalPred);
7763 }
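// For instance, a hypothetical loop guarded by "while (%i != %n)" arrives
// here as ICMP_NE and is handled by howFarToZero on (%i - %n), while
// "while (%i < %n)" is routed to howManyLessThans. Only when every symbolic
// strategy fails does the exhaustive brute-force evaluation get used.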
7764
7765 ScalarEvolution::ExitLimit
7766 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
7767 SwitchInst *Switch,
7768 BasicBlock *ExitingBlock,
7769 bool ControlsExit) {
7770 assert(!L->contains(ExitingBlock) && "Not an exiting block!");
7771
7772 // Give up if the exit is the default dest of a switch.
7773 if (Switch->getDefaultDest() == ExitingBlock)
7774 return getCouldNotCompute();
7775
7776 assert(L->contains(Switch->getDefaultDest()) &&
7777 "Default case must not exit the loop!");
7778 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
7779 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));
7780
7781 // while (X != Y) --> while (X-Y != 0)
7782 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
7783 if (EL.hasAnyInfo())
7784 return EL;
7785
7786 return getCouldNotCompute();
7787 }
7788
7789 static ConstantInt *
7790 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
7791 ScalarEvolution &SE) {
7792 const SCEV *InVal = SE.getConstant(C);
7793 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
7794 assert(isa<SCEVConstant>(Val) &&
7795 "Evaluation of SCEV at constant didn't fold correctly?");
7796 return cast<SCEVConstant>(Val)->getValue();
7797 }
7798
7799 /// Given an exit condition of 'icmp op load X, cst', try to see if we can
7800 /// compute the backedge execution count.
7801 ScalarEvolution::ExitLimit
7802 ScalarEvolution::computeLoadConstantCompareExitLimit(
7803 LoadInst *LI,
7804 Constant *RHS,
7805 const Loop *L,
7806 ICmpInst::Predicate predicate) {
7807 if (LI->isVolatile()) return getCouldNotCompute();
7808
7809 // Check to see if the loaded pointer is a getelementptr of a global.
7810 // TODO: Use SCEV instead of manually grubbing with GEPs.
7811 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
7812 if (!GEP) return getCouldNotCompute();
7813
7814 // Make sure that it is really a constant global we are gepping, with an
7815 // initializer, and make sure the first IDX is really 0.
7816 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
7817 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
7818 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
7819 !cast<Constant>(GEP->getOperand(1))->isNullValue())
7820 return getCouldNotCompute();
7821
7822 // Okay, we allow one non-constant index into the GEP instruction.
7823 Value *VarIdx = nullptr;
7824 std::vector<Constant*> Indexes;
7825 unsigned VarIdxNum = 0;
7826 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
7827 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
7828 Indexes.push_back(CI);
7829 } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
7830 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
7831 VarIdx = GEP->getOperand(i);
7832 VarIdxNum = i-2;
7833 Indexes.push_back(nullptr);
7834 }
7835
7836 // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
7837 if (!VarIdx)
7838 return getCouldNotCompute();
7839
7840 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
7841 // Check to see if X is a loop-variant value now.
7842 const SCEV *Idx = getSCEV(VarIdx);
7843 Idx = getSCEVAtScope(Idx, L);
7844
7845 // We can only recognize very limited forms of loop index expressions, in
7846 // particular, only affine AddRec's like {C1,+,C2}.
7847 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
7848 if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
7849 !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
7850 !isa<SCEVConstant>(IdxExpr->getOperand(1)))
7851 return getCouldNotCompute();
7852
7853 unsigned MaxSteps = MaxBruteForceIterations;
7854 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
7855 ConstantInt *ItCst = ConstantInt::get(
7856 cast<IntegerType>(IdxExpr->getType()), IterationNum);
7857 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
7858
7859 // Form the GEP offset.
7860 Indexes[VarIdxNum] = Val;
7861
7862 Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
7863 Indexes);
7864 if (!Result) break; // Cannot compute!
7865
7866 // Evaluate the condition for this iteration.
7867 Result = ConstantExpr::getICmp(predicate, Result, RHS);
7868 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
7869 if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
7870 ++NumArrayLenItCounts;
7871 return getConstant(ItCst); // Found terminating iteration!
7872 }
7873 }
7874 return getCouldNotCompute();
7875 }
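// A sketch of the idiom matched above, in C terms (hypothetical source):
// "for (i = 0; table[i] != 0; ++i)" over a constant global array "table".
// The index {0,+,1} is an affine AddRec with constant operands, so the load
// on each iteration can be constant-folded and compared against RHS; the
// first iteration on which the continue-condition folds to false is returned
// as the exact backedge-taken count.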
7876
7877 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
7878 Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
7879 ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
7880 if (!RHS)
7881 return getCouldNotCompute();
7882
7883 const BasicBlock *Latch = L->getLoopLatch();
7884 if (!Latch)
7885 return getCouldNotCompute();
7886
7887 const BasicBlock *Predecessor = L->getLoopPredecessor();
7888 if (!Predecessor)
7889 return getCouldNotCompute();
7890
7891 // Return true if V is of the form "LHS `shift_op` <positive constant>".
7892 // Return LHS in OutLHS and shift_op in OutOpCode.
7893 auto MatchPositiveShift =
7894 [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {
7895
7896 using namespace PatternMatch;
7897
7898 ConstantInt *ShiftAmt;
7899 if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7900 OutOpCode = Instruction::LShr;
7901 else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7902 OutOpCode = Instruction::AShr;
7903 else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7904 OutOpCode = Instruction::Shl;
7905 else
7906 return false;
7907
7908 return ShiftAmt->getValue().isStrictlyPositive();
7909 };
7910
7911 // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in
7912 //
7913 // loop:
7914 // %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
7915 // %iv.shifted = lshr i32 %iv, <positive constant>
7916 //
7917 // Return true on a successful match. Return the corresponding PHI node (%iv
7918 // above) in PNOut and the opcode of the shift operation in OpCodeOut.
7919 auto MatchShiftRecurrence =
7920 [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
7921 Optional<Instruction::BinaryOps> PostShiftOpCode;
7922
7923 {
7924 Instruction::BinaryOps OpC;
7925 Value *V;
7926
7927 // If we encounter a shift instruction, "peel off" the shift operation,
7928 // and remember that we did so. Later when we inspect %iv's backedge
7929 // value, we will make sure that the backedge value uses the same
7930 // operation.
7931 //
7932 // Note: the peeled shift operation does not have to be the same
7933 // instruction as the one feeding into the PHI's backedge value. We only
7934 // really care about it being the same *kind* of shift instruction --
7935 // that's all that is required for our later inferences to hold.
7936 if (MatchPositiveShift(LHS, V, OpC)) {
7937 PostShiftOpCode = OpC;
7938 LHS = V;
7939 }
7940 }
7941
7942 PNOut = dyn_cast<PHINode>(LHS);
7943 if (!PNOut || PNOut->getParent() != L->getHeader())
7944 return false;
7945
7946 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
7947 Value *OpLHS;
7948
7949 return
7950 // The backedge value for the PHI node must be a shift by a positive
7951 // amount
7952 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
7953
7954 // of the PHI node itself
7955 OpLHS == PNOut &&
7956
7957 // and the kind of shift should match the kind of shift we peeled
7958 // off, if any.
7959 (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
7960 };
7961
7962 PHINode *PN;
7963 Instruction::BinaryOps OpCode;
7964 if (!MatchShiftRecurrence(LHS, PN, OpCode))
7965 return getCouldNotCompute();
7966
7967 const DataLayout &DL = getDataLayout();
7968
7969 // The key rationale for this optimization is that for some kinds of shift
7970 // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
7971 // within a finite number of iterations. If the condition guarding the
7972 // backedge (in the sense that the backedge is taken if the condition is true)
7973 // is false for the value the shift recurrence stabilizes to, then we know
7974 // that the backedge is taken only a finite number of times.
7975
7976 ConstantInt *StableValue = nullptr;
7977 switch (OpCode) {
7978 default:
7979 llvm_unreachable("Impossible case!");
7980
7981 case Instruction::AShr: {
7982 // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
7983 // bitwidth(K) iterations.
7984 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
7985 KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr,
7986 Predecessor->getTerminator(), &DT);
7987 auto *Ty = cast<IntegerType>(RHS->getType());
7988 if (Known.isNonNegative())
7989 StableValue = ConstantInt::get(Ty, 0);
7990 else if (Known.isNegative())
7991 StableValue = ConstantInt::get(Ty, -1, true);
7992 else
7993 return getCouldNotCompute();
7994
7995 break;
7996 }
7997 case Instruction::LShr:
7998 case Instruction::Shl:
7999 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
8000 // stabilize to 0 in at most bitwidth(K) iterations.
8001 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
8002 break;
8003 }
8004
8005 auto *Result =
8006 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
8007 assert(Result->getType()->isIntegerTy(1) &&
8008 "Otherwise cannot be an operand to a branch instruction");
8009
8010 if (Result->isZeroValue()) {
8011 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
8012 const SCEV *UpperBound =
8013 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
8014 return ExitLimit(getCouldNotCompute(), UpperBound, false);
8015 }
8016
8017 return getCouldNotCompute();
8018 }
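// Concrete case of the stabilization argument (hypothetical loop): for
// "while (%x != 0) %x = lshr i32 %x, 1", the recurrence {%x,lshr,1}
// necessarily reaches 0 within 32 iterations, so even though the exact count
// depends on %x, the code above returns a constant upper bound equal to the
// bit width. An ashr recurrence stabilizes to 0 or -1 instead, depending on
// the sign of the incoming value.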
8019
8020 /// Return true if we can constant fold an instruction of the specified type,
8021 /// assuming that all operands were constants.
8022 static bool CanConstantFold(const Instruction *I) {
8023 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
8024 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
8025 isa<LoadInst>(I) || isa<ExtractValueInst>(I))
8026 return true;
8027
8028 if (const CallInst *CI = dyn_cast<CallInst>(I))
8029 if (const Function *F = CI->getCalledFunction())
8030 return canConstantFoldCallTo(CI, F);
8031 return false;
8032 }
8033
8034 /// Determine whether this instruction can constant evolve within this loop
8035 /// assuming its operands can all constant evolve.
8036 static bool canConstantEvolve(Instruction *I, const Loop *L) {
8037 // An instruction outside of the loop can't be derived from a loop PHI.
8038 if (!L->contains(I)) return false;
8039
8040 if (isa<PHINode>(I)) {
8041 // We don't currently keep track of the control flow needed to evaluate
8042 // PHIs, so we cannot handle PHIs inside of loops.
8043 return L->getHeader() == I->getParent();
8044 }
8045
8046 // If we won't be able to constant fold this expression even if the operands
8047 // are constants, bail early.
8048 return CanConstantFold(I);
8049 }
8050
8051 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
8052 /// recursing through each instruction operand until reaching a loop header phi.
8053 static PHINode *
8054 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
8055 DenseMap<Instruction *, PHINode *> &PHIMap,
8056 unsigned Depth) {
8057 if (Depth > MaxConstantEvolvingDepth)
8058 return nullptr;
8059
8060 // Otherwise, we can evaluate this instruction if all of its operands are
8061 // constant or derived from a PHI node themselves.
8062 PHINode *PHI = nullptr;
8063 for (Value *Op : UseInst->operands()) {
8064 if (isa<Constant>(Op)) continue;
8065
8066 Instruction *OpInst = dyn_cast<Instruction>(Op);
8067 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;
8068
8069 PHINode *P = dyn_cast<PHINode>(OpInst);
8070 if (!P)
8071 // If this operand is already visited, reuse the prior result.
8072 // We may have P != PHI if this is the deepest point at which the
8073 // inconsistent paths meet.
8074 P = PHIMap.lookup(OpInst);
8075 if (!P) {
8076 // Recurse and memoize the results, whether a phi is found or not.
8077 // This recursive call invalidates pointers into PHIMap.
8078 P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
8079 PHIMap[OpInst] = P;
8080 }
8081 if (!P)
8082 return nullptr; // Not evolving from PHI
8083 if (PHI && PHI != P)
8084 return nullptr; // Evolving from multiple different PHIs.
8085 PHI = P;
8086 }
8087 // This is an expression evolving from a constant PHI!
8088 return PHI;
8089 }
8090
8091 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
8092 /// in the loop that V is derived from. We allow arbitrary operations along the
8093 /// way, but the operands of an operation must either be constants or a value
8094 /// derived from a constant PHI. If this expression does not fit with these
8095 /// constraints, return null.
8096 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
8097 Instruction *I = dyn_cast<Instruction>(V);
8098 if (!I || !canConstantEvolve(I, L)) return nullptr;
8099
8100 if (PHINode *PN = dyn_cast<PHINode>(I))
8101 return PN;
8102
8103 // Record non-constant instructions contained by the loop.
8104 DenseMap<Instruction *, PHINode *> PHIMap;
8105 return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
8106 }
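// Example of a value this recognizes (hypothetical IR):
//
//   loop:
//     %iv = phi i32 [ 0, %preheader ], [ %iv.next, %loop ]
//     %iv.next = add i32 %iv, 1
//     %cmp = icmp ne i32 %iv.next, 100
//
// Every operand of %cmp is a constant or is derived, through constant-foldable
// instructions, from the single header PHI %iv, so getConstantEvolvingPHI(%cmp)
// returns %iv; a value mixing two different header PHIs yields null instead.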
8107
8108 /// EvaluateExpression - Given an expression that passes the
8109 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
8110 /// in the loop has the value PHIVal. If we can't fold this expression for some
8111 /// reason, return null.
8112 static Constant *EvaluateExpression(Value *V, const Loop *L,
8113 DenseMap<Instruction *, Constant *> &Vals,
8114 const DataLayout &DL,
8115 const TargetLibraryInfo *TLI) {
8116 // Convenient constant check, but redundant for recursive calls.
8117 if (Constant *C = dyn_cast<Constant>(V)) return C;
8118 Instruction *I = dyn_cast<Instruction>(V);
8119 if (!I) return nullptr;
8120
8121 if (Constant *C = Vals.lookup(I)) return C;
8122
8123 // An instruction inside the loop depends on a value outside the loop that we
8124 // weren't given a mapping for, or a value such as a call inside the loop.
8125 if (!canConstantEvolve(I, L)) return nullptr;
8126
8127 // An unmapped PHI can be due to a branch or another loop inside this loop,
8128 // or due to this not being the initial iteration through a loop where we
8129 // couldn't compute the evolution of this particular PHI last time.
8130 if (isa<PHINode>(I)) return nullptr;
8131
8132 std::vector<Constant*> Operands(I->getNumOperands());
8133
8134 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
8135 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
8136 if (!Operand) {
8137 Operands[i] = dyn_cast<Constant>(I->getOperand(i));
8138 if (!Operands[i]) return nullptr;
8139 continue;
8140 }
8141 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
8142 Vals[Operand] = C;
8143 if (!C) return nullptr;
8144 Operands[i] = C;
8145 }
8146
8147 if (CmpInst *CI = dyn_cast<CmpInst>(I))
8148 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
8149 Operands[1], DL, TLI);
8150 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
8151 if (!LI->isVolatile())
8152 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
8153 }
8154 return ConstantFoldInstOperands(I, Operands, DL, TLI);
8155 }
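// For example (hypothetical mapping): with Vals = { %iv -> i32 5 } and the
// instructions "%a = add i32 %iv, 2" and "%b = mul i32 %a, 3", evaluating %b
// first recurses into %a, folds it to 7 and caches that result in Vals, then
// folds %b itself to 21.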
8156
8157
8158 // If every incoming value to PN except the one for BB is a specific Constant,
8159 // return that, else return nullptr.
8160 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) {
8161 Constant *IncomingVal = nullptr;
8162
8163 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
8164 if (PN->getIncomingBlock(i) == BB)
8165 continue;
8166
8167 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
8168 if (!CurrentVal)
8169 return nullptr;
8170
8171 if (IncomingVal != CurrentVal) {
8172 if (IncomingVal)
8173 return nullptr;
8174 IncomingVal = CurrentVal;
8175 }
8176 }
8177
8178 return IncomingVal;
8179 }
8180
8181 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
8182 /// in the header of its containing loop, we know the loop executes a
8183 /// constant number of times, and the PHI node is just a recurrence
8184 /// involving constants, fold it.
8185 Constant *
8186 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
8187 const APInt &BEs,
8188 const Loop *L) {
8189 auto I = ConstantEvolutionLoopExitValue.find(PN);
8190 if (I != ConstantEvolutionLoopExitValue.end())
8191 return I->second;
8192
8193 if (BEs.ugt(MaxBruteForceIterations))
8194 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it.
8195
8196 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
8197
8198 DenseMap<Instruction *, Constant *> CurrentIterVals;
8199 BasicBlock *Header = L->getHeader();
8200 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
8201
8202 BasicBlock *Latch = L->getLoopLatch();
8203 if (!Latch)
8204 return nullptr;
8205
8206 for (PHINode &PHI : Header->phis()) {
8207 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
8208 CurrentIterVals[&PHI] = StartCST;
8209 }
8210 if (!CurrentIterVals.count(PN))
8211 return RetVal = nullptr;
8212
8213 Value *BEValue = PN->getIncomingValueForBlock(Latch);
8214
8215 // Execute the loop symbolically to determine the exit value.
8216 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) &&
8217 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
8218
8219 unsigned NumIterations = BEs.getZExtValue(); // must be in range
8220 unsigned IterationNum = 0;
8221 const DataLayout &DL = getDataLayout();
8222 for (; ; ++IterationNum) {
8223 if (IterationNum == NumIterations)
8224 return RetVal = CurrentIterVals[PN]; // Got exit value!
8225
8226 // Compute the value of the PHIs for the next iteration.
8227 // EvaluateExpression adds non-phi values to the CurrentIterVals map.
8228 DenseMap<Instruction *, Constant *> NextIterVals;
8229 Constant *NextPHI =
8230 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8231 if (!NextPHI)
8232 return nullptr; // Couldn't evaluate!
8233 NextIterVals[PN] = NextPHI;
8234
8235 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
8236
8237 // Also evaluate the other PHI nodes. However, we don't get to stop if we
8238 // cease to be able to evaluate one of them or if they stop evolving,
8239 // because that doesn't necessarily prevent us from computing PN.
8240 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
8241 for (const auto &I : CurrentIterVals) {
8242 PHINode *PHI = dyn_cast<PHINode>(I.first);
8243 if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
8244 PHIsToCompute.emplace_back(PHI, I.second);
8245 }
8246 // We use two distinct loops because EvaluateExpression may invalidate any
8247 // iterators into CurrentIterVals.
8248 for (const auto &I : PHIsToCompute) {
8249 PHINode *PHI = I.first;
8250 Constant *&NextPHI = NextIterVals[PHI];
8251 if (!NextPHI) { // Not already computed.
8252 Value *BEValue = PHI->getIncomingValueForBlock(Latch);
8253 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8254 }
8255 if (NextPHI != I.second)
8256 StoppedEvolving = false;
8257 }
8258
8259 // If all entries in CurrentIterVals == NextIterVals then we can stop
8260 // iterating, the loop can't continue to change.
8261 if (StoppedEvolving)
8262 return RetVal = CurrentIterVals[PN];
8263
8264 CurrentIterVals.swap(NextIterVals);
8265 }
8266 }
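// Worked example of the brute-force loop above (hypothetical recurrence): a
// header PHI %sum with start value 0 and backedge value "add i32 %sum, 3",
// evaluated with a backedge-taken count of 4, steps through 0, 3, 6, 9 and
// produces the exit value 12.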
8267
8268 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
8269 Value *Cond,
8270 bool ExitWhen) {
8271 PHINode *PN = getConstantEvolvingPHI(Cond, L);
8272 if (!PN) return getCouldNotCompute();
8273
8274 // If the loop is canonicalized, the PHI will have exactly two entries.
8275 // That's the only form we support here.
8276 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
8277
8278 DenseMap<Instruction *, Constant *> CurrentIterVals;
8279 BasicBlock *Header = L->getHeader();
8280 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
8281
8282 BasicBlock *Latch = L->getLoopLatch();
8283 assert(Latch && "Should follow from NumIncomingValues == 2!");
8284
8285 for (PHINode &PHI : Header->phis()) {
8286 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
8287 CurrentIterVals[&PHI] = StartCST;
8288 }
8289 if (!CurrentIterVals.count(PN))
8290 return getCouldNotCompute();
8291
8292 // Okay, we found a PHI node that defines the trip count of this loop. Execute
8293 // the loop symbolically to determine when the condition gets a value of
8294 // "ExitWhen".
8295 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
8296 const DataLayout &DL = getDataLayout();
8297 for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
8298 auto *CondVal = dyn_cast_or_null<ConstantInt>(
8299 EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));
8300
8301 // Couldn't symbolically evaluate.
8302 if (!CondVal) return getCouldNotCompute();
8303
8304 if (CondVal->getValue() == uint64_t(ExitWhen)) {
8305 ++NumBruteForceTripCountsComputed;
8306 return getConstant(Type::getInt32Ty(getContext()), IterationNum);
8307 }
8308
8309 // Update all the PHI nodes for the next iteration.
8310 DenseMap<Instruction *, Constant *> NextIterVals;
8311
8312 // Create a list of which PHIs we need to compute. We want to do this before
8313 // calling EvaluateExpression on them because that may invalidate iterators
8314 // into CurrentIterVals.
8315 SmallVector<PHINode *, 8> PHIsToCompute;
8316 for (const auto &I : CurrentIterVals) {
8317 PHINode *PHI = dyn_cast<PHINode>(I.first);
8318 if (!PHI || PHI->getParent() != Header) continue;
8319 PHIsToCompute.push_back(PHI);
8320 }
8321 for (PHINode *PHI : PHIsToCompute) {
8322 Constant *&NextPHI = NextIterVals[PHI];
8323 if (NextPHI) continue; // Already computed!
8324
8325 Value *BEValue = PHI->getIncomingValueForBlock(Latch);
8326 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8327 }
8328 CurrentIterVals.swap(NextIterVals);
8329 }
8330
8331 // Too many iterations were needed to evaluate.
8332 return getCouldNotCompute();
8333 }
8334
8335 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
8336 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
8337 ValuesAtScopes[V];
8338 // Check to see if we've folded this expression at this loop before.
8339 for (auto &LS : Values)
8340 if (LS.first == L)
8341 return LS.second ? LS.second : V;
8342
8343 Values.emplace_back(L, nullptr);
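// The (L, nullptr) placeholder just added serves as an in-progress marker:
// if computeSCEVAtScope recursively queries the same (V, L) pair, the scan
// above finds the null entry and returns V unchanged instead of recursing
// forever. The entry is re-found by scanning below because computeSCEVAtScope
// may grow ValuesAtScopes[V] and invalidate references into it.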
8344
8345 // Otherwise compute it.
8346 const SCEV *C = computeSCEVAtScope(V, L);
8347 for (auto &LS : reverse(ValuesAtScopes[V]))
8348 if (LS.first == L) {
8349 LS.second = C;
8350 break;
8351 }
8352 return C;
8353 }
8354
8355 /// This builds up a Constant using the ConstantExpr interface. That way, we
8356 /// will return Constants for objects which aren't represented by a
8357 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
8358 /// Returns NULL if the SCEV isn't representable as a Constant.
8359 static Constant *BuildConstantFromSCEV(const SCEV *V) {
8360 switch (V->getSCEVType()) {
8361 case scCouldNotCompute:
8362 case scAddRecExpr:
8363 return nullptr;
8364 case scConstant:
8365 return cast<SCEVConstant>(V)->getValue();
8366 case scUnknown:
8367 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
8368 case scSignExtend: {
8369 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
8370 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
8371 return ConstantExpr::getSExt(CastOp, SS->getType());
8372 return nullptr;
8373 }
8374 case scZeroExtend: {
8375 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
8376 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
8377 return ConstantExpr::getZExt(CastOp, SZ->getType());
8378 return nullptr;
8379 }
8380 case scPtrToInt: {
8381 const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V);
8382 if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand()))
8383 return ConstantExpr::getPtrToInt(CastOp, P2I->getType());
8384
8385 return nullptr;
8386 }
8387 case scTruncate: {
8388 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
8389 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
8390 return ConstantExpr::getTrunc(CastOp, ST->getType());
8391 return nullptr;
8392 }
8393 case scAddExpr: {
8394 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
8395 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
8396 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
8397 unsigned AS = PTy->getAddressSpace();
8398 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
8399 C = ConstantExpr::getBitCast(C, DestPtrTy);
8400 }
8401 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
8402 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
8403 if (!C2)
8404 return nullptr;
8405
8406 // First pointer!
8407 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
8408 unsigned AS = C2->getType()->getPointerAddressSpace();
8409 std::swap(C, C2);
8410 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
8411 // The offsets have been converted to bytes. We can add bytes to an
8412 // i8* by GEP with the byte count in the first index.
8413 C = ConstantExpr::getBitCast(C, DestPtrTy);
8414 }
8415
8416 // Don't bother trying to sum two pointers. We probably can't
8417 // statically compute a load that results from it anyway.
8418 if (C2->getType()->isPointerTy())
8419 return nullptr;
8420
8421 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
8422 if (PTy->getElementType()->isStructTy())
8423 C2 = ConstantExpr::getIntegerCast(
8424 C2, Type::getInt32Ty(C->getContext()), true);
8425 C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
8426 } else
8427 C = ConstantExpr::getAdd(C, C2);
8428 }
8429 return C;
8430 }
8431 return nullptr;
8432 }
8433 case scMulExpr: {
8434 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
8435 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
8436 // Don't bother with pointers at all.
8437 if (C->getType()->isPointerTy())
8438 return nullptr;
8439 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
8440 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
8441 if (!C2 || C2->getType()->isPointerTy())
8442 return nullptr;
8443 C = ConstantExpr::getMul(C, C2);
8444 }
8445 return C;
8446 }
8447 return nullptr;
8448 }
8449 case scUDivExpr: {
8450 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
8451 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
8452 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
8453 if (LHS->getType() == RHS->getType())
8454 return ConstantExpr::getUDiv(LHS, RHS);
8455 return nullptr;
8456 }
8457 case scSMaxExpr:
8458 case scUMaxExpr:
8459 case scSMinExpr:
8460 case scUMinExpr:
8461 return nullptr; // TODO: smax, umax, smin, umin.
8462 }
8463 llvm_unreachable("Unknown SCEV kind!");
8464 }
8465
8466 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
8467 if (isa<SCEVConstant>(V)) return V;
8468
8469 // If this instruction is evolved from a constant-evolving PHI, compute the
8470 // exit value from the loop without using SCEVs.
8471 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
8472 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
8473 if (PHINode *PN = dyn_cast<PHINode>(I)) {
8474 const Loop *CurrLoop = this->LI[I->getParent()];
8475 // Looking for loop exit value.
8476 if (CurrLoop && CurrLoop->getParentLoop() == L &&
8477 PN->getParent() == CurrLoop->getHeader()) {
8478 // Okay, there is no closed form solution for the PHI node. Check
8479 // to see if the loop that contains it has a known backedge-taken
8480 // count. If so, we may be able to force computation of the exit
8481 // value.
8482 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
8483 // This trivial case can show up in some degenerate cases where
8484 // the incoming IR has not yet been fully simplified.
8485 if (BackedgeTakenCount->isZero()) {
8486 Value *InitValue = nullptr;
8487 bool MultipleInitValues = false;
8488 for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
8489 if (!CurrLoop->contains(PN->getIncomingBlock(i))) {
8490 if (!InitValue)
8491 InitValue = PN->getIncomingValue(i);
8492 else if (InitValue != PN->getIncomingValue(i)) {
8493 MultipleInitValues = true;
8494 break;
8495 }
8496 }
8497 }
8498 if (!MultipleInitValues && InitValue)
8499 return getSCEV(InitValue);
8500 }
8501 // Do we have a loop invariant value flowing around the backedge
8502 // for a loop which must execute the backedge?
8503 if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
8504 isKnownPositive(BackedgeTakenCount) &&
8505 PN->getNumIncomingValues() == 2) {
8506
8507 unsigned InLoopPred =
8508 CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1;
8509 Value *BackedgeVal = PN->getIncomingValue(InLoopPred);
8510 if (CurrLoop->isLoopInvariant(BackedgeVal))
8511 return getSCEV(BackedgeVal);
8512 }
8513 if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
8514 // Okay, we know how many times the containing loop executes. If
8515 // this is a constant evolving PHI node, get the final value at
8516 // the specified iteration number.
8517 Constant *RV = getConstantEvolutionLoopExitValue(
8518 PN, BTCC->getAPInt(), CurrLoop);
8519 if (RV) return getSCEV(RV);
8520 }
8521 }
8522
8523 // If there is a single-input Phi, evaluate it at our scope. If we can
8524 // prove that this replacement does not break LCSSA form, use new value.
8525 if (PN->getNumOperands() == 1) {
8526 const SCEV *Input = getSCEV(PN->getOperand(0));
8527 const SCEV *InputAtScope = getSCEVAtScope(Input, L);
8528 // TODO: We can generalize it using LI.replacementPreservesLCSSAForm,
8529 // for the simplest case just support constants.
8530 if (isa<SCEVConstant>(InputAtScope)) return InputAtScope;
8531 }
8532 }
8533
8534 // Okay, this is an expression that we cannot symbolically evaluate
8535 // into a SCEV. Check to see if it's possible to symbolically evaluate
8536 // the arguments into constants, and if so, try to constant propagate the
8537 // result. This is particularly useful for computing loop exit values.
8538 if (CanConstantFold(I)) {
8539 SmallVector<Constant *, 4> Operands;
8540 bool MadeImprovement = false;
8541 for (Value *Op : I->operands()) {
8542 if (Constant *C = dyn_cast<Constant>(Op)) {
8543 Operands.push_back(C);
8544 continue;
8545 }
8546
8547 // If an operand is non-constant and its type is neither integer nor
8548 // pointer, it is not SCEVable, so don't even try to analyze it with
8549 // SCEV techniques.
8550 if (!isSCEVable(Op->getType()))
8551 return V;
8552
8553 const SCEV *OrigV = getSCEV(Op);
8554 const SCEV *OpV = getSCEVAtScope(OrigV, L);
8555 MadeImprovement |= OrigV != OpV;
8556
8557 Constant *C = BuildConstantFromSCEV(OpV);
8558 if (!C) return V;
8559 if (C->getType() != Op->getType())
8560 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
8561 Op->getType(),
8562 false),
8563 C, Op->getType());
8564 Operands.push_back(C);
8565 }
8566
8567 // Check to see if getSCEVAtScope actually made an improvement.
8568 if (MadeImprovement) {
8569 Constant *C = nullptr;
8570 const DataLayout &DL = getDataLayout();
8571 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
8572 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
8573 Operands[1], DL, &TLI);
8574 else if (const LoadInst *Load = dyn_cast<LoadInst>(I)) {
8575 if (!Load->isVolatile())
8576 C = ConstantFoldLoadFromConstPtr(Operands[0], Load->getType(),
8577 DL);
8578 } else
8579 C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
8580 if (!C) return V;
8581 return getSCEV(C);
8582 }
8583 }
8584 }
8585
8586 // This is some other type of SCEVUnknown, just return it.
8587 return V;
8588 }
8589
8590 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
8591 // Avoid performing the look-up in the common case where the specified
8592 // expression has no loop-variant portions.
8593 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
8594 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
8595 if (OpAtScope != Comm->getOperand(i)) {
8596 // Okay, at least one of these operands is loop variant but might be
8597 // foldable. Build a new instance of the folded commutative expression.
8598 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
8599 Comm->op_begin()+i);
8600 NewOps.push_back(OpAtScope);
8601
8602 for (++i; i != e; ++i) {
8603 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
8604 NewOps.push_back(OpAtScope);
8605 }
8606 if (isa<SCEVAddExpr>(Comm))
8607 return getAddExpr(NewOps, Comm->getNoWrapFlags());
8608 if (isa<SCEVMulExpr>(Comm))
8609 return getMulExpr(NewOps, Comm->getNoWrapFlags());
8610 if (isa<SCEVMinMaxExpr>(Comm))
8611 return getMinMaxExpr(Comm->getSCEVType(), NewOps);
8612 llvm_unreachable("Unknown commutative SCEV type!");
8613 }
8614 }
8615 // If we got here, all operands are loop invariant.
8616 return Comm;
8617 }
8618
8619 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
8620 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
8621 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
8622 if (LHS == Div->getLHS() && RHS == Div->getRHS())
8623 return Div; // must be loop invariant
8624 return getUDivExpr(LHS, RHS);
8625 }
8626
8627 // If this is a loop recurrence for a loop that does not contain L, then we
8628 // are dealing with the final value computed by the loop.
8629 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
8630 // First, attempt to evaluate each operand.
8631 // Avoid performing the look-up in the common case where the specified
8632 // expression has no loop-variant portions.
8633 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
8634 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
8635 if (OpAtScope == AddRec->getOperand(i))
8636 continue;
8637
8638 // Okay, at least one of these operands is loop variant but might be
8639 // foldable. Build a new instance of the folded commutative expression.
8640 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
8641 AddRec->op_begin()+i);
8642 NewOps.push_back(OpAtScope);
8643 for (++i; i != e; ++i)
8644 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
8645
8646 const SCEV *FoldedRec =
8647 getAddRecExpr(NewOps, AddRec->getLoop(),
8648 AddRec->getNoWrapFlags(SCEV::FlagNW));
8649 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
8650 // The addrec may be folded to a nonrecurrence, for example, if the
8651 // induction variable is multiplied by zero after constant folding. Go
8652 // ahead and return the folded value.
8653 if (!AddRec)
8654 return FoldedRec;
8655 break;
8656 }
8657
8658 // If the scope is outside the addrec's loop, evaluate it by using the
8659 // loop exit value of the addrec.
8660 if (!AddRec->getLoop()->contains(L)) {
8661 // To evaluate this recurrence, we need to know how many times the AddRec
8662 // loop iterates. Compute this now.
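// For example, an affine addrec {0,+,2}<L1> queried at a scope outside
// L1, where L1's backedge-taken count is 9, evaluates to 0 + 2*9 = 18.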
8663 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
8664 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
8665
8666 // Then, evaluate the AddRec.
8667 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
8668 }
8669
8670 return AddRec;
8671 }
8672
8673 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
8674 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
8675 if (Op == Cast->getOperand())
8676 return Cast; // must be loop invariant
8677 return getZeroExtendExpr(Op, Cast->getType());
8678 }
8679
8680 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
8681 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
8682 if (Op == Cast->getOperand())
8683 return Cast; // must be loop invariant
8684 return getSignExtendExpr(Op, Cast->getType());
8685 }
8686
8687 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
8688 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
8689 if (Op == Cast->getOperand())
8690 return Cast; // must be loop invariant
8691 return getTruncateExpr(Op, Cast->getType());
8692 }
8693
8694 if (const SCEVPtrToIntExpr *Cast = dyn_cast<SCEVPtrToIntExpr>(V)) {
8695 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
8696 if (Op == Cast->getOperand())
8697 return Cast; // must be loop invariant
8698 return getPtrToIntExpr(Op, Cast->getType());
8699 }
8700
8701 llvm_unreachable("Unknown SCEV type!");
8702 }
8703
8704 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
8705 return getSCEVAtScope(getSCEV(V), L);
8706 }
8707
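/// Strip zero- and sign-extensions off of S. Both casts are injective and map
/// zero to zero, so for the purpose of solving "f(X) == 0" (see howFarToZero)
/// they can be peeled off without changing the set of roots.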
8708 const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
8709 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S))
8710 return stripInjectiveFunctions(ZExt->getOperand());
8711 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S))
8712 return stripInjectiveFunctions(SExt->getOperand());
8713 return S;
8714 }
8715
8716 /// Finds the minimum unsigned root of the following equation:
8717 ///
8718 /// A * X = B (mod N)
8719 ///
8720 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of
8721 /// A and B isn't important.
8722 ///
8723 /// If the equation does not have a solution, SCEVCouldNotCompute is returned.
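/// For example, with BW = 4 (N = 16), A = 6 and B = 10: D = gcd(6, 16) = 2,
/// B is divisible by D, I = (A/D)^-1 (mod N/D) = 3^-1 (mod 8) = 3, and the
/// minimum root is (I * B mod N) / D = (3 * 10 mod 16) / 2 = 14 / 2 = 7;
/// indeed 6 * 7 = 42 == 10 (mod 16).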
8724 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
8725 ScalarEvolution &SE) {
8726 uint32_t BW = A.getBitWidth();
8727 assert(BW == SE.getTypeSizeInBits(B->getType()));
8728 assert(A != 0 && "A must be non-zero.");
8729
8730 // 1. D = gcd(A, N)
8731 //
8732 // The gcd of A and N may have only one prime factor: 2. The number of
8733 // trailing zeros in A is its multiplicity.
8734 uint32_t Mult2 = A.countTrailingZeros();
8735 // D = 2^Mult2
8736
8737 // 2. Check if B is divisible by D.
8738 //
8739 // B is divisible by D if and only if the multiplicity of prime factor 2 for B
8740 // is not less than multiplicity of this prime factor for D.
8741 if (SE.GetMinTrailingZeros(B) < Mult2)
8742 return SE.getCouldNotCompute();
8743
8744 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
8745 // modulo (N / D).
8746 //
8747 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
8748 // (N / D) in general. The inverse itself always fits into BW bits, though,
8749 // so we immediately truncate it.
8750 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
8751 APInt Mod(BW + 1, 0);
8752 Mod.setBit(BW - Mult2); // Mod = N / D
8753 APInt I = AD.multiplicativeInverse(Mod).trunc(BW);
8754
8755 // 4. Compute the minimum unsigned root of the equation:
8756 // I * (B / D) mod (N / D)
8757 // To simplify the computation, we factor out the divide by D:
8758 // (I * B mod N) / D
8759 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
8760 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
8761 }
8762
8763 /// For a given quadratic addrec, generate coefficients of the corresponding
8764 /// quadratic equation, multiplied by a common value to ensure that they are
8765 /// integers.
8766 /// The returned value is a tuple { A, B, C, M, BitWidth }, where
8767 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C
8768 /// were multiplied by, and BitWidth is the bit width of the original addrec
8769 /// coefficients.
8770 /// This function returns None if the addrec coefficients are not compile-
8771 /// time constants.
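/// For example, the quadratic addrec {0,+,1,+,2} takes the values 0, 1, 4,
/// 9, ... (c(n) = n^2) and produces { A=2, B=0, C=0, M=2, BitWidth }, i.e.
/// the equation 2*n^2 + 0*n + 0 = 0, which is c(n) = 0 scaled by M = 2.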
8772 static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
8773 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) {
8774 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
8775 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
8776 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
8777 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
8778 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: "
8779 << *AddRec << '\n');
8780
8781 // We currently can only solve this if the coefficients are constants.
8782 if (!LC || !MC || !NC) {
8783 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n");
8784 return None;
8785 }
8786
8787 APInt L = LC->getAPInt();
8788 APInt M = MC->getAPInt();
8789 APInt N = NC->getAPInt();
8790 assert(!N.isNullValue() && "This is not a quadratic addrec");
8791
8792 unsigned BitWidth = LC->getAPInt().getBitWidth();
8793 unsigned NewWidth = BitWidth + 1;
8794 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: "
8795 << BitWidth << '\n');
8796 // The sign-extension (as opposed to a zero-extension) here matches the
8797 // extension used in SolveQuadraticEquationWrap (with the same motivation).
8798 N = N.sext(NewWidth);
8799 M = M.sext(NewWidth);
8800 L = L.sext(NewWidth);
8801
8802 // The increments are M, M+N, M+2N, ..., so the accumulated values are
8803 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is,
8804 // L+M, L+2M+N, L+3M+3N, ...
8805 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N.
8806 //
8807 // The equation Acc = 0 is then
8808 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0.
8809 // In a quadratic form it becomes:
8810 // N n^2 + (2M-N) n + 2L = 0.
8811
8812 APInt A = N;
8813 APInt B = 2 * M - A;
8814 APInt C = 2 * L;
8815 APInt T = APInt(NewWidth, 2);
8816 LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B
8817 << "x + " << C << ", coeff bw: " << NewWidth
8818 << ", multiplied by " << T << '\n');
8819 return std::make_tuple(A, B, C, T, BitWidth);
8820 }
8821
8822 /// Helper function to compare optional APInts:
8823 /// (a) if X and Y both exist, return min(X, Y),
8824 /// (b) if neither X nor Y exist, return None,
8825 /// (c) if exactly one of X and Y exists, return that value.
8826 static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
8827 if (X.hasValue() && Y.hasValue()) {
8828 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
8829 APInt XW = X->sextOrSelf(W);
8830 APInt YW = Y->sextOrSelf(W);
8831 return XW.slt(YW) ? *X : *Y;
8832 }
8833 if (!X.hasValue() && !Y.hasValue())
8834 return None;
8835 return X.hasValue() ? *X : *Y;
8836 }
8837
8838 /// Helper function to truncate an optional APInt to a given BitWidth.
8839 /// When solving addrec-related equations, it is preferable to return a value
8840 /// that has the same bit width as the original addrec's coefficients. If the
8841 /// solution fits in the original bit width, truncate it (except for i1).
8842 /// Returning a value of a different bit width may inhibit some optimizations.
8843 ///
8844 /// In general, a solution to a quadratic equation generated from an addrec
8845 /// may require BW+1 bits, where BW is the bit width of the addrec's
8846 /// coefficients. The reason is that the coefficients of the quadratic
8847 /// equation are BW+1 bits wide (to avoid truncation when converting from
8848 /// the addrec to the equation).
8849 static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) {
8850 if (!X.hasValue())
8851 return None;
8852 unsigned W = X->getBitWidth();
8853 if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth))
8854 return X->trunc(BitWidth);
8855 return X;
8856 }
8857
8858 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n
8859 /// iterations. The values L, M, N are assumed to be signed, and they
8860 /// should all have the same bit widths.
8861 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW,
8862 /// where BW is the bit width of the addrec's coefficients.
8863 /// If the calculated value is a BW-bit integer (for BW > 1), it will be
8864 /// returned as such, otherwise the bit width of the returned value may
8865 /// be greater than BW.
8866 ///
8867 /// This function returns None if
8868 /// (a) the addrec coefficients are not constant, or
8869 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases
8870 /// like x^2 = 5, no integer solutions exist, in other cases an integer
8871 /// solution may exist, but SolveQuadraticEquationWrap may fail to find it.
8872 static Optional<APInt>
8873 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
8874 APInt A, B, C, M;
8875 unsigned BitWidth;
8876 auto T = GetQuadraticEquation(AddRec);
8877 if (!T.hasValue())
8878 return None;
8879
8880 std::tie(A, B, C, M, BitWidth) = *T;
8881 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n");
8882 Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1);
8883 if (!X.hasValue())
8884 return None;
8885
8886 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X);
8887 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE);
8888 if (!V->isZero())
8889 return None;
8890
8891 return TruncIfPossible(X, BitWidth);
8892 }
8893
8894 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n
8895 /// iterations. The values M, N are assumed to be signed, and they
8896 /// should all have the same bit widths.
8897 /// Find the least n such that c(n) does not belong to the given range,
8898 /// while c(n-1) does.
8899 ///
8900 /// This function returns None if
8901 /// (a) the addrec coefficients are not constant, or
8902 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the
8903 /// bounds of the range.
8904 static Optional<APInt>
8905 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
8906 const ConstantRange &Range, ScalarEvolution &SE) {
8907 assert(AddRec->getOperand(0)->isZero() &&
8908 "Starting value of addrec should be 0");
8909 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range "
8910 << Range << ", addrec " << *AddRec << '\n');
8911 // This case is handled in getNumIterationsInRange. Here we can assume that
8912 // we start in the range.
8913 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) &&
8914 "Addrec's initial value should be in range");
8915
8916 APInt A, B, C, M;
8917 unsigned BitWidth;
8918 auto T = GetQuadraticEquation(AddRec);
8919 if (!T.hasValue())
8920 return None;
8921
8922 // Be careful about the return value: there can be two reasons for not
8923 // returning an actual number. First, if no solutions to the equations
8924 // were found, and second, if the solutions don't leave the given range.
8925 // The first case means that the actual solution is "unknown", the second
8926 // means that it's known, but not valid. If the solution is unknown, we
8927 // cannot make any conclusions.
8928 // Return a pair: the optional solution and a flag indicating if the
8929 // solution was found.
8930 auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> {
8931 // Solve for signed overflow and unsigned overflow, pick the lower
8932 // solution.
8933 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary "
8934 << Bound << " (before multiplying by " << M << ")\n");
8935 Bound *= M; // The quadratic equation multiplier.
8936
8937 Optional<APInt> SO = None;
8938 if (BitWidth > 1) {
8939 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
8940 "signed overflow\n");
8941 SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
8942 }
8943 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
8944 "unsigned overflow\n");
8945 Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound,
8946 BitWidth+1);
8947
8948 auto LeavesRange = [&] (const APInt &X) {
8949 ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
8950 ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
8951 if (Range.contains(V0->getValue()))
8952 return false;
8953 // X should be at least 1, so X-1 is non-negative.
8954 ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1);
8955 ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
8956 if (Range.contains(V1->getValue()))
8957 return true;
8958 return false;
8959 };
8960
8961 // If SolveQuadraticEquationWrap returns None, it means that there can
8962 // be a solution, but the function failed to find it. We cannot treat it
8963 // as "no solution".
8964 if (!SO.hasValue() || !UO.hasValue())
8965 return { None, false };
8966
8967 // Check the smaller value first to see if it leaves the range.
8968 // At this point, both SO and UO must have values.
8969 Optional<APInt> Min = MinOptional(SO, UO);
8970 if (LeavesRange(*Min))
8971 return { Min, true };
8972 Optional<APInt> Max = Min == SO ? UO : SO;
8973 if (LeavesRange(*Max))
8974 return { Max, true };
8975
8976 // Solutions were found, but were eliminated, hence the "true".
8977 return { None, true };
8978 };
8979
8980 std::tie(A, B, C, M, BitWidth) = *T;
8981 // Lower bound is inclusive, subtract 1 to represent the exiting value.
8982 APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
8983 APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
8984 auto SL = SolveForBoundary(Lower);
8985 auto SU = SolveForBoundary(Upper);
8986 // If any of the solutions was unknown, no meaningful conclusions can
8987 // be made.
8988 if (!SL.second || !SU.second)
8989 return None;
8990
8991 // Claim: The correct solution is not some value between Min and Max.
8992 //
8993 // Justification: Assuming that Min and Max are different values, one of
8994 // them is when the first signed overflow happens, the other is when the
8995 // first unsigned overflow happens. Crossing the range boundary is only
8996 // possible via an overflow (treating 0 as a special case of it, modeling
8997 // an overflow as crossing k*2^W for some k).
8998 //
8999 // The interesting case here is when Min was eliminated as an invalid
9000 // solution, but Max was not. The argument is that if there was another
9001 // overflow between Min and Max, it would also have been eliminated if
9002 // it was considered.
9003 //
9004 // For a given boundary, it is possible to have two overflows of the same
9005 // type (signed/unsigned) without having the other type in between: this
9006 // can happen when the vertex of the parabola is between the iterations
9007 // corresponding to the overflows. This is only possible when the two
9008 // overflows cross k*2^W for the same k. In such case, if the second one
9009 // left the range (and was the first one to do so), the first overflow
9010 // would have to enter the range, which would mean that either we had left
9011 // the range before or that we started outside of it. Both of these cases
9012 // are contradictions.
9013 //
9014 // Claim: In the case where SolveForBoundary returns None, the correct
9015 // solution is not some value between the Max for this boundary and the
9016 // Min of the other boundary.
9017 //
9018 // Justification: Assume that we had such Max_A and Min_B corresponding
9019 // to range boundaries A and B and such that Max_A < Min_B. If there was
9020 // a solution between Max_A and Min_B, it would have to be caused by an
9021 // overflow corresponding to either A or B. It cannot correspond to B,
9022 // since Min_B is the first occurrence of such an overflow. If it
9023 // corresponded to A, it would have to be either a signed or an unsigned
9024 // overflow that is larger than both eliminated overflows for A. But
9025 // between the eliminated overflows and this overflow, the values would
9026 // cover the entire value space, thus crossing the other boundary, which
9027 // is a contradiction.
9028
9029 return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
9030 }
9031
9032 ScalarEvolution::ExitLimit
9033 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
9034 bool AllowPredicates) {
9035
9036 // This is only used for loops with an "x != y" exit test. The exit condition
9037 // is now expressed as a single expression, V = x-y. So the exit test is
9038 // effectively V != 0. We know and take advantage of the fact that this
9039 // expression is only ever used in a comparison-against-zero context.
9040
9041 SmallPtrSet<const SCEVPredicate *, 4> Predicates;
9042 // If the value is a constant
9043 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
9044 // If the value is already zero, the branch will execute zero times.
9045 if (C->getValue()->isZero()) return C;
9046 return getCouldNotCompute(); // Otherwise it will loop infinitely.
9047 }
9048
9049 const SCEVAddRecExpr *AddRec =
9050 dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));
9051
9052 if (!AddRec && AllowPredicates)
9053 // Try to make this an AddRec using runtime tests, in the first X
9054 // iterations of this loop, where X is the SCEV expression found by the
9055 // algorithm below.
9056 AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);
9057
9058 if (!AddRec || AddRec->getLoop() != L)
9059 return getCouldNotCompute();
9060
9061 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
9062 // the quadratic equation to solve it.
9063 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
9064 // We can only use this value if the chrec ends up with an exact zero
9065 // value at this index. When solving for "X*X != 5", for example, we
9066 // should not accept a root of 2.
9067 if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
9068 const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
9069 return ExitLimit(R, R, false, Predicates);
9070 }
9071 return getCouldNotCompute();
9072 }
9073
9074 // Otherwise we can only handle this if it is affine.
9075 if (!AddRec->isAffine())
9076 return getCouldNotCompute();
9077
9078 // If this is an affine expression, the execution count of this branch is
9079 // the minimum unsigned root of the following equation:
9080 //
9081 // Start + Step*N = 0 (mod 2^BW)
9082 //
9083 // equivalent to:
9084 //
9085 // Step*N = -Start (mod 2^BW)
9086 //
9087 // where BW is the common bit width of Start and Step.
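// For example, the affine addrec {10,+,-2} reaches zero after N = 5
// backedges, since (-2) * 5 == -10 (mod 2^BW).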
9088
9089 // Get the initial value for the loop.
9090 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
9091 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
9092
9093 // For now we handle only constant steps.
9094 //
9095 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
9096 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
9097 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
9098 // We have not yet seen any such cases.
9099 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
9100 if (!StepC || StepC->getValue()->isZero())
9101 return getCouldNotCompute();
9102
9103 // For positive steps (counting up until unsigned overflow):
9104 // N = -Start/Step (as unsigned)
9105 // For negative steps (counting down to zero):
9106 // N = Start/-Step
9107 // First compute the unsigned distance from zero in the direction of Step.
9108 bool CountDown = StepC->getAPInt().isNegative();
9109 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
9110
9111 // Handle unitary steps, which cannot wraparound.
9112 // 1*N = -Start; -1*N = Start (mod 2^BW), so:
9113 // N = Distance (as unsigned)
9114 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
9115 APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L));
9116 APInt MaxBECountBase = getUnsignedRangeMax(Distance);
9117 if (MaxBECountBase.ult(MaxBECount))
9118 MaxBECount = MaxBECountBase;
9119
9120 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated,
9121 // we end up with a loop whose backedge-taken count is n - 1. Detect this
9122 // case, and see if we can improve the bound.
9123 //
9124 // Explicitly handling this here is necessary because getUnsignedRange
9125 // isn't context-sensitive; it doesn't know that we only care about the
9126 // range inside the loop.
9127 const SCEV *Zero = getZero(Distance->getType());
9128 const SCEV *One = getOne(Distance->getType());
9129 const SCEV *DistancePlusOne = getAddExpr(Distance, One);
9130 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
9131 // If Distance + 1 doesn't overflow, we can compute the maximum distance
9132 // as "unsigned_max(Distance + 1) - 1".
9133 ConstantRange CR = getUnsignedRange(DistancePlusOne);
9134 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
9135 }
9136 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
9137 }
9138
9139 // If the condition controls loop exit (the loop exits only if the expression
9140 // is true) and the addition is no-wrap we can use unsigned divide to
9141 // compute the backedge count. In this case, the step may not divide the
9142 // distance, but we don't care because if the condition is "missed" the loop
9143 // will have undefined behavior due to wrapping.
9144 if (ControlsExit && AddRec->hasNoSelfWrap() &&
9145 loopHasNoAbnormalExits(AddRec->getLoop())) {
9146 const SCEV *Exact =
9147 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
9148 const SCEV *Max =
9149 Exact == getCouldNotCompute()
9150 ? Exact
9151 : getConstant(getUnsignedRangeMax(Exact));
9152 return ExitLimit(Exact, Max, false, Predicates);
9153 }
9154
9155 // Solve the general equation.
9156 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
9157 getNegativeSCEV(Start), *this);
9158 const SCEV *M = E == getCouldNotCompute()
9159 ? E
9160 : getConstant(getUnsignedRangeMax(E));
9161 return ExitLimit(E, M, false, Predicates);
9162 }
9163
9164 ScalarEvolution::ExitLimit
9165 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
9166 // Loops that look like: while (X == 0) are very strange indeed. We don't
9167 // handle them yet except for the trivial case. This could be expanded in the
9168 // future as needed.
9169
9170 // If the value is a constant, check to see if it is known to be non-zero
9171 // already. If so, the backedge will execute zero times.
9172 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
9173 if (!C->getValue()->isZero())
9174 return getZero(C->getType());
9175 return getCouldNotCompute(); // Otherwise it will loop infinitely.
9176 }
9177
9178 // We could implement others, but I really doubt anyone writes loops like
9179 // this, and if they did, they would already be constant folded.
9180 return getCouldNotCompute();
9181 }
9182
9183 std::pair<const BasicBlock *, const BasicBlock *>
9184 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB)
9185 const {
9186 // If the block has a unique predecessor, then there is no path from the
9187 // predecessor to the block that does not go through the direct edge
9188 // from the predecessor to the block.
9189 if (const BasicBlock *Pred = BB->getSinglePredecessor())
9190 return {Pred, BB};
9191
9192 // A loop's header is defined to be a block that dominates the loop.
9193 // If the header has a unique predecessor outside the loop, it must be
9194 // a block that has exactly one successor that can reach the loop.
9195 if (const Loop *L = LI.getLoopFor(BB))
9196 return {L->getLoopPredecessor(), L->getHeader()};
9197
9198 return {nullptr, nullptr};
9199 }
9200
9201 /// SCEV structural equivalence is usually sufficient for testing whether two
9202 /// expressions are equal, however for the purposes of looking for a condition
9203 /// guarding a loop, it can be useful to be a little more general, since a
9204 /// front-end may have replicated the controlling expression.
9205 static bool HasSameValue(const SCEV *A, const SCEV *B) {
9206 // Quick check to see if they are the same SCEV.
9207 if (A == B) return true;
9208
9209 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
9210 // Not all instructions that are "identical" compute the same value. For
9211 // instance, two distinct alloca instructions allocating the same type are
9212 // identical and do not read memory; but compute distinct values.
9213 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
9214 };
9215
9216 // Otherwise, if they're both SCEVUnknown, it's possible that they hold
9217 // two different instructions with the same value. Check for this case.
9218 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
9219 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
9220 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
9221 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
9222 if (ComputesEqualValues(AI, BI))
9223 return true;
9224
9225 // Otherwise assume they may have a different value.
9226 return false;
9227 }
9228
9229 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
9230 const SCEV *&LHS, const SCEV *&RHS,
9231 unsigned Depth) {
9232 bool Changed = false;
9233 // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or
9234 // '0 != 0'.
9235 auto TrivialCase = [&](bool TriviallyTrue) {
9236 LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
9237 Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
9238 return true;
9239 };
9240 // If we hit the max recursion limit, bail out.
9241 if (Depth >= 3)
9242 return false;
9243
9244 // Canonicalize a constant to the right side.
9245 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
9246 // Check for both operands constant.
9247 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
9248 if (ConstantExpr::getICmp(Pred,
9249 LHSC->getValue(),
9250 RHSC->getValue())->isNullValue())
9251 return TrivialCase(false);
9252 else
9253 return TrivialCase(true);
9254 }
9255 // Otherwise swap the operands to put the constant on the right.
9256 std::swap(LHS, RHS);
9257 Pred = ICmpInst::getSwappedPredicate(Pred);
9258 Changed = true;
9259 }
9260
9261 // If we're comparing an addrec with a value which is loop-invariant in the
9262 // addrec's loop, put the addrec on the left. Also make a dominance check,
9263 // as both operands could be addrecs loop-invariant in each other's loop.
9264 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
9265 const Loop *L = AR->getLoop();
9266 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
9267 std::swap(LHS, RHS);
9268 Pred = ICmpInst::getSwappedPredicate(Pred);
9269 Changed = true;
9270 }
9271 }
9272
9273 // If there's a constant operand, canonicalize comparisons with boundary
9274 // cases, and canonicalize *-or-equal comparisons to regular comparisons.
9275 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
9276 const APInt &RA = RC->getAPInt();
9277
9278 bool SimplifiedByConstantRange = false;
9279
9280 if (!ICmpInst::isEquality(Pred)) {
9281 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
9282 if (ExactCR.isFullSet())
9283 return TrivialCase(true);
9284 else if (ExactCR.isEmptySet())
9285 return TrivialCase(false);
9286
9287 APInt NewRHS;
9288 CmpInst::Predicate NewPred;
9289 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
9290 ICmpInst::isEquality(NewPred)) {
9291 // We were able to convert an inequality to an equality.
9292 Pred = NewPred;
9293 RHS = getConstant(NewRHS);
9294 Changed = SimplifiedByConstantRange = true;
9295 }
9296 }
9297
9298 if (!SimplifiedByConstantRange) {
9299 switch (Pred) {
9300 default:
9301 break;
9302 case ICmpInst::ICMP_EQ:
9303 case ICmpInst::ICMP_NE:
9304 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
9305 if (!RA)
9306 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
9307 if (const SCEVMulExpr *ME =
9308 dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
9309 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
9310 ME->getOperand(0)->isAllOnesValue()) {
9311 RHS = AE->getOperand(1);
9312 LHS = ME->getOperand(1);
9313 Changed = true;
9314 }
9315 break;
9316
9317
9318 // The "Should have been caught earlier!" messages refer to the fact
9319 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
9320 // should have fired on the corresponding cases, and canonicalized the
9321 // check to a trivial case.
9322
9323 case ICmpInst::ICMP_UGE:
9324 assert(!RA.isMinValue() && "Should have been caught earlier!");
9325 Pred = ICmpInst::ICMP_UGT;
9326 RHS = getConstant(RA - 1);
9327 Changed = true;
9328 break;
9329 case ICmpInst::ICMP_ULE:
9330 assert(!RA.isMaxValue() && "Should have been caught earlier!");
9331 Pred = ICmpInst::ICMP_ULT;
9332 RHS = getConstant(RA + 1);
9333 Changed = true;
9334 break;
9335 case ICmpInst::ICMP_SGE:
9336 assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
9337 Pred = ICmpInst::ICMP_SGT;
9338 RHS = getConstant(RA - 1);
9339 Changed = true;
9340 break;
9341 case ICmpInst::ICMP_SLE:
9342 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
9343 Pred = ICmpInst::ICMP_SLT;
9344 RHS = getConstant(RA + 1);
9345 Changed = true;
9346 break;
9347 }
9348 }
9349 }
9350
9351 // Check for obvious equality.
9352 if (HasSameValue(LHS, RHS)) {
9353 if (ICmpInst::isTrueWhenEqual(Pred))
9354 return TrivialCase(true);
9355 if (ICmpInst::isFalseWhenEqual(Pred))
9356 return TrivialCase(false);
9357 }
9358
9359 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
9360 // adding or subtracting 1 from one of the operands.
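// For example, "x <=s C" becomes "x <s C+1" when C+1 is known not to overflow
// signed-max, and "x >=u C" becomes "x >u C-1" when C is known non-zero.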
9361 switch (Pred) {
9362 case ICmpInst::ICMP_SLE:
9363 if (!getSignedRangeMax(RHS).isMaxSignedValue()) {
9364 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
9365 SCEV::FlagNSW);
9366 Pred = ICmpInst::ICMP_SLT;
9367 Changed = true;
9368 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
9369 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
9370 SCEV::FlagNSW);
9371 Pred = ICmpInst::ICMP_SLT;
9372 Changed = true;
9373 }
9374 break;
9375 case ICmpInst::ICMP_SGE:
9376 if (!getSignedRangeMin(RHS).isMinSignedValue()) {
9377 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
9378 SCEV::FlagNSW);
9379 Pred = ICmpInst::ICMP_SGT;
9380 Changed = true;
9381 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
9382 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
9383 SCEV::FlagNSW);
9384 Pred = ICmpInst::ICMP_SGT;
9385 Changed = true;
9386 }
9387 break;
9388 case ICmpInst::ICMP_ULE:
9389 if (!getUnsignedRangeMax(RHS).isMaxValue()) {
9390 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
9391 SCEV::FlagNUW);
9392 Pred = ICmpInst::ICMP_ULT;
9393 Changed = true;
9394 } else if (!getUnsignedRangeMin(LHS).isMinValue()) {
9395 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS);
9396 Pred = ICmpInst::ICMP_ULT;
9397 Changed = true;
9398 }
9399 break;
9400 case ICmpInst::ICMP_UGE:
9401 if (!getUnsignedRangeMin(RHS).isMinValue()) {
9402 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
9403 Pred = ICmpInst::ICMP_UGT;
9404 Changed = true;
9405 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
9406 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
9407 SCEV::FlagNUW);
9408 Pred = ICmpInst::ICMP_UGT;
9409 Changed = true;
9410 }
9411 break;
9412 default:
9413 break;
9414 }
9415
9416 // TODO: More simplifications are possible here.
9417
9418 // Recursively simplify until we either hit a recursion limit or nothing
9419 // changes.
9420 if (Changed)
9421 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);
9422
9423 return Changed;
9424 }
9425
9426 bool ScalarEvolution::isKnownNegative(const SCEV *S) {
9427 return getSignedRangeMax(S).isNegative();
9428 }
9429
9430 bool ScalarEvolution::isKnownPositive(const SCEV *S) {
9431 return getSignedRangeMin(S).isStrictlyPositive();
9432 }
9433
9434 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
9435 return !getSignedRangeMin(S).isNegative();
9436 }
9437
9438 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
9439 return !getSignedRangeMax(S).isStrictlyPositive();
9440 }
9441
9442 bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
9443 return isKnownNegative(S) || isKnownPositive(S);
9444 }
9445
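/// Split S into its value on entry to loop L and its value after the first
/// increment. For example, {A,+,1}<L> splits into the pair { A, {A+1,+,1}<L> }.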
9446 std::pair<const SCEV *, const SCEV *>
9447 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) {
9448 // Compute SCEV on entry of loop L.
9449 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this);
9450 if (Start == getCouldNotCompute())
9451 return { Start, Start };
9452 // Compute post increment SCEV for loop L.
9453 const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
9454 assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
9455 return { Start, PostInc };
9456 }
9457
9458 bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
9459 const SCEV *LHS, const SCEV *RHS) {
9460 // First collect all loops.
9461 SmallPtrSet<const Loop *, 8> LoopsUsed;
9462 getUsedLoops(LHS, LoopsUsed);
9463 getUsedLoops(RHS, LoopsUsed);
9464
9465 if (LoopsUsed.empty())
9466 return false;
9467
9468 // Domination relationship must be a linear order on collected loops.
9469 #ifndef NDEBUG
9470 for (auto *L1 : LoopsUsed)
9471 for (auto *L2 : LoopsUsed)
9472 assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
9473 DT.dominates(L2->getHeader(), L1->getHeader())) &&
9474 "Domination relationship is not a linear order");
9475 #endif
9476
9477 const Loop *MDL =
9478 *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
9479 [&](const Loop *L1, const Loop *L2) {
9480 return DT.properlyDominates(L1->getHeader(), L2->getHeader());
9481 });
9482
9483 // Get init and post increment value for LHS.
9484 auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
9485 // if LHS contains unknown non-invariant SCEV then bail out.
9486 if (SplitLHS.first == getCouldNotCompute())
9487 return false;
9488 assert (SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
9489 // Get init and post increment value for RHS.
9490 auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
9491 // if RHS contains unknown non-invariant SCEV then bail out.
9492 if (SplitRHS.first == getCouldNotCompute())
9493 return false;
9494 assert (SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
9495 // It is possible that init SCEV contains an invariant load but it does
9496 // not dominate MDL and is not available at MDL loop entry, so we should
9497 // check it here.
9498 if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
9499 !isAvailableAtLoopEntry(SplitRHS.first, MDL))
9500 return false;
9501
9502 // The backedge guard check seems to be faster than the entry one, so we check
9503 // it first; in some cases this short-circuits and speeds up the whole estimation.
9504 return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
9505 SplitRHS.second) &&
9506 isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
9507 }
9508
9509 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
9510 const SCEV *LHS, const SCEV *RHS) {
9511 // Canonicalize the inputs first.
9512 (void)SimplifyICmpOperands(Pred, LHS, RHS);
9513
9514 if (isKnownViaInduction(Pred, LHS, RHS))
9515 return true;
9516
9517 if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
9518 return true;
9519
9520 // Otherwise see what can be done with some simple reasoning.
9521 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
9522 }
9523
9524 bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
9525 const SCEV *LHS, const SCEV *RHS,
9526 const Instruction *Context) {
9527 // TODO: Analyze guards and assumes from Context's block.
9528 return isKnownPredicate(Pred, LHS, RHS) ||
9529 isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS);
9530 }
9531
9532 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
9533 const SCEVAddRecExpr *LHS,
9534 const SCEV *RHS) {
9535 const Loop *L = LHS->getLoop();
9536 return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
9537 isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
9538 }
9539
9540 Optional<ScalarEvolution::MonotonicPredicateType>
9541 ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
9542 ICmpInst::Predicate Pred) {
9543 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);
9544
9545 #ifndef NDEBUG
9546 // Verify an invariant: inverting the predicate should turn a monotonically
9547 // increasing change to a monotonically decreasing one, and vice versa.
9548 if (Result) {
9549 auto ResultSwapped =
9550 getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred));
9551
9552 assert(ResultSwapped.hasValue() && "should be able to analyze both!");
9553 assert(ResultSwapped.getValue() != Result.getValue() &&
9554 "monotonicity should flip as we flip the predicate");
9555 }
9556 #endif
9557
9558 return Result;
9559 }
9560
9561 Optional<ScalarEvolution::MonotonicPredicateType>
9562 ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
9563 ICmpInst::Predicate Pred) {
9564 // A zero step value for LHS means the induction variable is essentially a
9565 // loop invariant value. We don't really depend on the predicate actually
9566 // flipping from false to true (for increasing predicates, and the other way
9567 // around for decreasing predicates), all we care about is that *if* the
9568 // predicate changes then it only changes from false to true.
9569 //
9570 // A zero step value in itself is not very useful, but there may be places
9571 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
9572 // as general as possible.
9573
9574 // Only handle LE/LT/GE/GT predicates.
9575 if (!ICmpInst::isRelational(Pred))
9576 return None;
9577
9578 bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred);
9579 assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) &&
9580 "Should be greater or less!");
9581
9582 // Check that AR does not wrap.
9583 if (ICmpInst::isUnsigned(Pred)) {
9584 if (!LHS->hasNoUnsignedWrap())
9585 return None;
9586 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
9587 } else {
9588 assert(ICmpInst::isSigned(Pred) &&
9589 "Relational predicate is either signed or unsigned!");
9590 if (!LHS->hasNoSignedWrap())
9591 return None;
9592
9593 const SCEV *Step = LHS->getStepRecurrence(*this);
9594
9595 if (isKnownNonNegative(Step))
9596 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
9597
9598 if (isKnownNonPositive(Step))
9599 return !IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
9600
9601 return None;
9602 }
9603 }
9604
9605 Optional<ScalarEvolution::LoopInvariantPredicate>
9606 ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred,
9607 const SCEV *LHS, const SCEV *RHS,
9608 const Loop *L) {
9609
9610 // If one operand is loop-invariant, force it into the RHS; otherwise bail out.
9611 if (!isLoopInvariant(RHS, L)) {
9612 if (!isLoopInvariant(LHS, L))
9613 return None;
9614
9615 std::swap(LHS, RHS);
9616 Pred = ICmpInst::getSwappedPredicate(Pred);
9617 }
9618
9619 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
9620 if (!ArLHS || ArLHS->getLoop() != L)
9621 return None;
9622
9623 auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred);
9624 if (!MonotonicType)
9625 return None;
9626 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
9627 // true as the loop iterates, and the backedge is control dependent on
9628 // "ArLHS `Pred` RHS" == true then we can reason as follows:
9629 //
9630 // * if the predicate was false in the first iteration then the predicate
9631 // is never evaluated again, since the loop exits without taking the
9632 // backedge.
9633 // * if the predicate was true in the first iteration then it will
9634 // continue to be true for all future iterations since it is
9635 // monotonically increasing.
9636 //
9637 // For both the above possibilities, we can replace the loop varying
9638 // predicate with its value on the first iteration of the loop (which is
9639 // loop invariant).
9640 //
9641 // A similar reasoning applies for a monotonically decreasing predicate, by
9642 // replacing true with false and false with true in the above two bullets.
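// Illustrative sketch (a hypothetical loop, not taken from a test): if
// ArLHS = {0,+,1}<nsw>, Pred = s>, and the backedge is only taken while
// "{0,+,1} s> RHS" is true, then every evaluation of the predicate inside
// the loop is equivalent to its first-iteration value "0 s> RHS", which is
// what we return below.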
9643 bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing;
9644 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);
9645
9646 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
9647 return None;
9648
9649 return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(), RHS);
9650 }
9651
9652 Optional<ScalarEvolution::LoopInvariantPredicate>
9653 ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations(
9654 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
9655 const Instruction *Context, const SCEV *MaxIter) {
9656 // Try to prove the following set of facts:
9657 // - The predicate is monotonic in the iteration space.
9658 // - If the check does not fail on the 1st iteration:
9659 // - No overflow will happen during first MaxIter iterations;
9660 // - It will not fail on the MaxIter'th iteration.
9661 // If the check does fail on the 1st iteration, we leave the loop and no
9662 // other checks matter.
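// Illustrative instance: for AR = {Start,+,1} and an exit check
// "AR s< RHS" with at most MaxIter backedges taken, Last = Start + MaxIter;
// we then require "Last s< RHS" to be guarded on the backedge and
// "Start s<= Last" to rule out wrap (see below).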
9663
9664 // If one operand is loop-invariant, force it into the RHS; otherwise bail out.
9665 if (!isLoopInvariant(RHS, L)) {
9666 if (!isLoopInvariant(LHS, L))
9667 return None;
9668
9669 std::swap(LHS, RHS);
9670 Pred = ICmpInst::getSwappedPredicate(Pred);
9671 }
9672
9673 auto *AR = dyn_cast<SCEVAddRecExpr>(LHS);
9674 if (!AR || AR->getLoop() != L)
9675 return None;
9676
9677 // The predicate must be relational (i.e. <, <=, >=, >).
9678 if (!ICmpInst::isRelational(Pred))
9679 return None;
9680
9681 // TODO: Support steps other than +/- 1.
9682 const SCEV *Step = AR->getStepRecurrence(*this);
9683 auto *One = getOne(Step->getType());
9684 auto *MinusOne = getNegativeSCEV(One);
9685 if (Step != One && Step != MinusOne)
9686 return None;
9687
9688 // A type mismatch here means that MaxIter is potentially larger than the max
9689 // unsigned value in the start type, which means we cannot prove no-wrap for
9690 // the indvar.
9691 if (AR->getType() != MaxIter->getType())
9692 return None;
9693
9694 // Value of IV on suggested last iteration.
9695 const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this);
9696 // Does it still meet the requirement?
9697 if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS))
9698 return None;
9699 // Because the step is +/- 1 and MaxIter has the same type as Start (i.e. it
9700 // does not exceed the max unsigned value of this type), this effectively proves
9701 // that there is no wrap during the iteration. To prove that there is no
9702 // signed/unsigned wrap, we need to check that
9703 // Start <= Last for step = 1 or Start >= Last for step = -1.
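// E.g. (illustrative): Start = 0, step = 1, MaxIter = N gives Last = N;
// proving "0 <= N" in the signedness of Pred shows the IV cannot wrap
// within the first MaxIter iterations.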
9704 ICmpInst::Predicate NoOverflowPred =
9705 CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
9706 if (Step == MinusOne)
9707 NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred);
9708 const SCEV *Start = AR->getStart();
9709 if (!isKnownPredicateAt(NoOverflowPred, Start, Last, Context))
9710 return None;
9711
9712 // Everything is fine.
9713 return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS);
9714 }
9715
9716 bool ScalarEvolution::isKnownPredicateViaConstantRanges(
9717 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
9718 if (HasSameValue(LHS, RHS))
9719 return ICmpInst::isTrueWhenEqual(Pred);
9720
9721 // This code is split out from isKnownPredicate because it is called from
9722 // within isLoopEntryGuardedByCond.
9723
9724 auto CheckRanges =
9725 [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) {
9726 return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS)
9727 .contains(RangeLHS);
9728 };
9729
9730 // The check at the top of the function catches the case where the values are
9731 // known to be equal.
9732 if (Pred == CmpInst::ICMP_EQ)
9733 return false;
9734
9735 if (Pred == CmpInst::ICMP_NE)
9736 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
9737 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
9738 isKnownNonZero(getMinusSCEV(LHS, RHS));
9739
9740 if (CmpInst::isSigned(Pred))
9741 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));
9742
9743 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
9744 }
9745
9746 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
9747 const SCEV *LHS,
9748 const SCEV *RHS) {
9749 // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer.
9750 // Return Y via OutY.
9751 auto MatchBinaryAddToConst =
9752 [this](const SCEV *Result, const SCEV *X, APInt &OutY,
9753 SCEV::NoWrapFlags ExpectedFlags) {
9754 const SCEV *NonConstOp, *ConstOp;
9755 SCEV::NoWrapFlags FlagsPresent;
9756
9757 if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
9758 !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
9759 return false;
9760
9761 OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
9762 return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
9763 };
9764
9765 APInt C;
9766
9767 switch (Pred) {
9768 default:
9769 break;
9770
9771 case ICmpInst::ICMP_SGE:
9772 std::swap(LHS, RHS);
9773 LLVM_FALLTHROUGH;
9774 case ICmpInst::ICMP_SLE:
9775 // X s<= (X + C)<nsw> if C >= 0
9776 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
9777 return true;
9778
9779 // (X + C)<nsw> s<= X if C <= 0
9780 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
9781 !C.isStrictlyPositive())
9782 return true;
9783 break;
9784
9785 case ICmpInst::ICMP_SGT:
9786 std::swap(LHS, RHS);
9787 LLVM_FALLTHROUGH;
9788 case ICmpInst::ICMP_SLT:
9789 // X s< (X + C)<nsw> if C > 0
9790 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
9791 C.isStrictlyPositive())
9792 return true;
9793
9794 // (X + C)<nsw> s< X if C < 0
9795 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
9796 return true;
9797 break;
9798
9799 case ICmpInst::ICMP_UGE:
9800 std::swap(LHS, RHS);
9801 LLVM_FALLTHROUGH;
9802 case ICmpInst::ICMP_ULE:
9803 // X u<= (X + C)<nuw> for any C
9804 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW))
9805 return true;
9806 break;
9807
9808 case ICmpInst::ICMP_UGT:
9809 std::swap(LHS, RHS);
9810 LLVM_FALLTHROUGH;
9811 case ICmpInst::ICMP_ULT:
9812 // X u< (X + C)<nuw> if C != 0
9813 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW) && !C.isNullValue())
9814 return true;
9815 break;
9816 }
9817
9818 return false;
9819 }
9820
9821 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
9822 const SCEV *LHS,
9823 const SCEV *RHS) {
9824 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
9825 return false;
9826
9827 // Allowing an arbitrary number of activations of isKnownPredicateViaSplitting
9828 // on the stack can result in exponential time complexity.
9829 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);
9830
9831 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
9832 //
9833 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
9834 // isKnownPredicate. isKnownPredicate is more powerful, but also more
9835 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
9836 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
9837 // use isKnownPredicate later if needed.
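// A small worked instance (illustrative, i8 values): if L = 100 (so
// L >= 0), then "I u< 100" holds exactly when "0 s<= I s< 100"; a value
// like I = -1 (0xFF, i.e. 255 unsigned) makes both sides of the
// equivalence false.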
9838 return isKnownNonNegative(RHS) &&
9839 isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
9840 isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
9841 }
9842
9843 bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
9844 ICmpInst::Predicate Pred,
9845 const SCEV *LHS, const SCEV *RHS) {
9846 // No need to even try if we know the module has no guards.
9847 if (!HasGuards)
9848 return false;
9849
9850 return any_of(*BB, [&](const Instruction &I) {
9851 using namespace llvm::PatternMatch;
9852
9853 Value *Condition;
9854 return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
9855 m_Value(Condition))) &&
9856 isImpliedCond(Pred, LHS, RHS, Condition, false);
9857 });
9858 }
9859
9860 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
9861 /// protected by a conditional between LHS and RHS. This is used to
9862 /// eliminate casts.
9863 bool
9864 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
9865 ICmpInst::Predicate Pred,
9866 const SCEV *LHS, const SCEV *RHS) {
9867 // Interpret a null as meaning no loop, where there is obviously no guard
9868 // (interprocedural conditions notwithstanding).
9869 if (!L) return true;
9870
9871 if (VerifyIR)
9872 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
9873 "This cannot be done on broken IR!");
9874
9875
9876 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
9877 return true;
9878
9879 BasicBlock *Latch = L->getLoopLatch();
9880 if (!Latch)
9881 return false;
9882
9883 BranchInst *LoopContinuePredicate =
9884 dyn_cast<BranchInst>(Latch->getTerminator());
9885 if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
9886 isImpliedCond(Pred, LHS, RHS,
9887 LoopContinuePredicate->getCondition(),
9888 LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
9889 return true;
9890
9891 // We don't want more than one activation of the following loops on the stack
9892 // -- that can lead to O(n!) time complexity.
9893 if (WalkingBEDominatingConds)
9894 return false;
9895
9896 SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);
9897
9898 // See if we can exploit a trip count to prove the predicate.
9899 const auto &BETakenInfo = getBackedgeTakenInfo(L);
9900 const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
9901 if (LatchBECount != getCouldNotCompute()) {
9902 // We know that Latch branches back to the loop header exactly
9903 // LatchBECount times. This means the backedge condition at Latch is
9904 // equivalent to "{0,+,1} u< LatchBECount".
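// For instance (illustrative), if LatchBECount is the constant 9, any fact
// implied by "{0,+,1} u< 9" (such as "IV u<= 8" on backedge-taking
// iterations) can be used to establish Pred below.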
9905 Type *Ty = LatchBECount->getType();
9906 auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
9907 const SCEV *LoopCounter =
9908 getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
9909 if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
9910 LatchBECount))
9911 return true;
9912 }
9913
9914 // Check conditions due to any @llvm.assume intrinsics.
9915 for (auto &AssumeVH : AC.assumptions()) {
9916 if (!AssumeVH)
9917 continue;
9918 auto *CI = cast<CallInst>(AssumeVH);
9919 if (!DT.dominates(CI, Latch->getTerminator()))
9920 continue;
9921
9922 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
9923 return true;
9924 }
9925
9926 // If the loop is not reachable from the entry block, we risk running into an
9927 // infinite loop as we walk up into the dom tree. These loops do not matter
9928 // anyway, so we just return a conservative answer when we see them.
9929 if (!DT.isReachableFromEntry(L->getHeader()))
9930 return false;
9931
9932 if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
9933 return true;
9934
9935 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
9936 DTN != HeaderDTN; DTN = DTN->getIDom()) {
9937 assert(DTN && "should reach the loop header before reaching the root!");
9938
9939 BasicBlock *BB = DTN->getBlock();
9940 if (isImpliedViaGuard(BB, Pred, LHS, RHS))
9941 return true;
9942
9943 BasicBlock *PBB = BB->getSinglePredecessor();
9944 if (!PBB)
9945 continue;
9946
9947 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
9948 if (!ContinuePredicate || !ContinuePredicate->isConditional())
9949 continue;
9950
9951 Value *Condition = ContinuePredicate->getCondition();
9952
9953 // If we have an edge `E` within the loop body that dominates the only
9954 // latch, the condition guarding `E` also guards the backedge. This
9955 // reasoning works only for loops with a single latch.
9956
9957 BasicBlockEdge DominatingEdge(PBB, BB);
9958 if (DominatingEdge.isSingleEdge()) {
9959 // We're constructively (and conservatively) enumerating edges within the
9960 // loop body that dominate the latch. The dominator tree better agree
9961 // with us on this:
9962 assert(DT.dominates(DominatingEdge, Latch) && "should be!");
9963
9964 if (isImpliedCond(Pred, LHS, RHS, Condition,
9965 BB != ContinuePredicate->getSuccessor(0)))
9966 return true;
9967 }
9968 }
9969
9970 return false;
9971 }
9972
9973 bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
9974 ICmpInst::Predicate Pred,
9975 const SCEV *LHS,
9976 const SCEV *RHS) {
9977 if (VerifyIR)
9978 assert(!verifyFunction(*BB->getParent(), &dbgs()) &&
9979 "This cannot be done on broken IR!");
9980
9981 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
9982 return true;
9983
9984 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove
9985 // the facts (a >= b && a != b) separately. A typical situation is when the
9986 // non-strict comparison is known from ranges and non-equality is known from
9987 // dominating predicates. If we are proving strict comparison, we always try
9988 // to prove non-equality and non-strict comparison separately.
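// For example (illustrative): to prove "a s> b" we may know "a s>= b" from
// constant ranges and "a != b" from a dominating branch; together they give
// the strict fact even though neither source proves it alone.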
9989 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred);
9990 const bool ProvingStrictComparison = (Pred != NonStrictPredicate);
9991 bool ProvedNonStrictComparison = false;
9992 bool ProvedNonEquality = false;
9993
9994 if (ProvingStrictComparison) {
9995 ProvedNonStrictComparison =
9996 isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS);
9997 ProvedNonEquality =
9998 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS);
9999 if (ProvedNonStrictComparison && ProvedNonEquality)
10000 return true;
10001 }
10002
10003 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard.
10004 auto ProveViaGuard = [&](const BasicBlock *Block) {
10005 if (isImpliedViaGuard(Block, Pred, LHS, RHS))
10006 return true;
10007 if (ProvingStrictComparison) {
10008 if (!ProvedNonStrictComparison)
10009 ProvedNonStrictComparison =
10010 isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS);
10011 if (!ProvedNonEquality)
10012 ProvedNonEquality =
10013 isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS);
10014 if (ProvedNonStrictComparison && ProvedNonEquality)
10015 return true;
10016 }
10017 return false;
10018 };
10019
10020 // Try to prove (Pred, LHS, RHS) using isImpliedCond.
10021 auto ProveViaCond = [&](const Value *Condition, bool Inverse) {
10022 const Instruction *Context = &BB->front();
10023 if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, Context))
10024 return true;
10025 if (ProvingStrictComparison) {
10026 if (!ProvedNonStrictComparison)
10027 ProvedNonStrictComparison = isImpliedCond(NonStrictPredicate, LHS, RHS,
10028 Condition, Inverse, Context);
10029 if (!ProvedNonEquality)
10030 ProvedNonEquality = isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS,
10031 Condition, Inverse, Context);
10032 if (ProvedNonStrictComparison && ProvedNonEquality)
10033 return true;
10034 }
10035 return false;
10036 };
10037
10038 // Starting at the block's predecessor, climb up the predecessor chain as
10039 // long as each predecessor we find has a unique successor leading to the
10040 // original block.
10041 const Loop *ContainingLoop = LI.getLoopFor(BB);
10042 const BasicBlock *PredBB;
10043 if (ContainingLoop && ContainingLoop->getHeader() == BB)
10044 PredBB = ContainingLoop->getLoopPredecessor();
10045 else
10046 PredBB = BB->getSinglePredecessor();
10047 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
10048 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
10049 if (ProveViaGuard(Pair.first))
10050 return true;
10051
10052 const BranchInst *LoopEntryPredicate =
10053 dyn_cast<BranchInst>(Pair.first->getTerminator());
10054 if (!LoopEntryPredicate ||
10055 LoopEntryPredicate->isUnconditional())
10056 continue;
10057
10058 if (ProveViaCond(LoopEntryPredicate->getCondition(),
10059 LoopEntryPredicate->getSuccessor(0) != Pair.second))
10060 return true;
10061 }
10062
10063 // Check conditions due to any @llvm.assume intrinsics.
10064 for (auto &AssumeVH : AC.assumptions()) {
10065 if (!AssumeVH)
10066 continue;
10067 auto *CI = cast<CallInst>(AssumeVH);
10068 if (!DT.dominates(CI, BB))
10069 continue;
10070
10071 if (ProveViaCond(CI->getArgOperand(0), false))
10072 return true;
10073 }
10074
10075 return false;
10076 }
10077
10078 bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
10079 ICmpInst::Predicate Pred,
10080 const SCEV *LHS,
10081 const SCEV *RHS) {
10082 // Interpret a null as meaning no loop, where there is obviously no guard
10083 // (interprocedural conditions notwithstanding).
10084 if (!L)
10085 return false;
10086
10087 // Both LHS and RHS must be available at loop entry.
10088 assert(isAvailableAtLoopEntry(LHS, L) &&
10089 "LHS is not available at Loop Entry");
10090 assert(isAvailableAtLoopEntry(RHS, L) &&
10091 "RHS is not available at Loop Entry");
10092 return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
10093 }
10094
10095 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
10096 const SCEV *RHS,
10097 const Value *FoundCondValue, bool Inverse,
10098 const Instruction *Context) {
10099 if (!PendingLoopPredicates.insert(FoundCondValue).second)
10100 return false;
10101
10102 auto ClearOnExit =
10103 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
10104
10105 // Recursively handle And and Or conditions.
10106 if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
10107 if (BO->getOpcode() == Instruction::And) {
10108 if (!Inverse)
10109 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse,
10110 Context) ||
10111 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse,
10112 Context);
10113 } else if (BO->getOpcode() == Instruction::Or) {
10114 if (Inverse)
10115 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse,
10116 Context) ||
10117 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse,
10118 Context);
10119 }
10120 }
10121
10122 const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
10123 if (!ICI) return false;
10124
10125 // We have found a conditional branch that dominates the loop or controls
10126 // the loop latch. Check to see if it is the comparison we are looking for.
10127 ICmpInst::Predicate FoundPred;
10128 if (Inverse)
10129 FoundPred = ICI->getInversePredicate();
10130 else
10131 FoundPred = ICI->getPredicate();
10132
10133 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
10134 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
10135
10136 return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, Context);
10137 }
10138
10139 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
10140 const SCEV *RHS,
10141 ICmpInst::Predicate FoundPred,
10142 const SCEV *FoundLHS, const SCEV *FoundRHS,
10143 const Instruction *Context) {
10144 // Balance the types.
10145 if (getTypeSizeInBits(LHS->getType()) <
10146 getTypeSizeInBits(FoundLHS->getType())) {
10147 // For unsigned and equality predicates, try to prove that both found
10148 // operands fit into narrow unsigned range. If so, try to prove facts in
10149 // narrow types.
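// Illustrative case: LHS/RHS are i8 while FoundLHS/FoundRHS are i16 values
// both known u<= 255; the found fact then survives truncation to i8, where
// the implication can be checked without extending LHS and RHS.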
10150 if (!CmpInst::isSigned(FoundPred)) {
10151 auto *NarrowType = LHS->getType();
10152 auto *WideType = FoundLHS->getType();
10153 auto BitWidth = getTypeSizeInBits(NarrowType);
10154 const SCEV *MaxValue = getZeroExtendExpr(
10155 getConstant(APInt::getMaxValue(BitWidth)), WideType);
10156 if (isKnownPredicate(ICmpInst::ICMP_ULE, FoundLHS, MaxValue) &&
10157 isKnownPredicate(ICmpInst::ICMP_ULE, FoundRHS, MaxValue)) {
10158 const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType);
10159 const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType);
10160 if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS,
10161 TruncFoundRHS, Context))
10162 return true;
10163 }
10164 }
10165
10166 if (CmpInst::isSigned(Pred)) {
10167 LHS = getSignExtendExpr(LHS, FoundLHS->getType());
10168 RHS = getSignExtendExpr(RHS, FoundLHS->getType());
10169 } else {
10170 LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
10171 RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
10172 }
10173 } else if (getTypeSizeInBits(LHS->getType()) >
10174 getTypeSizeInBits(FoundLHS->getType())) {
10175 if (CmpInst::isSigned(FoundPred)) {
10176 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
10177 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
10178 } else {
10179 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
10180 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
10181 }
10182 }
10183 return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS,
10184 FoundRHS, Context);
10185 }
10186
10187 bool ScalarEvolution::isImpliedCondBalancedTypes(
10188 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
10189 ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS,
10190 const Instruction *Context) {
10191 assert(getTypeSizeInBits(LHS->getType()) ==
10192 getTypeSizeInBits(FoundLHS->getType()) &&
10193 "Types should be balanced!");
10194 // Canonicalize the query to match the way instcombine will have
10195 // canonicalized the comparison.
10196 if (SimplifyICmpOperands(Pred, LHS, RHS))
10197 if (LHS == RHS)
10198 return CmpInst::isTrueWhenEqual(Pred);
10199 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
10200 if (FoundLHS == FoundRHS)
10201 return CmpInst::isFalseWhenEqual(FoundPred);
10202
10203 // Check to see if we can make the LHS or RHS match.
10204 if (LHS == FoundRHS || RHS == FoundLHS) {
10205 if (isa<SCEVConstant>(RHS)) {
10206 std::swap(FoundLHS, FoundRHS);
10207 FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
10208 } else {
10209 std::swap(LHS, RHS);
10210 Pred = ICmpInst::getSwappedPredicate(Pred);
10211 }
10212 }
10213
10214 // Check whether the found predicate is the same as the desired predicate.
10215 if (FoundPred == Pred)
10216 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context);
10217
10218 // Check whether swapping the found predicate makes it the same as the
10219 // desired predicate.
10220 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
10221 if (isa<SCEVConstant>(RHS))
10222 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, Context);
10223 else
10224 return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred), RHS,
10225 LHS, FoundLHS, FoundRHS, Context);
10226 }
10227
10228 // Unsigned comparison is the same as signed comparison when both operands
10229 // are non-negative.
10230 if (CmpInst::isUnsigned(FoundPred) &&
10231 CmpInst::getSignedPredicate(FoundPred) == Pred &&
10232 isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
10233 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context);
10234
10235 // Check if we can make progress by sharpening ranges.
10236 if (FoundPred == ICmpInst::ICMP_NE &&
10237 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {
10238
10239 const SCEVConstant *C = nullptr;
10240 const SCEV *V = nullptr;
10241
10242 if (isa<SCEVConstant>(FoundLHS)) {
10243 C = cast<SCEVConstant>(FoundLHS);
10244 V = FoundRHS;
10245 } else {
10246 C = cast<SCEVConstant>(FoundRHS);
10247 V = FoundLHS;
10248 }
10249
10250 // The guarding predicate tells us that C != V. If the known range
10251 // of V is [C, t), we can sharpen the range to [C + 1, t). The
10252 // range we consider has to correspond to same signedness as the
10253 // predicate we're interested in folding.
10254
10255 APInt Min = ICmpInst::isSigned(Pred) ?
10256 getSignedRangeMin(V) : getUnsignedRangeMin(V);
10257
10258 if (Min == C->getAPInt()) {
10259 // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
10260 // This is true even if (Min + 1) wraps around -- in case of
10261 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).
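// For instance (illustrative): if V is known to be in [5, 100) and the
// guard tells us V != 5, the range sharpens to [6, 100), so any fact that
// follows from "V >= 6" becomes available below.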
10262
10263 APInt SharperMin = Min + 1;
10264
10265 switch (Pred) {
10266 case ICmpInst::ICMP_SGE:
10267 case ICmpInst::ICMP_UGE:
10268 // We know V `Pred` SharperMin. If this implies LHS `Pred`
10269 // RHS, we're done.
10270 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
10271 Context))
10272 return true;
10273 LLVM_FALLTHROUGH;
10274
10275 case ICmpInst::ICMP_SGT:
10276 case ICmpInst::ICMP_UGT:
10277 // We know from the range information that (V `Pred` Min ||
10278 // V == Min). We know from the guarding condition that !(V
10279 // == Min). This gives us
10280 //
10281 // V `Pred` Min || V == Min && !(V == Min)
10282 // => V `Pred` Min
10283 //
10284 // If V `Pred` Min implies LHS `Pred` RHS, we're done.
10285
10286 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min),
10287 Context))
10288 return true;
10289 break;
10290
10291 // `LHS < RHS` and `LHS <= RHS` are handled in the same way as `RHS > LHS` and `RHS >= LHS` respectively.
10292 case ICmpInst::ICMP_SLE:
10293 case ICmpInst::ICMP_ULE:
10294 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
10295 LHS, V, getConstant(SharperMin), Context))
10296 return true;
10297 LLVM_FALLTHROUGH;
10298
10299 case ICmpInst::ICMP_SLT:
10300 case ICmpInst::ICMP_ULT:
10301 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
10302 LHS, V, getConstant(Min), Context))
10303 return true;
10304 break;
10305
10306 default:
10307 // No change
10308 break;
10309 }
10310 }
10311 }
10312
10313 // Check whether the actual condition is beyond sufficient.
10314 if (FoundPred == ICmpInst::ICMP_EQ)
10315 if (ICmpInst::isTrueWhenEqual(Pred))
10316 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context))
10317 return true;
10318 if (Pred == ICmpInst::ICMP_NE)
10319 if (!ICmpInst::isTrueWhenEqual(FoundPred))
10320 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS,
10321 Context))
10322 return true;
10323
10324 // Otherwise assume the worst.
10325 return false;
10326 }
10327
10328 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
10329 const SCEV *&L, const SCEV *&R,
10330 SCEV::NoWrapFlags &Flags) {
10331 const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
10332 if (!AE || AE->getNumOperands() != 2)
10333 return false;
10334
10335 L = AE->getOperand(0);
10336 R = AE->getOperand(1);
10337 Flags = AE->getNoWrapFlags();
10338 return true;
10339 }
10340
10341 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
10342 const SCEV *Less) {
10343 // We avoid subtracting expressions here because this function is usually
10344 // fairly deep in the call stack (i.e. is called many times).
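// E.g. (illustrative): for More = (X + 10) and Less = (X + 3), the pattern
// matching below returns the constant difference 7 without ever
// materializing the subtraction (More - Less).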
10345
10346 // X - X = 0.
10347 if (More == Less)
10348 return APInt(getTypeSizeInBits(More->getType()), 0);
10349
10350 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
10351 const auto *LAR = cast<SCEVAddRecExpr>(Less);
10352 const auto *MAR = cast<SCEVAddRecExpr>(More);
10353
10354 if (LAR->getLoop() != MAR->getLoop())
10355 return None;
10356
10357 // We look at affine expressions only; not for correctness but to keep
10358 // getStepRecurrence cheap.
10359 if (!LAR->isAffine() || !MAR->isAffine())
10360 return None;
10361
10362 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
10363 return None;
10364
10365 Less = LAR->getStart();
10366 More = MAR->getStart();
10367
10368 // fall through
10369 }
10370
10371 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) {
10372 const auto &M = cast<SCEVConstant>(More)->getAPInt();
10373 const auto &L = cast<SCEVConstant>(Less)->getAPInt();
10374 return M - L;
10375 }
10376
10377 SCEV::NoWrapFlags Flags;
10378 const SCEV *LLess = nullptr, *RLess = nullptr;
10379 const SCEV *LMore = nullptr, *RMore = nullptr;
10380 const SCEVConstant *C1 = nullptr, *C2 = nullptr;
10381 // Compare (X + C1) vs X.
10382 if (splitBinaryAdd(Less, LLess, RLess, Flags))
10383 if ((C1 = dyn_cast<SCEVConstant>(LLess)))
10384 if (RLess == More)
10385 return -(C1->getAPInt());
10386
10387 // Compare X vs (X + C2).
10388 if (splitBinaryAdd(More, LMore, RMore, Flags))
10389 if ((C2 = dyn_cast<SCEVConstant>(LMore)))
10390 if (RMore == Less)
10391 return C2->getAPInt();
10392
10393 // Compare (X + C1) vs (X + C2).
10394 if (C1 && C2 && RLess == RMore)
10395 return C2->getAPInt() - C1->getAPInt();
10396
10397 return None;
10398 }
10399
10400 bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
10401 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
10402 const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *Context) {
10403 // Try to recognize the following pattern:
10404 //
10405 // FoundRHS = ...
10406 // ...
10407 // loop:
10408 // FoundLHS = {Start,+,W}
10409 // context_bb: // Basic block from the same loop
10410 // known(Pred, FoundLHS, FoundRHS)
10411 //
10412 // If some predicate is known in the context of a loop, it is also known on
10413 // each iteration of this loop, including the first iteration. Therefore, in
10414 // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to
10415 // prove the original pred using this fact.
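// Illustrative instance: if "{0,+,1} s< len" is known at a block inside the
// loop that runs on every iteration, then it held on iteration 0 as well,
// so "0 s< len" is available at loop entry.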
10416 if (!Context)
10417 return false;
10418 const BasicBlock *ContextBB = Context->getParent();
10419 // Make sure AR varies in the context block.
10420 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) {
10421 const Loop *L = AR->getLoop();
10422 // Make sure that the context belongs to the loop and executes on the 1st
10423 // iteration (if it ever executes at all).
10424 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
10425 return false;
10426 if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop()))
10427 return false;
10428 return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS);
10429 }
10430
10431 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) {
10432 const Loop *L = AR->getLoop();
10433 // Make sure that the context belongs to the loop and executes on the 1st
10434 // iteration (if it ever executes at all).
10435 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
10436 return false;
10437 if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop()))
10438 return false;
10439 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart());
10440 }
10441
10442 return false;
10443 }
10444
10445 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
10446 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
10447 const SCEV *FoundLHS, const SCEV *FoundRHS) {
10448 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
10449 return false;
10450
10451 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
10452 if (!AddRecLHS)
10453 return false;
10454
10455 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
10456 if (!AddRecFoundLHS)
10457 return false;
10458
10459 // We'd like to let SCEV reason about control dependencies, so we constrain
10460 // both the inequalities to be about add recurrences on the same loop. This
10461 // way we can use isLoopEntryGuardedByCond later.
10462
10463 const Loop *L = AddRecFoundLHS->getLoop();
10464 if (L != AddRecLHS->getLoop())
10465 return false;
10466
10467 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1)
10468 //
10469 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C)
10470 // ... (2)
10471 //
10472 // Informal proof for (2), assuming (1) [*]:
10473 //
10474 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**]
10475 //
10476 // Then
10477 //
10478 // FoundLHS s< FoundRHS s< INT_MIN - C
10479 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ]
10480 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ]
10481 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s<
10482 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ]
10483 // <=> FoundLHS + C s< FoundRHS + C
10484 //
10485 // [*]: (1) can be proved by ruling out overflow.
10486 //
10487 // [**]: This can be proved by analyzing all the four possibilities:
10488 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
10489 // (A s>= 0, B s>= 0).
10490 //
10491 // Note:
10492 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
10493 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS
10494 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS
10495 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is
10496 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
10497 // C)".
10498
10499 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
10500 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
10501 if (!LDiff || !RDiff || *LDiff != *RDiff)
10502 return false;
10503
10504 if (LDiff->isMinValue())
10505 return true;
10506
10507 APInt FoundRHSLimit;
10508
10509 if (Pred == CmpInst::ICMP_ULT) {
10510 FoundRHSLimit = -(*RDiff);
10511 } else {
10512 assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
10513 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
10514 }
10515
10516 // Try to prove (1) or (2), as needed.
10517 return isAvailableAtLoopEntry(FoundRHS, L) &&
10518 isLoopEntryGuardedByCond(L, Pred, FoundRHS,
10519 getConstant(FoundRHSLimit));
10520 }
10521
10522 bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
10523 const SCEV *LHS, const SCEV *RHS,
10524 const SCEV *FoundLHS,
10525 const SCEV *FoundRHS, unsigned Depth) {
10526 const PHINode *LPhi = nullptr, *RPhi = nullptr;
10527
10528 auto ClearOnExit = make_scope_exit([&]() {
10529 if (LPhi) {
10530 bool Erased = PendingMerges.erase(LPhi);
10531 assert(Erased && "Failed to erase LPhi!");
10532 (void)Erased;
10533 }
10534 if (RPhi) {
10535 bool Erased = PendingMerges.erase(RPhi);
10536 assert(Erased && "Failed to erase RPhi!");
10537 (void)Erased;
10538 }
10539 });
10540
10541 // Find the respective Phis and check that they are not already pending.
10542 if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
10543 if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
10544 if (!PendingMerges.insert(Phi).second)
10545 return false;
10546 LPhi = Phi;
10547 }
10548 if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
10549 if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
10550 // If we detect a loop of Phi nodes being processed by this method, for
10551 // example:
10552 //
10553 // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
10554 // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
10555 //
10556 // we don't want to deal with a case that complex, so we return the
10557 // conservative answer false.
10558 if (!PendingMerges.insert(Phi).second)
10559 return false;
10560 RPhi = Phi;
10561 }
10562
10563 // If none of LHS, RHS is a Phi, nothing to do here.
10564 if (!LPhi && !RPhi)
10565 return false;
10566
10567 // If there is a SCEVUnknown Phi we are interested in, make it the LHS.
10568 if (!LPhi) {
10569 std::swap(LHS, RHS);
10570 std::swap(FoundLHS, FoundRHS);
10571 std::swap(LPhi, RPhi);
10572 Pred = ICmpInst::getSwappedPredicate(Pred);
10573 }
10574
10575 assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
10576 const BasicBlock *LBB = LPhi->getParent();
10577 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
10578
10579 auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
10580 return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
10581 isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
10582 isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
10583 };
10584
10585 if (RPhi && RPhi->getParent() == LBB) {
10586 // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
10587 // If we compare two Phis from the same block, and for each predecessor
10588 // block the predicate is true for the incoming values from that block, then
10589 // the predicate is also true for the Phis.
10590 for (const BasicBlock *IncBB : predecessors(LBB)) {
10591 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
10592 const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
10593 if (!ProvedEasily(L, R))
10594 return false;
10595 }
10596 } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
10597 // Case two: RHS is also a Phi from the same basic block, and it is an
10598 // AddRec. It means that there is a loop which has both AddRec and Unknown
10599 // PHIs; for it we can compare the incoming values of the AddRec from above
10600 // the loop and from the latch with the respective incoming values of LPhi.
10601 // TODO: Generalize to handle loops with many inputs in a header.
10602 if (LPhi->getNumIncomingValues() != 2) return false;
10603
10604 auto *RLoop = RAR->getLoop();
10605 auto *Predecessor = RLoop->getLoopPredecessor();
10606 assert(Predecessor && "Loop with AddRec with no predecessor?");
10607 const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
10608 if (!ProvedEasily(L1, RAR->getStart()))
10609 return false;
10610 auto *Latch = RLoop->getLoopLatch();
10611 assert(Latch && "Loop with AddRec with no latch?");
10612 const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
10613 if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
10614 return false;
10615 } else {
10616 // In all other cases go over the inputs of LHS and compare each of them to
10617 // RHS: the predicate is true for (LHS, RHS) if it is true for all such pairs.
10618 // At this point RHS is either a non-Phi, or it is a Phi from some block
10619 // different from LBB.
10620 for (const BasicBlock *IncBB : predecessors(LBB)) {
10621 // Check that RHS is available in this block.
10622 if (!dominates(RHS, IncBB))
10623 return false;
10624 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
10625 // Make sure L does not refer to a value from a potentially previous
10626 // iteration of a loop.
10627 if (!properlyDominates(L, IncBB))
10628 return false;
10629 if (!ProvedEasily(L, RHS))
10630 return false;
10631 }
10632 }
10633 return true;
10634 }
10635
10636 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
10637 const SCEV *LHS, const SCEV *RHS,
10638 const SCEV *FoundLHS,
10639 const SCEV *FoundRHS,
10640 const Instruction *Context) {
10641 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
10642 return true;
10643
10644 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
10645 return true;
10646
10647 if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS,
10648 Context))
10649 return true;
10650
10651 return isImpliedCondOperandsHelper(Pred, LHS, RHS,
10652 FoundLHS, FoundRHS) ||
10653 // ~x < ~y --> x > y
10654 isImpliedCondOperandsHelper(Pred, LHS, RHS,
10655 getNotSCEV(FoundRHS),
10656 getNotSCEV(FoundLHS));
10657 }
10658
10659 /// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
10660 template <typename MinMaxExprType>
10661 static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
10662 const SCEV *Candidate) {
10663 const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
10664 if (!MinMaxExpr)
10665 return false;
10666
10667 return is_contained(MinMaxExpr->operands(), Candidate);
10668 }
10669
10670 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
10671 ICmpInst::Predicate Pred,
10672 const SCEV *LHS, const SCEV *RHS) {
10673 // If both sides are affine addrecs for the same loop, with equal
10674 // steps, and we know the recurrences don't wrap, then we only
10675 // need to check the predicate on the starting values.
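// E.g. (illustrative): {5,+,2}<nsw> s< {9,+,2}<nsw> on every iteration of
// the shared loop, because the steps are equal, neither recurrence wraps,
// and the starts satisfy 5 s< 9.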
10676
10677 if (!ICmpInst::isRelational(Pred))
10678 return false;
10679
10680 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
10681 if (!LAR)
10682 return false;
10683 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
10684 if (!RAR)
10685 return false;
10686 if (LAR->getLoop() != RAR->getLoop())
10687 return false;
10688 if (!LAR->isAffine() || !RAR->isAffine())
10689 return false;
10690
10691 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
10692 return false;
10693
10694 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
10695 SCEV::FlagNSW : SCEV::FlagNUW;
10696 if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
10697 return false;
10698
10699 return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
10700 }
10701
10702 /// Is LHS `Pred` RHS true on the virtue of LHS or RHS being a Min or Max
10703 /// expression?
10704 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
10705 ICmpInst::Predicate Pred,
10706 const SCEV *LHS, const SCEV *RHS) {
10707 switch (Pred) {
10708 default:
10709 return false;
10710
10711 case ICmpInst::ICMP_SGE:
10712 std::swap(LHS, RHS);
10713 LLVM_FALLTHROUGH;
10714 case ICmpInst::ICMP_SLE:
10715 return
10716 // min(A, ...) <= A
10717 IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) ||
10718 // A <= max(A, ...)
10719 IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);
10720
10721 case ICmpInst::ICMP_UGE:
10722 std::swap(LHS, RHS);
10723 LLVM_FALLTHROUGH;
10724 case ICmpInst::ICMP_ULE:
10725 return
10726 // min(A, ...) <= A
10727 IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) ||
10728 // A <= max(A, ...)
10729 IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
10730 }
10731
10732 llvm_unreachable("covered switch fell through?!");
10733 }
10734
10735 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
10736 const SCEV *LHS, const SCEV *RHS,
10737 const SCEV *FoundLHS,
10738 const SCEV *FoundRHS,
10739 unsigned Depth) {
10740 assert(getTypeSizeInBits(LHS->getType()) ==
10741 getTypeSizeInBits(RHS->getType()) &&
10742 "LHS and RHS have different sizes?");
10743 assert(getTypeSizeInBits(FoundLHS->getType()) ==
10744 getTypeSizeInBits(FoundRHS->getType()) &&
10745 "FoundLHS and FoundRHS have different sizes?");
10746 // We want to avoid hurting the compile time with analysis of too big trees.
10747 if (Depth > MaxSCEVOperationsImplicationDepth)
10748 return false;
10749
10750 // We only want to work with GT comparisons so far.
10751 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) {
10752 Pred = CmpInst::getSwappedPredicate(Pred);
10753 std::swap(LHS, RHS);
10754 std::swap(FoundLHS, FoundRHS);
10755 }
10756
10757 // For unsigned, try to reduce it to corresponding signed comparison.
10758 if (Pred == ICmpInst::ICMP_UGT)
10759 // We can replace unsigned predicate with its signed counterpart if all
10760 // involved values are non-negative.
10761 // TODO: We could have better support for unsigned.
10762 if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) {
10763 // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing
10764 // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us
10765 // use this fact to prove that LHS and RHS are non-negative.
10766 const SCEV *MinusOne = getMinusOne(LHS->getType());
10767 if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
10768 FoundRHS) &&
10769 isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
10770 FoundRHS))
10771 Pred = ICmpInst::ICMP_SGT;
10772 }
10773
10774 if (Pred != ICmpInst::ICMP_SGT)
10775 return false;
10776
10777 auto GetOpFromSExt = [&](const SCEV *S) {
10778 if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
10779 return Ext->getOperand();
10780 // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
10781 // the constant in some cases.
10782 return S;
10783 };
10784
10785 // Acquire values from extensions.
10786 auto *OrigLHS = LHS;
10787 auto *OrigFoundLHS = FoundLHS;
10788 LHS = GetOpFromSExt(LHS);
10789 FoundLHS = GetOpFromSExt(FoundLHS);
10790
10791 // Checks whether the SGT predicate can be proved trivially or using the found context.
10792 auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
10793 return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
10794 isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
10795 FoundRHS, Depth + 1);
10796 };
10797
10798 if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
10799 // We want to avoid creation of any new non-constant SCEV. Since we are
10800 // going to compare the operands to RHS, we should be certain that we don't
10801 // need any size extensions for this. So let's decline all cases when the
10802 // sizes of types of LHS and RHS do not match.
10803 // TODO: Maybe try to get RHS from sext to catch more cases?
10804 if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
10805 return false;
10806
10807 // Should not overflow.
10808 if (!LHSAddExpr->hasNoSignedWrap())
10809 return false;
10810
10811 auto *LL = LHSAddExpr->getOperand(0);
10812 auto *LR = LHSAddExpr->getOperand(1);
10813 auto *MinusOne = getMinusOne(RHS->getType());
10814
10815 // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
10816 auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
10817 return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
10818 };
10819 // Try to prove the following rule:
10820 // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
10821 // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
10822 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
10823 return true;
10824 } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
10825 Value *LL, *LR;
10826 // FIXME: Once we have SDiv implemented, we can get rid of this matching.
10827
10828 using namespace llvm::PatternMatch;
10829
10830 if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
10831 // Rules for division.
10832 // We are going to perform some comparisons with Denominator and its
10833 // derivative expressions. In general case, creating a SCEV for it may
10834 // lead to a complex analysis of the entire graph, and in particular it
10835 // can request trip count recalculation for the same loop. This would be
10836 // cached as SCEVCouldNotCompute to avoid the infinite recursion. To avoid
10837 // this, we only want to create SCEVs that are constants in this section.
10838 // So we bail if Denominator is not a constant.
10839 if (!isa<ConstantInt>(LR))
10840 return false;
10841
10842 auto *Denominator = cast<SCEVConstant>(getSCEV(LR));
10843
10844 // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
10845 // then a SCEV for the numerator already exists and matches with FoundLHS.
10846 auto *Numerator = getExistingSCEV(LL);
10847 if (!Numerator || Numerator->getType() != FoundLHS->getType())
10848 return false;
10849
10850 // Make sure that the numerator matches with FoundLHS and the denominator
10851 // is positive.
10852 if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
10853 return false;
10854
10855 auto *DTy = Denominator->getType();
10856 auto *FRHSTy = FoundRHS->getType();
10857 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
10858 // One of types is a pointer and another one is not. We cannot extend
10859 // them properly to a wider type, so let us just reject this case.
10860 // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
10861 // to avoid this check.
10862 return false;
10863
10864 // Given that:
10865 // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
10866 auto *WTy = getWiderType(DTy, FRHSTy);
10867 auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
10868 auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);
10869
10870 // Try to prove the following rule:
10871 // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
10872 // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we
10873 // divide it by Denominator < 4, the result is at least 1.
10874 auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
10875 if (isKnownNonPositive(RHS) &&
10876 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
10877 return true;
10878
10879 // Try to prove the following rule:
10880 // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
10881 // For example, given that FoundLHS > -3, FoundLHS is at least -2.
10882 // If we divide it by Denominator > 2, then:
10883 // 1. If FoundLHS is negative, then the result is 0.
10884 // 2. If FoundLHS is non-negative, then the result is non-negative.
10885 // Either way, the result is non-negative.
10886 auto *MinusOne = getMinusOne(WTy);
10887 auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
10888 if (isKnownNegative(RHS) &&
10889 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
10890 return true;
10891 }
10892 }
10893
10894 // If our expression contained SCEVUnknown Phis, and we split it down and now
10895 // need to prove something for them, try to prove the predicate for all
10896 // possible incoming values of those Phis.
10897 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
10898 return true;
10899
10900 return false;
10901 }
10902
10903 static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
10904 const SCEV *LHS, const SCEV *RHS) {
10905 // zext x u<= sext x, sext x s<= zext x
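// Worked instance (illustrative, i8 -> i16): for x = -1, zext x = 255 and
// sext x = 65535 when read unsigned, so "zext x u<= sext x" holds; read
// signed, sext x = -1 s<= 255 = zext x holds as well.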
10906 switch (Pred) {
10907 case ICmpInst::ICMP_SGE:
10908 std::swap(LHS, RHS);
10909 LLVM_FALLTHROUGH;
10910 case ICmpInst::ICMP_SLE: {
10911 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt.
10912 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
10913 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS);
10914 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
10915 return true;
10916 break;
10917 }
10918 case ICmpInst::ICMP_UGE:
10919 std::swap(LHS, RHS);
10920 LLVM_FALLTHROUGH;
10921 case ICmpInst::ICMP_ULE: {
10922 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt.
10923 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
10924 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS);
10925 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
10926 return true;
10927 break;
10928 }
10929 default:
10930 break;
10931 };
10932 return false;
10933 }
10934
10935 bool
10936 ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
10937 const SCEV *LHS, const SCEV *RHS) {
10938 return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
10939 isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
10940 IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
10941 IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
10942 isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
10943 }
10944
10945 bool
10946 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
10947 const SCEV *LHS, const SCEV *RHS,
10948 const SCEV *FoundLHS,
10949 const SCEV *FoundRHS) {
10950 switch (Pred) {
10951 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
10952 case ICmpInst::ICMP_EQ:
10953 case ICmpInst::ICMP_NE:
10954 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
10955 return true;
10956 break;
10957 case ICmpInst::ICMP_SLT:
10958 case ICmpInst::ICMP_SLE:
10959 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
10960 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
10961 return true;
10962 break;
10963 case ICmpInst::ICMP_SGT:
10964 case ICmpInst::ICMP_SGE:
10965 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
10966 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
10967 return true;
10968 break;
10969 case ICmpInst::ICMP_ULT:
10970 case ICmpInst::ICMP_ULE:
10971 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
10972 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
10973 return true;
10974 break;
10975 case ICmpInst::ICMP_UGT:
10976 case ICmpInst::ICMP_UGE:
10977 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
10978 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
10979 return true;
10980 break;
10981 }
10982
10983 // Maybe it can be proved via operations?
10984 if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
10985 return true;
10986
10987 return false;
10988 }
10989
10990 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
10991 const SCEV *LHS,
10992 const SCEV *RHS,
10993 const SCEV *FoundLHS,
10994 const SCEV *FoundRHS) {
10995 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
10996 // The restriction on `FoundRHS` can be lifted easily -- it exists only to
10997 // reduce the compile time impact of this optimization.
10998 return false;
10999
11000 Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
11001 if (!Addend)
11002 return false;
11003
11004 const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
11005
11006 // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
11007 // antecedent "`FoundLHS` `Pred` `FoundRHS`".
11008 ConstantRange FoundLHSRange =
11009 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);
11010
11011 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
11012 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));
11013
11014 // We can also compute the range of values for `LHS` that satisfy the
11015 // consequent, "`LHS` `Pred` `RHS`":
11016 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
11017 ConstantRange SatisfyingLHSRange =
11018 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);
11019
11020 // The antecedent implies the consequent if every value of `LHS` that
11021 // satisfies the antecedent also satisfies the consequent.
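// Worked instance (illustrative): from "FoundLHS u< 8" we get
// FoundLHSRange = [0, 8); with Addend = 2 this puts LHS in [2, 10), and a
// consequent such as "LHS u< 10" is satisfied by that whole range, so the
// implication holds.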
11022 return SatisfyingLHSRange.contains(LHSRange);
11023 }
11024
11025 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
11026 bool IsSigned, bool NoWrap) {
11027 assert(isKnownPositive(Stride) && "Positive stride expected!");
11028
11029 if (NoWrap) return false;
11030
11031 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
11032 const SCEV *One = getOne(Stride->getType());
11033
11034 if (IsSigned) {
11035 APInt MaxRHS = getSignedRangeMax(RHS);
11036 APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
11037 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
11038
11039 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
11040 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
11041 }
11042
11043 APInt MaxRHS = getUnsignedRangeMax(RHS);
11044 APInt MaxValue = APInt::getMaxValue(BitWidth);
11045 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));
11046
11047 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
11048 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
11049 }
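
// A numeric sketch of the check above (unsigned i8, values illustrative):
// if RHS may be as large as 250 and Stride is 10, then
// MaxValue - MaxStrideMinusOne == 255 - 9 == 246 u< 250 == MaxRHS, so the
// last pre-exit IV value could step past 255 and wrap -- report overflow.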

bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MinRHS = getSignedRangeMin(RHS);
    APInt MinValue = APInt::getSignedMinValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
    return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
  }

  APInt MinRHS = getUnsignedRangeMin(RHS);
  APInt MinValue = APInt::getMinValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
  return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
}
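
// The symmetric sketch for the decrement case (unsigned i8, illustrative):
// if RHS may be as small as 5 and Stride is 10, then
// MinValue + MaxStrideMinusOne == 0 + 9 == 9 u> 5 == MinRHS, so the IV could
// step below 0 and wrap -- report overflow.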

const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
                                            bool Equality) {
  const SCEV *One = getOne(Step->getType());
  Delta = Equality ? getAddExpr(Delta, Step)
                   : getAddExpr(Delta, getMinusSCEV(Step, One));
  return getUDivExpr(Delta, Step);
}
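
// In other words, with Equality == false the above computes the backedge
// count as ceil(Delta / Step) == (Delta + Step - 1) /u Step; e.g. Delta == 10
// and Step == 3 gives (10 + 2) /u 3 == 4. With Equality == true the bound
// itself is reached, adding one more iteration: (Delta + Step) /u Step.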

const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start,
                                                    const SCEV *Stride,
                                                    const SCEV *End,
                                                    unsigned BitWidth,
                                                    bool IsSigned) {

  assert(!isKnownNonPositive(Stride) &&
         "Stride is expected strictly positive!");
  // Calculate the maximum backedge count based on the range of values
  // permitted by Start, End, and Stride.
  const SCEV *MaxBECount;
  APInt MinStart =
      IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);

  APInt StrideForMaxBECount =
      IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);

  // We already know that the stride is positive, so we paper over conservatism
  // in our range computation by forcing StrideForMaxBECount to be at least
  // one. In theory this is unnecessary, but we expect MaxBECount to be a
  // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV
  // (there is nothing to constant fold it to).
  APInt One(BitWidth, 1, IsSigned);
  StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount);

  APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
                            : APInt::getMaxValue(BitWidth);
  APInt Limit = MaxValue - (StrideForMaxBECount - 1);

  // Although End can be a MAX expression we estimate MaxEnd considering only
  // the case End = RHS of the loop termination condition. This is safe because
  // in the other case (End - Start) is zero, leading to a zero maximum
  // backedge taken count.
  APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
                          : APIntOps::umin(getUnsignedRangeMax(End), Limit);

  MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */,
                              getConstant(StrideForMaxBECount) /* Step */,
                              false /* Equality */);

  return MaxBECount;
}
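
// A worked instance of the bound above (unsigned i8, values illustrative):
// MinStart == 4, StrideForMaxBECount == 5, max End == 250. Then
// Limit == 255 - 4 == 251, MaxEnd == umin(250, 251) == 250, and
// MaxBECount == (250 - 4 + 4) /u 5 == 50.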

ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects. Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove correctness
    // of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) the loop is single exit with no side effects.
    //
    //
    // Precondition a) implies that if the stride is negative, this is a single
    // trip loop. The backedge taken count formula reduces to zero in this
    // case.
    //
    // Precondition b) implies that the unknown stride cannot be zero otherwise
    // we have UB.
    //
    // The positive stride case is the same as isKnownPositive(Stride)
    // returning true (original behavior of the function).
    //
    // We want to make sure that the stride is truly unknown as there are edge
    // cases where ScalarEvolution propagates no wrap flags to the
    // post-increment/decrement IV even though the increment/decrement
    // operation itself is wrapping. The computed backedge taken count may be
    // wrong in such cases. This is prevented by checking that the stride is
    // not known to be either positive or non-positive. For example, no wrap
    // flags are propagated to the post-increment IV of this loop with a trip
    // count of 2 -
    //
    // unsigned char i;
    // for(i=127; i<128; i+=129)
    //   A[i] = i;
    //
    if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
        !loopHasNoSideEffects(L))
      return getCouldNotCompute();
  } else if (!Stride->isOne() &&
             doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow. Relaxed no-overflow
    // conditions exploit NoWrapFlags, allowing us to optimize in the presence
    // of undefined behavior, as in C.
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
                                      : ICmpInst::ICMP_ULT;
  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  // When the RHS is not invariant, we do not know the end bound of the loop
  // and cannot calculate the ExactBECount needed by ExitLimit. However, we can
  // calculate the MaxBECount, given the start, stride and max value for the
  // end bound of the loop (RHS), and the fact that IV does not overflow (which
  // is checked above).
  if (!isLoopInvariant(RHS, L)) {
    const SCEV *MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
    return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                     false /*MaxOrZero*/, Predicates);
  }
  // If the backedge is taken at least once, then it will be taken
  // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start
  // is the LHS value of the less-than comparison the first time it is
  // evaluated and End is the RHS.
  const SCEV *BECountIfBackedgeTaken =
      computeBECount(getMinusSCEV(End, Start), Stride, false);
  // If the loop entry is guarded by the result of the backedge test of the
  // first loop iteration, then we know the backedge will be taken at least
  // once and so the backedge taken count is as above. If not then we use the
  // expression (max(End,Start)-Start)/Stride to describe the backedge count,
  // as if the backedge is taken at least once max(End,Start) is End and so the
  // result is as above, and if not max(End,Start) is Start so we get a
  // backedge count of zero.
  const SCEV *BECount;
  if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
    BECount = BECountIfBackedgeTaken;
  else {
    // If we know that RHS >= Start in the context of loop, then we know that
    // max(RHS, Start) = RHS at this point.
    if (isLoopEntryGuardedByCond(
            L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, RHS, Start))
      End = RHS;
    else
      End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
    BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
  }

  const SCEV *MaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    MaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else {
    MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
  }

  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
}
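
// Tying the pieces above together on the canonical pattern (illustrative):
//
//   for (i = start; i < end; i += stride)   // ult, no wrap
//
// when loop entry is guarded the result is the BECountIfBackedgeTaken form,
// (end - start + stride - 1) /u stride; otherwise end is first smoothed to
// umax(end, start) so that a never-taken backedge yields a count of zero.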

ScalarEvolution::ExitLimit
ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
                                     const Loop *L, bool IsSigned,
                                     bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV > Invariant
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow. Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing us to optimize in the presence of
  // undefined behavior, as in C.
  if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
                                      : ICmpInst::ICMP_UGT;

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) {
    // If we know that Start >= RHS in the context of loop, then we know that
    // min(RHS, Start) = RHS at this point.
    if (isLoopEntryGuardedByCond(
            L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS))
      End = RHS;
    else
      End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);
  }

  const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                            : getUnsignedRangeMax(Start);

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression we estimate MinEnd considering only
  // the case End = RHS. This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
      IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
               : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);

  const SCEV *MaxBECount = isa<SCEVConstant>(BECount)
                               ? BECount
                               : computeBECount(getConstant(MaxStart - MinEnd),
                                                getConstant(MinStride), false);

  if (isa<SCEVCouldNotCompute>(MaxBECount))
    MaxBECount = BECount;

  return ExitLimit(BECount, MaxBECount, false, Predicates);
}

const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(operands());
      Operands[0] = SE.getZero(SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                             getNoWrapFlags(FlagNW));
      if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
            Range.subtract(SC->getAPInt()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
    return SE.getCouldNotCompute();

  // Okay at this point we know that all elements of the chrec are constants
  // and that the start element is zero.

  // First check to see if the range contains zero.  If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getZero(getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range

    // We know that zero is in the range.  If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value.  Also note that we already checked for a full range.
    APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
    APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value.  If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range.  This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
               ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue())
           && "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  }

  if (isQuadratic()) {
    if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
      return SE.getConstant(S.getValue());
  }

  return SE.getCouldNotCompute();
}
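
// Worked affine instance of the logic above (illustrative): for {0,+,4} and
// Range == [0, 10), A == 4 is positive, so End == 9 and
// ExitVal == (9 + 4) /u 4 == 3. The chrec evaluates to 12 at 3 (outside the
// range) and to 8 at 2 (inside), so the computed iteration count is 3.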

const SCEVAddRecExpr *
SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
  assert(getNumOperands() > 1 && "AddRec with zero step?");
  // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
  // but in this case we cannot guarantee that the value returned will be an
  // AddRec because SCEV does not have a fixed point where it stops
  // simplification: it is legal to return ({rec1} + {rec2}). For example, it
  // may happen if we reach arithmetic depth limit while simplifying. So we
  // construct the returned value explicitly.
  SmallVector<const SCEV *, 3> Ops;
  // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
  // (this + Step) is {A+B,+,B+C,+,...,+,N}.
  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
  // We know that the last operand is not a constant zero (otherwise it would
  // have been popped out earlier). This guarantees us that if the result has
  // the same last operand, then it will also not be popped out, meaning that
  // the returned value will be an AddRec.
  const SCEV *Last = getOperand(getNumOperands() - 1);
  assert(!Last->isZero() && "Recurrence with zero step?");
  Ops.push_back(Last);
  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
                                               SCEV::FlagAnyWrap));
}
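
// For instance, the post-increment form of {A,+,B}<L> is {A+B,+,B}<L>, and
// that of {A,+,B,+,C}<L> is {A+B,+,B+C,+,C}<L>, per the construction above.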

// Return true when S contains at least one undef value.
static inline bool containsUndefs(const SCEV *S) {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    return false;
  });
}

namespace {

// Collect all steps of SCEV expressions.
struct SCEVCollectStrides {
  ScalarEvolution &SE;
  SmallVectorImpl<const SCEV *> &Strides;

  SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
      : SE(SE), Strides(S) {}

  bool follow(const SCEV *S) {
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      Strides.push_back(AR->getStepRecurrence(SE));
    return true;
  }

  bool isDone() const { return false; }
};

// Collect all SCEVUnknown and SCEVMulExpr expressions.
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
        isa<SCEVSignExtendExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we found an AddRec, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
// parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExpr.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec = false;
          SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
          visitAll(Op, ContainsAddRecVisitor);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

} // end anonymous namespace

/// Find parametric terms in this SCEVAddRecExpr. We look for parameters in
/// two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
void ScalarEvolution::collectParametricTerms(const SCEV *Expr,
    SmallVectorImpl<const SCEV *> &Terms) {
  SmallVector<const SCEV *, 4> Strides;
  SCEVCollectStrides StrideCollector(*this, Strides);
  visitAll(Expr, StrideCollector);

  LLVM_DEBUG({
    dbgs() << "Strides:\n";
    for (const SCEV *S : Strides)
      dbgs() << *S << "\n";
  });

  for (const SCEV *S : Strides) {
    SCEVCollectTerms TermCollector(Terms);
    visitAll(S, TermCollector);
  }

  LLVM_DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  SCEVCollectAddRecMultiplies MulCollector(Terms, *this);
  visitAll(Expr, MulCollector);
}

static bool findArrayDimensionsRec(ScalarEvolution &SE,
                                   SmallVectorImpl<const SCEV *> &Terms,
                                   SmallVectorImpl<const SCEV *> &Sizes) {
  int Last = Terms.size() - 1;
  const SCEV *Step = Terms[Last];

  // End of recursion.
  if (Last == 0) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
      SmallVector<const SCEV *, 2> Qs;
      for (const SCEV *Op : M->operands())
        if (!isa<SCEVConstant>(Op))
          Qs.push_back(Op);

      Step = SE.getMulExpr(Qs);
    }

    Sizes.push_back(Step);
    return true;
  }

  for (const SCEV *&Term : Terms) {
    // Normalize the terms before the next call to findArrayDimensionsRec.
    const SCEV *Q, *R;
    SCEVDivision::divide(SE, Term, Step, &Q, &R);

    // Bail out when GCD does not evenly divide one of the terms.
    if (!R->isZero())
      return false;

    Term = Q;
  }

  // Remove all SCEVConstants.
  erase_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); });

  if (Terms.size() > 0)
    if (!findArrayDimensionsRec(SE, Terms, Sizes))
      return false;

  Sizes.push_back(Step);
  return true;
}
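
// A small trace of the recursion above (illustrative): with
// Terms == {%m * %o, %o}, Step == %o is peeled off, dividing %m * %o by %o
// leaves Terms == {%m}, and the base case records %m. Unwinding then pushes
// %o, so Sizes == {%m, %o} with the innermost size last.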

// Returns true when one of the SCEVs of Terms contains a SCEVUnknown
// parameter.
static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
  for (const SCEV *T : Terms)
    if (SCEVExprContains(T, [](const SCEV *S) { return isa<SCEVUnknown>(S); }))
      return true;

  return false;
}

// Return the number of product terms in S.
static inline int numberOfTerms(const SCEV *S) {
  if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
    return Expr->getNumOperands();
  return 1;
}

static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
  if (isa<SCEVConstant>(T))
    return nullptr;

  if (isa<SCEVUnknown>(T))
    return T;

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
    SmallVector<const SCEV *, 2> Factors;
    for (const SCEV *Op : M->operands())
      if (!isa<SCEVConstant>(Op))
        Factors.push_back(Op);

    return SE.getMulExpr(Factors);
  }

  return T;
}

/// Return the size of an element read or written by Inst.
const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
  Type *Ty;
  if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
    Ty = Store->getValueOperand()->getType();
  else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
    Ty = Load->getType();
  else
    return nullptr;

  Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
  return getSizeOfExpr(ETy, Ty);
}

void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
                                          SmallVectorImpl<const SCEV *> &Sizes,
                                          const SCEV *ElementSize) {
  if (Terms.size() < 1 || !ElementSize)
    return;

  // Early return when Terms do not contain parameters: we do not delinearize
  // non-parametric SCEVs.
  if (!containsParameters(Terms))
    return;

  LLVM_DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  // Remove duplicates.
  array_pod_sort(Terms.begin(), Terms.end());
  Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());

  // Put larger terms first.
  llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) {
    return numberOfTerms(LHS) > numberOfTerms(RHS);
  });

  // Try to divide all terms by the element size. If a term is not divisible
  // by the element size, proceed with the original term.
  for (const SCEV *&Term : Terms) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
    if (!Q->isZero())
      Term = Q;
  }

  SmallVector<const SCEV *, 4> NewTerms;

  // Remove constant factors.
  for (const SCEV *T : Terms)
    if (const SCEV *NewT = removeConstantFactors(*this, T))
      NewTerms.push_back(NewT);

  LLVM_DEBUG({
    dbgs() << "Terms after sorting:\n";
    for (const SCEV *T : NewTerms)
      dbgs() << *T << "\n";
  });

  if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
    Sizes.clear();
    return;
  }

  // The last element to be pushed into Sizes is the size of an element.
  Sizes.push_back(ElementSize);

  LLVM_DEBUG({
    dbgs() << "Sizes:\n";
    for (const SCEV *S : Sizes)
      dbgs() << *S << "\n";
  });
}

void ScalarEvolution::computeAccessFunctions(
    const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
    SmallVectorImpl<const SCEV *> &Sizes) {
  // Early exit in case this SCEV is not an affine multivariate function.
  if (Sizes.empty())
    return;

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr))
    if (!AR->isAffine())
      return;

  const SCEV *Res = Expr;
  int Last = Sizes.size() - 1;
  for (int i = Last; i >= 0; i--) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);

    LLVM_DEBUG({
      dbgs() << "Res: " << *Res << "\n";
      dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
      dbgs() << "Res divided by Sizes[i]:\n";
      dbgs() << "Quotient: " << *Q << "\n";
      dbgs() << "Remainder: " << *R << "\n";
    });

    Res = Q;

    // Do not record the last subscript corresponding to the size of elements
    // in the array.
    if (i == Last) {

      // Bail out if the remainder is too complex.
      if (isa<SCEVAddRecExpr>(R)) {
        Subscripts.clear();
        Sizes.clear();
        return;
      }

      continue;
    }

    // Record the access function for the current subscript.
    Subscripts.push_back(R);
  }

  // Also push in last position the remainder of the last division: it will be
  // the access function of the innermost dimension.
  Subscripts.push_back(Res);

  std::reverse(Subscripts.begin(), Subscripts.end());

  LLVM_DEBUG({
    dbgs() << "Subscripts:\n";
    for (const SCEV *S : Subscripts)
      dbgs() << *S << "\n";
  });
}

/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
/// sizes of an array access. Returns the remainder of the delinearization that
/// is the offset start of the array. The SCEV->delinearize algorithm computes
/// the multiples of SCEV coefficients: that is a pattern matching of
/// subexpressions in the stride and base of a SCEV corresponding to the
/// computation of a GCD (greatest common divisor) of base and stride. When
/// SCEV->delinearize fails, it returns the SCEV unchanged.
///
/// For example: when analyzing the memory access A[i][j][k] in this loop nest
///
///  void foo(long n, long m, long o, double A[n][m][o]) {
///
///    for (long i = 0; i < n; i++)
///      for (long j = 0; j < m; j++)
///        for (long k = 0; k < o; k++)
///          A[i][j][k] = 1.0;
///  }
///
/// the delinearization input is the following AddRec SCEV:
///
///  AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
///
/// From this SCEV, we are able to say that the base offset of the access is %A
/// because it appears as an offset that does not divide any of the strides in
/// the loops:
///
///  CHECK: Base offset: %A
///
/// and then SCEV->delinearize determines the size of some of the dimensions of
/// the array as these are the multiples by which the strides are happening:
///
///  CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double)
///  bytes.
///
/// Note that the outermost dimension remains of UnknownSize because there are
/// no strides that would help identify the size of the last dimension: when
/// the array has been statically allocated, one could compute the size of that
/// dimension by dividing the overall size of the array by the size of the
/// known dimensions: %m * %o * 8.
///
/// Finally delinearize provides the access functions for the array reference
/// that corresponds to A[i][j][k] of the above C testcase:
///
///  CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
///
/// The testcases are checking the output of a function pass:
/// DelinearizationPass that walks through all loads and stores of a function
/// asking for the SCEV of the memory access with respect to all enclosing
/// loops, calling SCEV->delinearize on that and printing the results.
void ScalarEvolution::delinearize(const SCEV *Expr,
                                  SmallVectorImpl<const SCEV *> &Subscripts,
                                  SmallVectorImpl<const SCEV *> &Sizes,
                                  const SCEV *ElementSize) {
  // First step: collect parametric terms.
  SmallVector<const SCEV *, 4> Terms;
  collectParametricTerms(Expr, Terms);

  if (Terms.empty())
    return;

  // Second step: find subscript sizes.
  findArrayDimensions(Terms, Sizes, ElementSize);

  if (Sizes.empty())
    return;

  // Third step: compute the access functions for each subscript.
  computeAccessFunctions(Expr, Subscripts, Sizes);

  if (Subscripts.empty())
    return;

  LLVM_DEBUG({
    dbgs() << "succeeded to delinearize " << *Expr << "\n";
    dbgs() << "ArrayDecl[UnknownSize]";
    for (const SCEV *S : Sizes)
      dbgs() << "[" << *S << "]";

    dbgs() << "\nArrayRef";
    for (const SCEV *S : Subscripts)
      dbgs() << "[" << *S << "]";
    dbgs() << "\n";
  });
}

bool ScalarEvolution::getIndexExpressionsFromGEP(
    const GetElementPtrInst *GEP, SmallVectorImpl<const SCEV *> &Subscripts,
    SmallVectorImpl<int> &Sizes) {
  assert(Subscripts.empty() && Sizes.empty() &&
         "Expected output lists to be empty on entry to this function.");
  assert(GEP && "getIndexExpressionsFromGEP called with a null GEP");
  Type *Ty = GEP->getPointerOperandType();
  bool DroppedFirstDim = false;
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    const SCEV *Expr = getSCEV(GEP->getOperand(i));
    if (i == 1) {
      if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
        Ty = PtrTy->getElementType();
      } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) {
        Ty = ArrayTy->getElementType();
      } else {
        Subscripts.clear();
        Sizes.clear();
        return false;
      }
      if (auto *Const = dyn_cast<SCEVConstant>(Expr))
        if (Const->getValue()->isZero()) {
          DroppedFirstDim = true;
          continue;
        }
      Subscripts.push_back(Expr);
      continue;
    }

    auto *ArrayTy = dyn_cast<ArrayType>(Ty);
    if (!ArrayTy) {
      Subscripts.clear();
      Sizes.clear();
      return false;
    }

    Subscripts.push_back(Expr);
    if (!(DroppedFirstDim && i == 2))
      Sizes.push_back(ArrayTy->getNumElements());

    Ty = ArrayTy->getElementType();
  }
  return !Subscripts.empty();
}
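
// An assumed-IR example of the traversal above (illustrative only): for
//
//   getelementptr [8 x [16 x i32]], [8 x [16 x i32]]* %A, i64 0, i64 %i, i64 %j
//
// the leading zero index is dropped, Subscripts becomes {%i, %j}, and Sizes
// becomes {16}: the extent of the outermost dimension is never recorded.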
11953
11954 //===----------------------------------------------------------------------===//
11955 // SCEVCallbackVH Class Implementation
11956 //===----------------------------------------------------------------------===//
11957
deleted()11958 void ScalarEvolution::SCEVCallbackVH::deleted() {
11959 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
11960 if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
11961 SE->ConstantEvolutionLoopExitValue.erase(PN);
11962 SE->eraseValueFromMap(getValPtr());
11963 // this now dangles!
11964 }
11965
allUsesReplacedWith(Value * V)11966 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
11967 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
11968
11969 // Forget all the expressions associated with users of the old value,
11970 // so that future queries will recompute the expressions using the new
11971 // value.
11972 Value *Old = getValPtr();
11973 SmallVector<User *, 16> Worklist(Old->users());
11974 SmallPtrSet<User *, 8> Visited;
11975 while (!Worklist.empty()) {
11976 User *U = Worklist.pop_back_val();
11977 // Deleting the Old value will cause this to dangle. Postpone
11978 // that until everything else is done.
11979 if (U == Old)
11980 continue;
11981 if (!Visited.insert(U).second)
11982 continue;
11983 if (PHINode *PN = dyn_cast<PHINode>(U))
11984 SE->ConstantEvolutionLoopExitValue.erase(PN);
11985 SE->eraseValueFromMap(U);
11986 llvm::append_range(Worklist, U->users());
11987 }
11988 // Delete the Old value.
11989 if (PHINode *PN = dyn_cast<PHINode>(Old))
11990 SE->ConstantEvolutionLoopExitValue.erase(PN);
11991 SE->eraseValueFromMap(Old);
11992 // this now dangles!
11993 }
11994
SCEVCallbackVH(Value * V,ScalarEvolution * se)11995 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
11996 : CallbackVH(V), SE(se) {}
11997
11998 //===----------------------------------------------------------------------===//
11999 // ScalarEvolution Class Implementation
12000 //===----------------------------------------------------------------------===//
12001
ScalarEvolution(Function & F,TargetLibraryInfo & TLI,AssumptionCache & AC,DominatorTree & DT,LoopInfo & LI)12002 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
12003 AssumptionCache &AC, DominatorTree &DT,
12004 LoopInfo &LI)
12005 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
12006 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
12007 LoopDispositions(64), BlockDispositions(64) {
12008 // To use guards for proving predicates, we need to scan every instruction in
12009 // relevant basic blocks, and not just terminators. Doing this is a waste of
12010 // time if the IR does not actually contain any calls to
12011 // @llvm.experimental.guard, so do a quick check and remember this beforehand.
12012 //
12013 // This pessimizes the case where a pass that preserves ScalarEvolution wants
12014 // to _add_ guards to the module when there weren't any before, and wants
12015 // ScalarEvolution to optimize based on those guards. For now we prefer to be
12016 // efficient in lieu of being smart in that rather obscure case.
12017
12018 auto *GuardDecl = F.getParent()->getFunction(
12019 Intrinsic::getName(Intrinsic::experimental_guard));
12020 HasGuards = GuardDecl && !GuardDecl->use_empty();
12021 }
12022
ScalarEvolution(ScalarEvolution && Arg)12023 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
12024 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
12025 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
12026 ValueExprMap(std::move(Arg.ValueExprMap)),
12027 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
12028 PendingPhiRanges(std::move(Arg.PendingPhiRanges)),
12029 PendingMerges(std::move(Arg.PendingMerges)),
12030 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)),
12031 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
12032 PredicatedBackedgeTakenCounts(
12033 std::move(Arg.PredicatedBackedgeTakenCounts)),
12034 ConstantEvolutionLoopExitValue(
12035 std::move(Arg.ConstantEvolutionLoopExitValue)),
12036 ValuesAtScopes(std::move(Arg.ValuesAtScopes)),
12037 LoopDispositions(std::move(Arg.LoopDispositions)),
12038 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)),
12039 BlockDispositions(std::move(Arg.BlockDispositions)),
12040 UnsignedRanges(std::move(Arg.UnsignedRanges)),
12041 SignedRanges(std::move(Arg.SignedRanges)),
12042 UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
12043 UniquePreds(std::move(Arg.UniquePreds)),
12044 SCEVAllocator(std::move(Arg.SCEVAllocator)),
12045 LoopUsers(std::move(Arg.LoopUsers)),
12046 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)),
12047 FirstUnknown(Arg.FirstUnknown) {
12048 Arg.FirstUnknown = nullptr;
12049 }
12050
~ScalarEvolution()12051 ScalarEvolution::~ScalarEvolution() {
12052 // Iterate through all the SCEVUnknown instances and call their
12053 // destructors, so that they release their references to their values.
12054 for (SCEVUnknown *U = FirstUnknown; U;) {
12055 SCEVUnknown *Tmp = U;
12056 U = U->Next;
12057 Tmp->~SCEVUnknown();
12058 }
12059 FirstUnknown = nullptr;
12060
12061 ExprValueMap.clear();
12062 ValueExprMap.clear();
12063 HasRecMap.clear();
12064
12065 // Free any extra memory created for ExitNotTakenInfo in the unlikely event
12066 // that a loop had multiple computable exits.
12067 for (auto &BTCI : BackedgeTakenCounts)
12068 BTCI.second.clear();
12069 for (auto &BTCI : PredicatedBackedgeTakenCounts)
12070 BTCI.second.clear();
12071
12072 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
12073 assert(PendingPhiRanges.empty() && "getRangeRef garbage");
12074 assert(PendingMerges.empty() && "isImpliedViaMerge garbage");
12075 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
12076 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!");
12077 }
12078
hasLoopInvariantBackedgeTakenCount(const Loop * L)12079 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
12080 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
12081 }
12082
PrintLoopInfo(raw_ostream & OS,ScalarEvolution * SE,const Loop * L)12083 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
12084 const Loop *L) {
12085 // Print all inner loops first
12086 for (Loop *I : *L)
12087 PrintLoopInfo(OS, SE, I);
12088
12089 OS << "Loop ";
12090 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12091 OS << ": ";
12092
12093 SmallVector<BasicBlock *, 8> ExitingBlocks;
12094 L->getExitingBlocks(ExitingBlocks);
12095 if (ExitingBlocks.size() != 1)
12096 OS << "<multiple exits> ";
12097
12098 if (SE->hasLoopInvariantBackedgeTakenCount(L))
12099 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n";
12100 else
12101 OS << "Unpredictable backedge-taken count.\n";
12102
12103 if (ExitingBlocks.size() > 1)
12104 for (BasicBlock *ExitingBlock : ExitingBlocks) {
12105 OS << " exit count for " << ExitingBlock->getName() << ": "
12106 << *SE->getExitCount(L, ExitingBlock) << "\n";
12107 }
12108
12109 OS << "Loop ";
12110 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12111 OS << ": ";
12112
12113 if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) {
12114 OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L);
12115 if (SE->isBackedgeTakenCountMaxOrZero(L))
12116 OS << ", actual taken count either this or zero.";
12117 } else {
12118 OS << "Unpredictable max backedge-taken count. ";
12119 }
12120
12121 OS << "\n"
12122 "Loop ";
12123 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12124 OS << ": ";
12125
12126 SCEVUnionPredicate Pred;
12127 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred);
12128 if (!isa<SCEVCouldNotCompute>(PBT)) {
12129 OS << "Predicated backedge-taken count is " << *PBT << "\n";
12130 OS << " Predicates:\n";
12131 Pred.print(OS, 4);
12132 } else {
12133 OS << "Unpredictable predicated backedge-taken count. ";
12134 }
12135 OS << "\n";
12136
12137 if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
12138 OS << "Loop ";
12139 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12140 OS << ": ";
12141 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n";
12142 }
12143 }
12144
loopDispositionToStr(ScalarEvolution::LoopDisposition LD)12145 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) {
12146 switch (LD) {
12147 case ScalarEvolution::LoopVariant:
12148 return "Variant";
12149 case ScalarEvolution::LoopInvariant:
12150 return "Invariant";
12151 case ScalarEvolution::LoopComputable:
12152 return "Computable";
12153 }
12154 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!");
12155 }
12156
print(raw_ostream & OS) const12157 void ScalarEvolution::print(raw_ostream &OS) const {
12158 // ScalarEvolution's implementation of the print method is to print
12159 // out SCEV values of all instructions that are interesting. Doing
12160 // this potentially causes it to create new SCEV objects though,
12161 // which technically conflicts with the const qualifier. This isn't
12162 // observable from outside the class though, so casting away the
12163 // const isn't dangerous.
12164 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
12165
12166 if (ClassifyExpressions) {
12167 OS << "Classifying expressions for: ";
12168 F.printAsOperand(OS, /*PrintType=*/false);
12169 OS << "\n";
12170 for (Instruction &I : instructions(F))
12171 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) {
12172 OS << I << '\n';
12173 OS << " --> ";
12174 const SCEV *SV = SE.getSCEV(&I);
12175 SV->print(OS);
12176 if (!isa<SCEVCouldNotCompute>(SV)) {
12177 OS << " U: ";
12178 SE.getUnsignedRange(SV).print(OS);
12179 OS << " S: ";
12180 SE.getSignedRange(SV).print(OS);
12181 }
12182
12183 const Loop *L = LI.getLoopFor(I.getParent());
12184
12185 const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
12186 if (AtUse != SV) {
12187 OS << " --> ";
12188 AtUse->print(OS);
12189 if (!isa<SCEVCouldNotCompute>(AtUse)) {
12190 OS << " U: ";
12191 SE.getUnsignedRange(AtUse).print(OS);
12192 OS << " S: ";
12193 SE.getSignedRange(AtUse).print(OS);
12194 }
12195 }
12196
12197 if (L) {
12198 OS << "\t\t" "Exits: ";
12199 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
12200 if (!SE.isLoopInvariant(ExitValue, L)) {
12201 OS << "<<Unknown>>";
12202 } else {
12203 OS << *ExitValue;
12204 }
12205
12206 bool First = true;
12207 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
12208 if (First) {
12209 OS << "\t\t" "LoopDispositions: { ";
12210 First = false;
12211 } else {
12212 OS << ", ";
12213 }
12214
12215 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12216 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter));
12217 }
12218
12219 for (auto *InnerL : depth_first(L)) {
12220 if (InnerL == L)
12221 continue;
12222 if (First) {
12223 OS << "\t\t" "LoopDispositions: { ";
12224 First = false;
12225 } else {
12226 OS << ", ";
12227 }
12228
12229 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12230 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL));
12231 }
12232
12233 OS << " }";
12234 }
12235
12236 OS << "\n";
12237 }
12238 }
12239
12240 OS << "Determining loop execution counts for: ";
12241 F.printAsOperand(OS, /*PrintType=*/false);
12242 OS << "\n";
12243 for (Loop *I : LI)
12244 PrintLoopInfo(OS, &SE, I);
12245 }
12246
12247 ScalarEvolution::LoopDisposition
getLoopDisposition(const SCEV * S,const Loop * L)12248 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
12249 auto &Values = LoopDispositions[S];
12250 for (auto &V : Values) {
12251 if (V.getPointer() == L)
12252 return V.getInt();
12253 }
12254 Values.emplace_back(L, LoopVariant);
12255 LoopDisposition D = computeLoopDisposition(S, L);
12256 auto &Values2 = LoopDispositions[S];
12257 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
12258 if (V.getPointer() == L) {
12259 V.setInt(D);
12260 break;
12261 }
12262 }
12263 return D;
12264 }
12265
12266 ScalarEvolution::LoopDisposition
computeLoopDisposition(const SCEV * S,const Loop * L)12267 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
12268 switch (S->getSCEVType()) {
12269 case scConstant:
12270 return LoopInvariant;
12271 case scPtrToInt:
12272 case scTruncate:
12273 case scZeroExtend:
12274 case scSignExtend:
12275 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
12276 case scAddRecExpr: {
12277 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
12278
12279 // If L is the addrec's loop, it's computable.
12280 if (AR->getLoop() == L)
12281 return LoopComputable;
12282
12283 // Add recurrences are never invariant in the function-body (null loop).
12284 if (!L)
12285 return LoopVariant;
12286
12287 // Everything that is not defined at loop entry is variant.
12288 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))
12289 return LoopVariant;
12290 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not"
12291 " dominate the contained loop's header?");
12292
12293 // This recurrence is invariant w.r.t. L if AR's loop contains L.
12294 if (AR->getLoop()->contains(L))
12295 return LoopInvariant;
12296
12297 // This recurrence is variant w.r.t. L if any of its operands
12298 // are variant.
12299 for (auto *Op : AR->operands())
12300 if (!isLoopInvariant(Op, L))
12301 return LoopVariant;
12302
12303 // Otherwise it's loop-invariant.
12304 return LoopInvariant;
12305 }
12306 case scAddExpr:
12307 case scMulExpr:
12308 case scUMaxExpr:
12309 case scSMaxExpr:
12310 case scUMinExpr:
12311 case scSMinExpr: {
12312 bool HasVarying = false;
12313 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) {
12314 LoopDisposition D = getLoopDisposition(Op, L);
12315 if (D == LoopVariant)
12316 return LoopVariant;
12317 if (D == LoopComputable)
12318 HasVarying = true;
12319 }
12320 return HasVarying ? LoopComputable : LoopInvariant;
12321 }
12322 case scUDivExpr: {
12323 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
12324 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
12325 if (LD == LoopVariant)
12326 return LoopVariant;
12327 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
12328 if (RD == LoopVariant)
12329 return LoopVariant;
12330 return (LD == LoopInvariant && RD == LoopInvariant) ?
12331 LoopInvariant : LoopComputable;
12332 }
12333 case scUnknown:
12334 // All non-instruction values are loop invariant. All instructions are loop
12335 // invariant if they are not contained in the specified loop.
12336 // Instructions are never considered invariant in the function body
12337 // (null loop) because they are defined within the "loop".
12338 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
12339 return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
12340 return LoopInvariant;
12341 case scCouldNotCompute:
12342 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
12343 }
12344 llvm_unreachable("Unknown SCEV kind!");
12345 }
12346
isLoopInvariant(const SCEV * S,const Loop * L)12347 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
12348 return getLoopDisposition(S, L) == LoopInvariant;
12349 }
12350
hasComputableLoopEvolution(const SCEV * S,const Loop * L)12351 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
12352 return getLoopDisposition(S, L) == LoopComputable;
12353 }
12354
12355 ScalarEvolution::BlockDisposition
getBlockDisposition(const SCEV * S,const BasicBlock * BB)12356 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
12357 auto &Values = BlockDispositions[S];
12358 for (auto &V : Values) {
12359 if (V.getPointer() == BB)
12360 return V.getInt();
12361 }
12362 Values.emplace_back(BB, DoesNotDominateBlock);
12363 BlockDisposition D = computeBlockDisposition(S, BB);
12364 auto &Values2 = BlockDispositions[S];
12365 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
12366 if (V.getPointer() == BB) {
12367 V.setInt(D);
12368 break;
12369 }
12370 }
12371 return D;
12372 }
12373
12374 ScalarEvolution::BlockDisposition
computeBlockDisposition(const SCEV * S,const BasicBlock * BB)12375 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
12376 switch (S->getSCEVType()) {
12377 case scConstant:
12378 return ProperlyDominatesBlock;
12379 case scPtrToInt:
12380 case scTruncate:
12381 case scZeroExtend:
12382 case scSignExtend:
12383 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
12384 case scAddRecExpr: {
12385 // This uses a "dominates" query instead of "properly dominates" query
12386 // to test for proper dominance too, because the instruction which
12387 // produces the addrec's value is a PHI, and a PHI effectively properly
12388 // dominates its entire containing block.
12389 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
12390 if (!DT.dominates(AR->getLoop()->getHeader(), BB))
12391 return DoesNotDominateBlock;
12392
12393 // Fall through into SCEVNAryExpr handling.
12394 LLVM_FALLTHROUGH;
12395 }
12396 case scAddExpr:
12397 case scMulExpr:
12398 case scUMaxExpr:
12399 case scSMaxExpr:
12400 case scUMinExpr:
12401 case scSMinExpr: {
12402 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
12403 bool Proper = true;
12404 for (const SCEV *NAryOp : NAry->operands()) {
12405 BlockDisposition D = getBlockDisposition(NAryOp, BB);
12406 if (D == DoesNotDominateBlock)
12407 return DoesNotDominateBlock;
12408 if (D == DominatesBlock)
12409 Proper = false;
12410 }
12411 return Proper ? ProperlyDominatesBlock : DominatesBlock;
12412 }
12413 case scUDivExpr: {
12414 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
12415 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
12416 BlockDisposition LD = getBlockDisposition(LHS, BB);
12417 if (LD == DoesNotDominateBlock)
12418 return DoesNotDominateBlock;
12419 BlockDisposition RD = getBlockDisposition(RHS, BB);
12420 if (RD == DoesNotDominateBlock)
12421 return DoesNotDominateBlock;
12422 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
12423 ProperlyDominatesBlock : DominatesBlock;
12424 }
12425 case scUnknown:
12426 if (Instruction *I =
12427 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
12428 if (I->getParent() == BB)
12429 return DominatesBlock;
12430 if (DT.properlyDominates(I->getParent(), BB))
12431 return ProperlyDominatesBlock;
12432 return DoesNotDominateBlock;
12433 }
12434 return ProperlyDominatesBlock;
12435 case scCouldNotCompute:
12436 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
12437 }
12438 llvm_unreachable("Unknown SCEV kind!");
12439 }

bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const {
  auto IsS = [&](const SCEV *X) { return S == X; };
  auto ContainsS = [&](const SCEV *X) {
    return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS);
  };
  return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken);
}

void
ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  ExprValueMap.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (Entry.first == S)
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }

  auto RemoveSCEVFromBackedgeMap =
      [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        for (auto I = Map.begin(), E = Map.end(); I != E;) {
          BackedgeTakenInfo &BEInfo = I->second;
          if (BEInfo.hasOperand(S, this)) {
            BEInfo.clear();
            Map.erase(I++);
          } else
            ++I;
        }
      };

  RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
  RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
}
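
// After forgetMemoizedResults(S), none of the per-expression caches above
// mention S, so the next query involving S recomputes its answer rather than
// serving stale data; invalidation entry points such as forgetValue and
// forgetLoop are expected to funnel through here.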

void
ScalarEvolution::getUsedLoops(const SCEV *S,
                              SmallPtrSetImpl<const Loop *> &LoopsUsed) {
  struct FindUsedLoops {
    FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
        : LoopsUsed(LoopsUsed) {}
    SmallPtrSetImpl<const Loop *> &LoopsUsed;
    bool follow(const SCEV *S) {
      if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
        LoopsUsed.insert(AR->getLoop());
      return true; // Visit all operands.
    }

    bool isDone() const { return false; } // Never terminate early.
  };

  FindUsedLoops F(LoopsUsed);
  SCEVTraversal<FindUsedLoops>(F).visitAll(S);
}
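
// The same SCEVTraversal visitor shape works for collecting other node kinds;
// a minimal sketch (FindUsedUnknowns is hypothetical, not a helper defined in
// this file):
//
//   struct FindUsedUnknowns {
//     SmallPtrSetImpl<Value *> &Values;
//     FindUsedUnknowns(SmallPtrSetImpl<Value *> &Values) : Values(Values) {}
//     bool follow(const SCEV *S) {
//       if (auto *U = dyn_cast<SCEVUnknown>(S))
//         Values.insert(U->getValue());
//       return true; // Keep recursing into operands.
//     }
//     bool isDone() const { return false; } // Visit the whole expression.
//   };
//
// used as: FindUsedUnknowns F(Values);
//          SCEVTraversal<FindUsedUnknowns>(F).visitAll(S);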

void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(S, LoopsUsed);
  for (auto *L : LoopsUsed)
    LoopUsers[L].push_back(S);
}

void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    llvm::append_range(LoopStack, *L);

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could-not-compute to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively). This means we can (and do) fail
      // verification in cases where a transform makes the trip count of a loop
      // go from "undef" to "undef+1" (say). The transform is fine, since in
      // both cases the loop iterates "undef" times, but SCEV thinks we
      // increased the trip count of the loop by 1 incorrectly.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount);

    // Unless VerifySCEVStrict is set, we only compare constant deltas.
    if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) {
      dbgs() << "Trip Count for " << *L << " Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *Delta << "\n";
      std::abort();
    }
  }

  // Collect all valid loops currently in LoopInfo.
  SmallPtrSet<Loop *, 32> ValidLoops;
  SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end());
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();
    if (ValidLoops.contains(L))
      continue;
    ValidLoops.insert(L);
    Worklist.append(L->begin(), L->end());
  }
  // Check for SCEV expressions referencing invalid/deleted loops.
  for (auto &KV : ValueExprMap) {
    auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second);
    if (!AR)
      continue;
    assert(ValidLoops.contains(AR->getLoop()) &&
           "AddRec references invalid loop");
  }
}
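
// The verifier rebuilds ScalarEvolution from scratch (SE2 above) and compares
// its backedge-taken counts against the cached ones, so it only fires when a
// transform left the caches stale. It is normally reached through
// ScalarEvolutionWrapperPass::verifyAnalysis below (gated on the VerifySCEV
// command-line flag) or by running ScalarEvolutionVerifierPass explicitly.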

bool ScalarEvolution::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Invalidate the ScalarEvolution object whenever it isn't preserved or one
  // of its dependencies is invalidated.
  auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<AssumptionAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA);
}

AnalysisKey ScalarEvolutionAnalysis::Key;

ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
                         AM.getResult<AssumptionAnalysis>(F),
                         AM.getResult<DominatorTreeAnalysis>(F),
                         AM.getResult<LoopAnalysis>(F));
}

PreservedAnalyses
ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
  AM.getResult<ScalarEvolutionAnalysis>(F).verify();
  return PreservedAnalyses::all();
}

PreservedAnalyses
ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
  // For compatibility with opt's -analyze feature under the legacy pass
  // manager, which was not ported to the NPM. This keeps tests using
  // update_analyze_test_checks.py working.
  OS << "Printing analysis 'Scalar Evolution Analysis' for function '"
     << F.getName() << "':\n";
  AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}

INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)

char ScalarEvolutionWrapperPass::ID = 0;

ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
  initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
}

bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
  SE.reset(new ScalarEvolution(
      F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
      getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
      getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
  return false;
}

void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }

void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
  SE->print(OS);
}

void ScalarEvolutionWrapperPass::verifyAnalysis() const {
  if (!VerifySCEV)
    return;

  SE->verify();
}

void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}

const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
                                                        const SCEV *RHS) {
  FoldingSetNodeID ID;
  assert(LHS->getType() == RHS->getType() &&
         "Type mismatch between LHS and RHS");
  // Unique this node based on the arguments.
  ID.AddInteger(SCEVPredicate::P_Equal);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  SCEVEqualPredicate *Eq = new (SCEVAllocator)
      SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS);
  UniquePreds.InsertNode(Eq, IP);
  return Eq;
}

const SCEVPredicate *ScalarEvolution::getWrapPredicate(
    const SCEVAddRecExpr *AR,
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
  FoldingSetNodeID ID;
  // Unique this node based on the arguments.
  ID.AddInteger(SCEVPredicate::P_Wrap);
  ID.AddPointer(AR);
  ID.AddInteger(AddedFlags);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  auto *OF = new (SCEVAllocator)
      SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
  UniquePreds.InsertNode(OF, IP);
  return OF;
}
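
// Both factories unique their results through the UniquePreds folding set,
// so structurally identical predicates compare equal by pointer; a minimal
// sketch (A and B are arbitrary SCEVs of the same type):
//   const SCEVPredicate *P1 = SE.getEqualPredicate(A, B);
//   const SCEVPredicate *P2 = SE.getEqualPredicate(A, B);
//   assert(P1 == P2 && "Predicates of the same shape are uniqued");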

namespace {

class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:

  /// Rewrites \p S in the context of a loop L and the SCEV predication
  /// infrastructure.
  ///
  /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
  /// equivalences present in \p Pred.
  ///
  /// If \p NewPreds is non-null, the rewrite is free to add further
  /// predicates to \p NewPreds such that the result will be an AddRecExpr.
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                             SCEVUnionPredicate *Pred) {
    SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      auto ExprPreds = Pred->getPredicatesForExpr(Expr);
      for (auto *Pred : ExprPreds)
        if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
          if (IPred->getLHS() == Expr)
            return IPred->getRHS();
    }
    return convertToAddRecWithPreds(Expr);
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }

private:
  explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
                                 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                                 SCEVUnionPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  bool addOverflowAssumption(const SCEVPredicate *P) {
    if (!NewPreds) {
      // Check if we've already made this assumption.
      return Pred && Pred->implies(P);
    }
    NewPreds->insert(P);
    return true;
  }

  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }

  // If \p Expr represents a PHINode, we try to see if it can be represented
  // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
  // to add this predicate as a runtime overflow check, we return the AddRec.
  // If \p Expr does not meet these conditions (is not a PHI node, or we
  // couldn't create an AddRec for it, or couldn't add the predicate), we just
  // return \p Expr.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
        PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    for (auto *P : PredicatedRewrite->second) {
      // Wrap predicates from outer loops are not supported.
      if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
        auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr());
        if (L != AR->getLoop())
          return Expr;
      }
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  SCEVUnionPredicate *Pred;
  const Loop *L;
};

} // end anonymous namespace

const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                                   SCEVUnionPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}

const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}
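
// Sketch of the intended client pattern (S, L and Preds are caller state):
//   SmallPtrSet<const SCEVPredicate *, 4> Preds;
//   if (const SCEVAddRecExpr *AR =
//           SE.convertSCEVToAddRecWithPredicates(S, L, Preds)) {
//     // AR describes S only under the assumptions now collected in Preds;
//     // emit those as runtime checks before relying on the recurrence.
//   }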

/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
                                       const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}

bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

  if (!Op)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }

const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }

void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
}

SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}

bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}

SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is positive, the SCEV NUW flag will also imply the
    // WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}
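
// For example, an affine AddRec {0,+,1}<nuw><nsw> already implies both
// IncrementNSSW (from nsw) and IncrementNUSW (nuw together with a
// non-negative constant step), so PredicatedScalarEvolution::setNoOverflow
// (below) clears both before recording a wrap predicate, avoiding a
// redundant runtime check.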

/// Union predicates don't get cached so create a dummy set ID for it.
SCEVUnionPredicate::SCEVUnionPredicate()
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}

ArrayRef<const SCEVPredicate *>
SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
  auto I = SCEVToPreds.find(Expr);
  if (I == SCEVToPreds.end())
    return ArrayRef<const SCEVPredicate *>();
  return I->second;
}

bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
  if (ScevPredsIt == SCEVToPreds.end())
    return false;
  auto &SCEVPreds = ScevPredsIt->second;

  return any_of(SCEVPreds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }

void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}

void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  if (implies(N))
    return;

  const SCEV *Key = N->getExpr();
  assert(Key && "Only SCEVUnionPredicate doesn't have an "
                "associated expression!");

  SCEVToPreds[Key].push_back(N);
  Preds.push_back(N);
}
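
// add() is idempotent up to implication; a minimal sketch:
//   SCEVUnionPredicate Union;
//   Union.add(P);
//   Union.add(P); // No-op the second time: the union already implies P.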

PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L) {}

const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // If we found an entry but it's stale, rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SCEVUnionPredicate BackedgePred;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
    addPredicate(BackedgePred);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}

void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}

void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}
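
// Sketch of typical use (L and V are caller state):
//   PredicatedScalarEvolution PSE(SE, *L);
//   if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(V)) {
//     // AR rewrites V as an add recurrence; the assumptions making this
//     // valid now live in PSE.getUnionPredicate() and must be materialized
//     // as runtime checks before AR may be trusted.
//   }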

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (auto I : Init.FlagsMap)
    FlagsMap.insert(I);
}

void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}

// Match the mathematical pattern A - (A / B) * B, where A and B can be
// arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used
// for URem with constant power-of-2 second operands.
// It's not always easy, as A and B can be folded (imagine A is X / 2, and B
// is 4; A / B then becomes X / 8).
bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
                                const SCEV *&RHS) {
  // Try to match 'zext (trunc A to iB) to iY', which is used
  // for URem with constant power-of-2 second operands. Make sure the size of
  // the operand A matches the size of the whole expression.
  if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
    if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
      LHS = Trunc->getOperand();
      if (LHS->getType() != Expr->getType())
        LHS = getZeroExtendExpr(LHS, Expr->getType());
      RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1)
                        << getTypeSizeInBits(Trunc->getType()));
      return true;
    }
  const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (Add == nullptr || Add->getNumOperands() != 2)
    return false;

  const SCEV *A = Add->getOperand(1);
  const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));

  if (Mul == nullptr)
    return false;

  const auto MatchURemWithDivisor = [&](const SCEV *B) {
    // (SomeExpr + (-(SomeExpr / B) * B)).
    if (Expr == getURemExpr(A, B)) {
      LHS = A;
      RHS = B;
      return true;
    }
    return false;
  };

  // (SomeExpr + (-1 * (SomeExpr / B) * B)).
  if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(2));

  // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
  if (Mul->getNumOperands() == 2)
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(0)) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
  return false;
}
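
// Worked example: on i32, "A urem 4" is canonicalized by SCEV to
// (zext i2 (trunc i32 A to i2) to i32), which the zext/trunc pattern above
// recovers as LHS = A, RHS = 1 << 2 = 4. For a non-constant B, "A urem B"
// appears as A + (-(A /u B) * B) and is recovered by MatchURemWithDivisor.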

const SCEV *
ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
  SmallVector<BasicBlock *, 16> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Form an expression for the maximum exit count possible for this loop. We
  // merge the max and exact information to approximate a version of
  // getConstantMaxBackedgeTakenCount which isn't restricted to just constants.
  SmallVector<const SCEV *, 4> ExitCounts;
  for (BasicBlock *ExitingBB : ExitingBlocks) {
    const SCEV *ExitCount = getExitCount(L, ExitingBB);
    if (isa<SCEVCouldNotCompute>(ExitCount))
      ExitCount = getExitCount(L, ExitingBB,
                               ScalarEvolution::ConstantMaximum);
    if (!isa<SCEVCouldNotCompute>(ExitCount)) {
      assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
             "We should only have known counts for exiting blocks that "
             "dominate latch!");
      ExitCounts.push_back(ExitCount);
    }
  }
  if (ExitCounts.empty())
    return getCouldNotCompute();
  return getUMinFromMismatchedTypes(ExitCounts);
}
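
// For example, a loop with one exit taken after exactly %n iterations and a
// second exit whose exact count is unknown but whose constant maximum is 100
// gets the symbolic maximum backedge-taken count umin(%n, 100).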

/// This rewriter is similar to SCEVParameterRewriter (it replaces SCEVUnknown
/// components following the Map (Value -> SCEV)), but skips AddRecExpr because
/// we cannot guarantee that the replacement is loop invariant in the loop of
/// the AddRec.
class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
  ValueToSCEVMapTy &Map;

public:
  SCEVLoopGuardRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M)
      : SCEVRewriteVisitor(SE), Map(M) {}

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    auto I = Map.find(Expr->getValue());
    if (I == Map.end())
      return Expr;
    return I->second;
  }
};

const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
  auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
                              const SCEV *RHS, ValueToSCEVMapTy &RewriteMap) {
    if (!isa<SCEVUnknown>(LHS)) {
      std::swap(LHS, RHS);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }

    // For now, limit to conditions that provide information about unknown
    // expressions.
    auto *LHSUnknown = dyn_cast<SCEVUnknown>(LHS);
    if (!LHSUnknown)
      return;

    // TODO: use information from more predicates.
    switch (Predicate) {
    case CmpInst::ICMP_ULT: {
      if (!containsAddRecurrence(RHS)) {
        const SCEV *Base = LHS;
        auto I = RewriteMap.find(LHSUnknown->getValue());
        if (I != RewriteMap.end())
          Base = I->second;

        RewriteMap[LHSUnknown->getValue()] =
            getUMinExpr(Base, getMinusSCEV(RHS, getOne(RHS->getType())));
      }
      break;
    }
    case CmpInst::ICMP_ULE: {
      if (!containsAddRecurrence(RHS)) {
        const SCEV *Base = LHS;
        auto I = RewriteMap.find(LHSUnknown->getValue());
        if (I != RewriteMap.end())
          Base = I->second;
        RewriteMap[LHSUnknown->getValue()] = getUMinExpr(Base, RHS);
      }
      break;
    }
    case CmpInst::ICMP_EQ:
      if (isa<SCEVConstant>(RHS))
        RewriteMap[LHSUnknown->getValue()] = RHS;
      break;
    case CmpInst::ICMP_NE:
      if (isa<SCEVConstant>(RHS) &&
          cast<SCEVConstant>(RHS)->getValue()->isNullValue())
        RewriteMap[LHSUnknown->getValue()] =
            getUMaxExpr(LHS, getOne(RHS->getType()));
      break;
    default:
      break;
    }
  };
  // Starting at the loop predecessor, climb up the predecessor chain, as long
  // as there are predecessors that can be found that have unique successors
  // leading to the original header.
  // TODO: share this logic with isLoopEntryGuardedByCond.
  ValueToSCEVMapTy RewriteMap;
  for (std::pair<const BasicBlock *, const BasicBlock *> Pair(
           L->getLoopPredecessor(), L->getHeader());
       Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    const BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
      continue;

    // TODO: use information from more complex conditions, e.g. AND
    // expressions.
    auto *Cmp = dyn_cast<ICmpInst>(LoopEntryPredicate->getCondition());
    if (!Cmp)
      continue;

    auto Predicate = Cmp->getPredicate();
    if (LoopEntryPredicate->getSuccessor(1) == Pair.second)
      Predicate = CmpInst::getInversePredicate(Predicate);
    CollectCondition(Predicate, getSCEV(Cmp->getOperand(0)),
                     getSCEV(Cmp->getOperand(1)), RewriteMap);
  }

  // Also collect information from assumptions dominating the loop.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *AssumeI = cast<CallInst>(AssumeVH);
    auto *Cmp = dyn_cast<ICmpInst>(AssumeI->getOperand(0));
    if (!Cmp || !DT.dominates(AssumeI, L->getHeader()))
      continue;
    CollectCondition(Cmp->getPredicate(), getSCEV(Cmp->getOperand(0)),
                     getSCEV(Cmp->getOperand(1)), RewriteMap);
  }

  if (RewriteMap.empty())
    return Expr;
  SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
  return Rewriter.visit(Expr);
}
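
// Worked example: for
//   if (n u< 8) { for (i = 0; i u< n; ++i) ... }
// the entry guard contributes n -> umin(n, 7) to the rewrite map (the
// ICMP_ULT case above), so queries such as the loop's backedge-taken count
// can be answered with the tighter bound the guard implies.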