//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
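//
// For example (illustrative; SE is a ScalarEvolution instance and Ty an
// integer type):
//
//   const SCEV *A = SE.getConstant(Ty, 42);
//   const SCEV *B = SE.getConstant(Ty, 42);
//   assert(A == B && "uniqued, so pointer equality is semantic equality");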
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
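//
// For instance (illustrative IR), the canonical induction variable
//
//   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
//   %iv.next = add i64 %iv, 1
//
// is modeled by the acyclic recurrence {0,+,1}<%loop>, while a PHI we cannot
// classify is simply wrapped in a SCEVUnknown of the PHI value.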
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionDivision.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::ZeroOrMore,
                            cl::desc("Maximum number of iterations SCEV will "
                                     "symbolically execute a constant "
                                     "derived loop"),
                            cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification when -verify-scev is passed"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(8));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));

static cl::opt<bool> ClassifyExpressions(
    "scalar-evolution-classify-expressions", cl::Hidden, cl::init(true),
    cl::desc("When printing analysis, include information on every instruction"));

static cl::opt<bool> UseExpensiveRangeSharpening(
    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::init(false),
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
             "be costly in terms of compile time"));

//===----------------------------------------------------------------------===//
// SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scPtrToInt: {
    const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this);
    const SCEV *Op = PtrToInt->getOperand();
    OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
       << *PtrToInt->getType() << ")";
    return;
  }
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr: OpStr = " umin "; break;
    case scSMinExpr: OpStr = " smin "; break;
    default:
      llvm_unreachable("There are no other nary expression types.");
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
      break;
    default:
      // Nothing to print for other nary expressions.
      break;
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                           const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) {
  Operands[0] = op;
}

SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEVCastExpr(ID, SCEVTy, op, ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

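// The following predicates pattern-match the constant-expression idioms that
// front ends have historically used to spell sizeof/alignof/offsetof without
// target knowledge; e.g. (illustrative IR) sizeof(T) appears roughly as
//
//   ptrtoint (T* getelementptr (T, T* null, i32 1) to i64)
//
// i.e. the address of element 1 of a T array based at null.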
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to be
// more efficient.
static int CompareSCEVComplexity(
    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
    EquivalenceClasses<const Value *> &EqCacheValue,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LA->getOperand(i), RA->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LC->getOperand(i), RC->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                  RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                  LC->getOperand(), RC->getOperand(), DT,
                                  Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
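///
/// For example (illustrative), an operand list printed as (%x + 2 + %x)
/// would be reordered to (2 + %x + %x): the constant sorts first because
/// scConstant is the smallest SCEVType, and the two identical SCEVUnknowns
/// become adjacent so callers such as getAddExpr can find duplicates in a
/// single linear scan.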
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT) <
           0;
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
// Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must ensure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula uses less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
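  //
  // As a concrete illustration, take K = 4, so K! = 24 = 2^3 * 3: then T = 3
  // and K!/2^T = 3, which is odd. We compute It*(It-1)*(It-2)*(It-3) at width
  // W + 3, shift right by 3, truncate back to width W, and multiply by the
  // multiplicative inverse of 3 modulo 2^W, which exists because 3 is odd.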

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
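///
/// For example (illustrative), {3,+,5} evaluated at iteration It is
/// 3*BC(It,0) + 5*BC(It,1) = 3 + 5*It, and {0,+,1,+,1} evaluated at It is
/// It + It*(It-1)/2, since BC(It,2) = It*(It-1)/2.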
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
// SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(Ty->isIntegerTy() && "Target type must be an integer type!");
  assert(Depth <= 1 && "getPtrToIntExpr() should self-recurse at most once.");

  // We could be called with an integer-typed operand during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self cast.
  if (!Op->getType()->isPointerTy())
    return getTruncateOrZeroExtend(Op, Ty);

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return getTruncateOrZeroExtend(S, Ty);

  // If not, is this expression something we can't reduce any further?
  if (isa<SCEVUnknown>(Op)) {
    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());
    assert(getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(
               Op->getType())) == getDataLayout().getTypeSizeInBits(IntPtrTy) &&
           "We can only model ptrtoint if SCEV's effective (integer) type is "
           "sufficiently wide to represent all possible pointer values.");
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return getTruncateOrZeroExtend(S, Ty);
  }

  assert(Depth == 0 &&
         "getPtrToIntExpr() should not self-recurse for non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
  // arbitrary expression, we want to have SCEVPtrToIntExpr of an SCEVUnknown
  // only, and the expressions must otherwise be integer-typed.
  // So sink the cast down to the SCEVUnknown's.

  /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
  /// which computes a pointer-typed value, and rewrites the whole expression
  /// tree so that *all* the computations are done on integers, and the only
  /// pointer-typed operands in the expression are SCEVUnknown.
  class SCEVPtrToIntSinkingRewriter
      : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
    using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;

  public:
    SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Rewriter.visit(Scev);
    }

    const SCEV *visit(const SCEV *S) {
      Type *STy = S->getType();
      // If the expression is not pointer-typed, just keep it as-is.
      if (!STy->isPointerTy())
        return S;
      // Else, recursively sink the cast down into it.
      return Base::visit(S);
    }

    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      Type *ExprPtrTy = Expr->getType();
      assert(ExprPtrTy->isPointerTy() &&
             "Should only reach pointer-typed SCEVUnknown's.");
      Type *ExprIntPtrTy = SE.getDataLayout().getIntPtrType(ExprPtrTy);
      return SE.getPtrToIntExpr(Expr, ExprIntPtrTy, /*Depth=*/1);
    }
  };

  // And actually perform the cast sinking.
  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
  assert(IntOp->getType()->isIntegerTy() &&
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");
  return getTruncateOrZeroExtend(IntOp, Ty);
}

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
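  // For instance (illustrative), truncating i64 ((zext i32 %a to i64) + %b)
  // to i32 yields (%a + trunc(%b)): the zext folds away (a truncate replacing
  // another cast is not counted), so only one genuine truncate is introduced
  // and the transform pays off. If both operands were opaque, distributing
  // would create two truncates and we keep the single outer one instead.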
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked at the beginning that ID is not in the cache, it is
    // possible that during the recursion and the modifications it makes, the
    // ID was inserted into the cache. So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
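// E.g. (illustrative) for an i8 recurrence whose Step has signed range
// [1, 3], the limit is SINT_MIN - 3, which wraps to 125, with Pred set to
// ICMP_SLT: whenever the recurrence value is (signed) less than 125, adding
// at most 3 stays <= 127 and cannot sign-overflow.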
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling. This
// allows normalizing a sign/zero extended AddRec as such:
//   {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
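// For instance (illustrative): for {%n + 4,+,4}, PreStart is %n. If
// {%n,+,4} is known <nuw> and the loop's backedge is known to be taken,
// then %n + 4 cannot unsigned-wrap, so zext(%n + 4) may be rewritten as
// (4 + zext(%n)), keeping the start in the already-extended form.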
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
      ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration. Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
1449 template <typename ExtendOpTy>
proveNoWrapByVaryingStart(const SCEV * Start,const SCEV * Step,const Loop * L)1450 bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
1451 const SCEV *Step,
1452 const Loop *L) {
1453 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1454
1455 // We restrict `Start` to a constant to prevent SCEV from spending too much
1456 // time here. It is correct (but more expensive) to continue with a
1457 // non-constant `Start` and do a general SCEV subtraction to compute
1458 // `PreStart` below.
1459 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
1460 if (!StartC)
1461 return false;
1462
1463 APInt StartAI = StartC->getAPInt();
1464
1465 for (unsigned Delta : {-2, -1, 1, 2}) {
1466 const SCEV *PreStart = getConstant(StartAI - Delta);
1467
1468 FoldingSetNodeID ID;
1469 ID.AddInteger(scAddRecExpr);
1470 ID.AddPointer(PreStart);
1471 ID.AddPointer(Step);
1472 ID.AddPointer(L);
1473 void *IP = nullptr;
1474 const auto *PreAR =
1475 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1476
1477 // Give up if we don't already have the add recurrence we need because
1478 // actually constructing an add recurrence is relatively expensive.
1479 if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2)
1480 const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
1481 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
1482 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1483 DeltaS, &Pred, this);
1484 if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1)
1485 return true;
1486 }
1487 }
1488
1489 return false;
1490 }
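
// Tracing the motivating example above with illustrative values: for
// Ext = zext, Start = 1 and Step = 4, the Delta = 1 iteration forms
// PreStart = 0 and looks up {0,+,4} in UniqueSCEVs. If that recurrence is
// already cached with <nuw> -- condition (2) -- and is known to stay below
// the overflow limit for a step of Delta = 1 -- condition (1) -- then
// zext({1,+,4}) distributes over the recurrence.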

// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt &C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
  }
  return APInt(BitWidth, 0);
}
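
// Worked example (illustrative 8-bit values): take C = 22 = 0b00010110 and
// suppose every non-constant operand of the add is a multiple of 8, so
// TZ = 3. Then D = C.trunc(3).zext(8) = 0b110 = 6 and C - D = 16. The
// residual (16 + x + y + ...) keeps three trailing zero bits, and adding
// D < 2^3 only fills those zero bits -- no carry can leave the low bits,
// so the top-level addition D + (C - D + x + y + ...) cannot wrap.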

// Finds an integer D for an affine AddRec expression {C,+,x} such that the top
// level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
// number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const APInt &ConstantStart,
                                            const SCEV *Step) {
  const unsigned BitWidth = ConstantStart.getBitWidth();
  const uint32_t TZ = SE.GetMinTrailingZeros(Step);
  if (TZ)
    return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
                         : ConstantStart;
  return APInt(BitWidth, 0);
}
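
// The AddRec overload follows the same recipe; e.g. (again illustrative)
// for {6,+,8} the step alone guarantees TZ = 3, so D = 6 and the recurrence
// splits as 6 + {0,+,8}, whose residual values are always multiples of 8.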

const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  // Limit recursion depth.
  if (Depth > MaxCastDepth) {
    SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty, Depth);
  }
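
  // As a concrete (illustrative) instance of the range check above: if X is
  // an i32 whose unsigned range is [0, 100) and Op is (trunc X to i8), then
  // truncating that range to 8 bits and zero-extending it back covers
  // exactly the same values as extending the original range, so the
  // trunc/zext pair simplifies -- for Ty == i32 the result is just X.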

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants). This allows analysis of something like
  // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }
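
      // Numeric sketch of the wide-type comparison above (illustrative
      // values): for an i8 recurrence {0,+,1} with MaxBECount = 100, ZAdd is
      // zext(0 + 100*1) == 100 in i16 and OperandExtendedAdd is
      // zext(0) + zext(100)*zext(1) == 100 in i16. The two wide expressions
      // fold to the same SCEV, so the narrow computation never wrapped
      // unsigned and FlagNUW is recorded on AR.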

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop. The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow. Use this fact to avoid
      // doing extra work that may not pay off.
      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {
        // If the backedge is guarded by a comparison with the pre-inc
        // value the addrec is safe. Also, if the entry is guarded by
        // a comparison with the start value and the backedge is
        // guarded by a comparison with the post-inc value, the addrec
        // is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRangeMax(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
            // Cache knowledge of AR NUW, which is propagated to this
            // AddRec.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRangeMin(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
            // Cache knowledge of AR NW, which is propagated to this
            // AddRec. Negative step causes unsigned wrap, but it
            // still can't self-wrap.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not unsigned wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SZExtD, SZExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }
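
      // Continuing the extractConstantWithoutWrapping example from above
      // (illustrative): zext({6,+,8}) splits as zext(6) + zext({0,+,8}),
      // since TZ(Step) = 3 gives D = 6 and a residual recurrence whose
      // values are all multiples of 8, so adding 6 back can never wrap.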

      if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // zext(A % B) --> zext(A) % zext(B)
  {
    const SCEV *LHS;
    const SCEV *RHS;
    if (matchURem(Op, LHS, RHS))
      return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
                         getZeroExtendExpr(RHS, Ty, Depth + 1));
  }

  // zext(A / B) --> zext(A) / zext(B).
  if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
    return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
                       getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
    if (SA->hasNoUnsignedWrap()) {
      // If the addition does not unsign overflow then we can, by definition,
      // commute the zero extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // Address arithmetic often contains expressions like
    // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
    // This transformation is useful while proving that such expressions are
    // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SZExtD, SZExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }
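
  // To make the split above concrete (illustrative widths): for
  // zext(5 + 4*X) from i8 to i32, the non-constant operand contributes two
  // trailing zero bits, so D = 1 and the fold yields 1 + zext(4 + 4*X).
  // The related expression zext(9 + 4*X) normalizes to 1 + zext(8 + 4*X),
  // so the two now share a shape that makes their relationship visible to
  // clients such as the LoadStoreVectorizer.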

  if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
    // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw>
    if (SM->hasNoUnsignedWrap()) {
      // If the multiply does not unsign overflow then we can, by definition,
      // commute the zero extension with the multiply operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SM->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(2^K * (trunc X to iN)) to iM ->
    //   2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
    //
    // Proof:
    //
    //     zext(2^K * (trunc X to iN)) to iM
    //   = zext((trunc X to iN) << K) to iM
    //   = zext((trunc X to i{N-K}) << K)<nuw> to iM
    //     (because shl removes the top K bits)
    //   = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
    //   = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
    //
    if (SM->getNumOperands() == 2)
      if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
        if (MulLHS->getAPInt().isPowerOf2())
          if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
            int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
                               MulLHS->getAPInt().logBase2();
            Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
            return getMulExpr(
                getZeroExtendExpr(MulLHS, Ty),
                getZeroExtendExpr(
                    getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
                SCEV::FlagNUW, Depth + 1);
          }
  }
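
  // Instantiating the proof above with K = 2, N = 16, M = 64 (example
  // widths only): zext(4 * (trunc X to i16)) to i64 becomes
  // 4 * (zext(trunc X to i14) to i64)<nuw>, because multiplying by 4 is a
  // left shift by two that discards the top two bits of the i16 truncate.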

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

const SCEV *
ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);

  // sext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  // Limit recursion depth.
  if (Depth > MaxCastDepth) {
    SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // sext(trunc(x)) --> sext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all sign bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getSignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).signExtend(NewBits).contains(
            CR.sextOrTrunc(NewBits)))
      return getTruncateOrSignExtend(X, Ty, Depth);
  }

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
    if (SA->hasNoSignedWrap()) {
      // If the addition does not sign overflow then we can, by definition,
      // commute the sign extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
    }

    // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not signed wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // For instance, this will bring two seemingly different expressions:
    //     1 + sext(5 + 20 * %x + 24 * %y)  and
    //         sext(6 + 20 * %x + 24 * %y)
    // to the same form:
    //     2 + sext(4 + 20 * %x + 24 * %y)
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SSExtD, SSExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }
  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants). This allows analysis of something like
  // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoSignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // If AR wraps around then
            //
            //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
            // => SAdd != OperandExtendedAdd
            //
            // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
            // (SAdd == OperandExtendedAdd => AR is NW)

            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);

            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop. The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow. Use this fact to avoid
      // doing extra work that may not pay off.

      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {
        // If the backedge is guarded by a comparison with the pre-inc
        // value the addrec is safe. Also, if the entry is guarded by
        // a comparison with the start value and the backedge is
        // guarded by a comparison with the post-inc value, the addrec
        // is safe.
        ICmpInst::Predicate Pred;
        const SCEV *OverflowLimit =
            getSignedOverflowLimitForStep(Step, &Pred, this);
        if (OverflowLimit &&
            (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
             isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
          // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
          setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
          return getAddRecExpr(
              getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
              getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
        }
      }

      // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not signed wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SSExtD, SSExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
        setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // If the input value is provably non-negative and we could not simplify
  // away the sext, build a zext instead.
  if (isKnownNonNegative(Op))
    return getZeroExtendExpr(Op, Ty, Depth + 1);

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getAPInt().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (const SCEV *Op : AR->operands())
      Ops.push_back(getAnyExtendExpr(Op, Ty));
    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// Process the given Ops list, which is a list of operands to be added under
/// the given scale, update the given map. This is a helper function for
/// getAddExpr. As an example of what it does, given a sequence of operands
/// that would form an add expression like this:
///
///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVectorImpl<const SCEV *> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getAPInt();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
  for (; i != NumOperands; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
          Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
            CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                         Add->op_begin(), Add->getNumOperands(),
                                         NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        auto Pair = M.insert({Key, NewScale});
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert({Ops[i], Scale});
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
// `Flags' as can't-wrap behavior. Infer a more aggressive set of
// can't-overflow flags for the operation if possible.
static SCEV::NoWrapFlags
StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
                      const ArrayRef<const SCEV *> Ops,
                      SCEV::NoWrapFlags Flags) {
  using namespace std::placeholders;

  using OBO = OverflowingBinaryOperator;

  bool CanAnalyze =
      Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
  (void)CanAnalyze;
  assert(CanAnalyze && "don't call from other places!");

  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  SCEV::NoWrapFlags SignOrUnsignWrap =
      ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
  auto IsKnownNonNegative = [&](const SCEV *S) {
    return SE->isKnownNonNegative(S);
  };

  if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
    Flags =
        ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);

  SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  if (SignOrUnsignWrap != SignOrUnsignMask &&
      (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
      isa<SCEVConstant>(Ops[0])) {

    auto Opcode = [&] {
      switch (Type) {
      case scAddExpr:
        return Instruction::Add;
      case scMulExpr:
        return Instruction::Mul;
      default:
        llvm_unreachable("Unexpected SCEV op.");
      }
    }();

    const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();

    // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow.
    if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
      auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Opcode, C, OBO::NoSignedWrap);
      if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
    }

    // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow.
    if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
      auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Opcode, C, OBO::NoUnsignedWrap);
      if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
    }
  }

  return Flags;
}
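
// A worked instance of the inference above (illustrative i8 values, not
// taken from any caller): for Ops = { 20, A } on an add with no incoming
// flags, makeGuaranteedNoWrapRegion(Add, 20, NoUnsignedWrap) is [0, 236),
// the set of values that can absorb +20 without unsigned wrap. If A's
// unsigned range is, say, [0, 100), it lies inside that region and FlagNUW
// is inferred; the NSW check works the same way on signed ranges.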

bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
  return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
}

/// Get a canonical add expression, or something simpler if possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags OrigFlags,
                                        unsigned Depth) {
  assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (LHSC->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Delay expensive flag strengthening until necessary.
  auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
    return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
  };

  // Limit recursion calls depth.
  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
    return getOrCreateAddExpr(Ops, ComputeFlags(Ops));

  if (SCEV *S = std::get<0>(findExistingSCEVInCache(scAddExpr, Ops))) {
    // Don't strengthen flags if we have no new information.
    SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S);
    if (Add->getNoWrapFlags(OrigFlags) != OrigFlags)
      Add->setNoWrapFlags(ComputeFlags(Ops));
    return S;
  }

  // Okay, check to see if the same value occurs in the operand list more than
  // once. If so, merge them together into a multiply expression. Since we
  // sorted the list, these values are required to be adjacent.
  Type *Ty = Ops[0]->getType();
  bool FoundMatch = false;
  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
    if (Ops[i] == Ops[i+1]) {      // X + Y + Y  -->  X + Y*2
      // Scan ahead to count how many equal operands there are.
      unsigned Count = 2;
      while (i+Count != e && Ops[i+Count] == Ops[i])
        ++Count;
      // Merge the values into a multiply.
      const SCEV *Scale = getConstant(Ty, Count);
      const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
      if (Ops.size() == Count)
        return Mul;
      Ops[i] = Mul;
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
      --i; e -= Count - 1;
      FoundMatch = true;
    }
  if (FoundMatch)
    return getAddExpr(Ops, OrigFlags, Depth + 1);

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
  // if the contents of the resulting outer trunc fold to something simple.
  auto FindTruncSrcType = [&]() -> Type * {
    // We're ultimately looking to fold an addrec of truncs and muls of only
    // constants and truncs, so if we find any other types of SCEV
    // as operands of the addrec then we bail and return nullptr here.
    // Otherwise, we return the type of the operand of a trunc that we find.
    if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
      return T->getOperand()->getType();
    if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
      if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
        return T->getOperand()->getType();
    }
    return nullptr;
  };
  if (auto *SrcType = FindTruncSrcType()) {
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                  dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap,
                                        Depth + 1));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, Ty);
    }
  }
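
  // A small (illustrative) case where the widening above pays off: with
  // Ops = { (trunc i64 %x to i32), (-1 * (trunc i64 %x to i32)), 5 },
  // the operands re-expressed in i64 are { %x, -1 * %x, 5 }, whose sum
  // folds to the constant 5, so the whole add becomes trunc(5) == 5 -- a
  // fold that is invisible while %x hides behind separate truncates.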

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      if (Ops.size() > AddOpsInlineThreshold ||
          Add->getNumOperands() > AddOpsInlineThreshold)
        break;
      // If we have an add, expand the add operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Add->op_begin(), Add->op_end());
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the
    // list, and they are not necessarily sorted. Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops.data(), Ops.size(),
                                     APInt(BitWidth, 1), *this)) {
      struct APIntCompare {
        bool operator()(const APInt &LHS, const APInt &RHS) const {
          return LHS.ult(RHS);
        }
      };

      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list. Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (const SCEV *NewOp : NewOps)
        MulOpLists[M.find(NewOp)->second].push_back(NewOp);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (auto &MulOp : MulOpLists)
        if (MulOp.first != 0)
          Ops.push_back(getMulExpr(
              getConstant(MulOp.first),
              getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
              SCEV::FlagAnyWrap, Depth + 1));
      if (Ops.empty())
        return getZero(Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply. If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      if (isa<SCEVConstant>(MulOpSCEV))
        continue;
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp]) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
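          // Note: the (MulOp == 0) index below intentionally picks the
          // *other* operand of a two-operand multiply -- index 1 when MulOp
          // is 0 and index 0 otherwise; multiplies with more operands are
          // rebuilt just below.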
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                Mul->op_begin()+MulOp);
            MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
            InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
          }
          SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
          const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
                                            SCEV::FlagAnyWrap, Depth + 1);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                  Mul->op_begin()+MulOp);
              MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
              InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                  OtherMul->op_begin()+OMulOp);
              MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
              InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
            const SCEV *InnerMulSum =
                getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
                                              SCEV::FlagAnyWrap, Depth + 1);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
          }
      }
    }
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant. If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      // Compute nowrap flags for the addition of the loop-invariant ops and
      // the addrec. Temporarily push it as an operand for that purpose.
      LIOps.push_back(AddRec);
      SCEV::NoWrapFlags Flags = ComputeFlags(LIOps);
      LIOps.pop_back();

      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                             AddRec->op_end());
      // This follows from the fact that the no-wrap flags on the outer add
      // expression are applicable on the 0th iteration, when the add
      // recurrence will be equal to its start value.
      AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1);

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer add and the inner addrec are guaranteed to have no overflow.
      // Always propagate NW.
      Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec by the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being added together. If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      // We expect the AddRecExpr's to be sorted in reverse dominance order,
      // so that the 1st found AddRecExpr is dominated by all others.
      assert(DT.dominates(
                 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
                 AddRec->getLoop()->getHeader()) &&
             "AddRecExprs are not sorted in reverse dominance order?");
      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
        // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
        SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                               AddRec->op_end());
        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
             ++OtherIdx) {
          const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
          if (OtherAddRec->getLoop() == AddRecLoop) {
            for (unsigned i = 0, e = OtherAddRec->getNumOperands();
                 i != e; ++i) {
              if (i >= AddRecOps.size()) {
                AddRecOps.append(OtherAddRec->op_begin()+i,
                                 OtherAddRec->op_end());
                break;
              }
              SmallVector<const SCEV *, 2> TwoOps = {
                  AddRecOps[i], OtherAddRec->getOperand(i)};
              AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
          }
        }
        // Step size has changed, so we cannot guarantee no self-wraparound.
        Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
        return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
      }
    }

    // Otherwise couldn't fold anything into this recurrence. Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
}

const SCEV *
ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  SCEVAddExpr *S =
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
                                       const Loop *L, SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  ID.AddPointer(L);
  void *IP = nullptr;
  SCEVAddRecExpr *S =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  setNoWrapFlags(S, Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  SCEVMulExpr *S =
      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  uint64_t k = i*j;
  if (j > 1 && k / j != i) Overflow = true;
  return k;
}
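
// For illustration (hypothetical driver, not part of this file):
//   bool Overflow = false;
//   uint64_t P = umul_ov(1ULL << 32, 1ULL << 32, Overflow);
//   // P == 0 here and Overflow == true: k / j == 0 != i detected the wrap.
// Overflow is only ever set, never cleared, so a caller can thread one flag
// through a whole chain of multiplies and test it once at the end.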
2766
2767 /// Compute the result of "n choose k", the binomial coefficient. If an
2768 /// intermediate computation overflows, Overflow will be set and the return will
2769 /// be garbage. Overflow is not cleared on absence of overflow.
Choose(uint64_t n,uint64_t k,bool & Overflow)2770 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
2771 // We use the multiplicative formula:
2772 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2773 // At each iteration, we take the n-th term of the numeral and divide by the
2774 // (k-n)th term of the denominator. This division will always produce an
2775 // integral result, and helps reduce the chance of overflow in the
2776 // intermediate computations. However, we can still overflow even when the
2777 // final result would fit.
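// For example, Choose(5, 2) computes r = (1 * 5) / 1 = 5, then
// r = (5 * 4) / 2 = 10.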
2778
2779 if (n == 0 || n == k) return 1;
2780 if (k > n) return 0;
2781
2782 if (k > n/2)
2783 k = n-k;
2784
2785 uint64_t r = 1;
2786 for (uint64_t i = 1; i <= k; ++i) {
2787 r = umul_ov(r, n-(i-1), Overflow);
2788 r /= i;
2789 }
2790 return r;
2791 }
2792
2793 /// Determine if any of the operands in this SCEV are a constant or if
2794 /// any of the add or multiply expressions in this SCEV contain a constant.
2795 static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
2796 struct FindConstantInAddMulChain {
2797 bool FoundConstant = false;
2798
2799 bool follow(const SCEV *S) {
2800 FoundConstant |= isa<SCEVConstant>(S);
2801 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
2802 }
2803
2804 bool isDone() const {
2805 return FoundConstant;
2806 }
2807 };
2808
2809 FindConstantInAddMulChain F;
2810 SCEVTraversal<FindConstantInAddMulChain> ST(F);
2811 ST.visitAll(StartExpr);
2812 return F.FoundConstant;
2813 }
2814
2815 /// Get a canonical multiply expression, or something simpler if possible.
2816 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
2817 SCEV::NoWrapFlags OrigFlags,
2818 unsigned Depth) {
2819 assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
2820 "only nuw or nsw allowed");
2821 assert(!Ops.empty() && "Cannot get empty mul!");
2822 if (Ops.size() == 1) return Ops[0];
2823 #ifndef NDEBUG
2824 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2825 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2826 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2827 "SCEVMulExpr operand types don't match!");
2828 #endif
2829
2830 // Sort by complexity, this groups all similar expression types together.
2831 GroupByComplexity(Ops, &LI, DT);
2832
2833 // If there are any constants, fold them together.
2834 unsigned Idx = 0;
2835 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2836 ++Idx;
2837 assert(Idx < Ops.size());
2838 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2839 // We found two constants, fold them together!
2840 Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt());
2841 if (Ops.size() == 2) return Ops[0];
2842 Ops.erase(Ops.begin()+1); // Erase the folded element
2843 LHSC = cast<SCEVConstant>(Ops[0]);
2844 }
2845
2846 // If we have a multiply of zero, it will always be zero.
2847 if (LHSC->getValue()->isZero())
2848 return LHSC;
2849
2850 // If we are left with a constant one being multiplied, strip it off.
2851 if (LHSC->getValue()->isOne()) {
2852 Ops.erase(Ops.begin());
2853 --Idx;
2854 }
2855
2856 if (Ops.size() == 1)
2857 return Ops[0];
2858 }
2859
2860 // Delay expensive flag strengthening until necessary.
2861 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
2862 return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
2863 };
2864
2865 // Limit the recursion depth.
2866 if (Depth > MaxArithDepth || hasHugeExpression(Ops))
2867 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
2868
2869 if (SCEV *S = std::get<0>(findExistingSCEVInCache(scMulExpr, Ops))) {
2870 // Don't strengthen flags if we have no new information.
2871 SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S);
2872 if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
2873 Mul->setNoWrapFlags(ComputeFlags(Ops));
2874 return S;
2875 }
2876
2877 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2878 if (Ops.size() == 2) {
2879 // C1*(C2+V) -> C1*C2 + C1*V
2880 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
2881 // If any of Add's ops are Adds or Muls with a constant, apply this
2882 // transformation as well.
2883 //
2884 // TODO: There are some cases where this transformation is not
2885 // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
2886 // this transformation should be narrowed down.
2887 if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add))
2888 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
2889 SCEV::FlagAnyWrap, Depth + 1),
2890 getMulExpr(LHSC, Add->getOperand(1),
2891 SCEV::FlagAnyWrap, Depth + 1),
2892 SCEV::FlagAnyWrap, Depth + 1);
2893
2894 if (Ops[0]->isAllOnesValue()) {
2895 // If we have a mul by -1 of an add, try distributing the -1 among the
2896 // add operands.
2897 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
2898 SmallVector<const SCEV *, 4> NewOps;
2899 bool AnyFolded = false;
2900 for (const SCEV *AddOp : Add->operands()) {
2901 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
2902 Depth + 1);
2903 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
2904 NewOps.push_back(Mul);
2905 }
2906 if (AnyFolded)
2907 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
2908 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
2909 // Negation preserves a recurrence's no self-wrap property.
2910 SmallVector<const SCEV *, 4> Operands;
2911 for (const SCEV *AddRecOp : AddRec->operands())
2912 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
2913 Depth + 1));
2914
2915 return getAddRecExpr(Operands, AddRec->getLoop(),
2916 AddRec->getNoWrapFlags(SCEV::FlagNW));
2917 }
2918 }
2919 }
2920 }
2921
2922 // Skip over the add expressions until we get to a multiply.
2923 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2924 ++Idx;
2925
2926 // If there are mul operands, inline them all into this expression.
2927 if (Idx < Ops.size()) {
2928 bool DeletedMul = false;
2929 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2930 if (Ops.size() > MulOpsInlineThreshold)
2931 break;
2932 // If we have a mul, expand the mul operands onto the end of the
2933 // operands list.
2934 Ops.erase(Ops.begin()+Idx);
2935 Ops.append(Mul->op_begin(), Mul->op_end());
2936 DeletedMul = true;
2937 }
2938
2939 // If we deleted at least one mul, we added operands to the end of the
2940 // list, and they are not necessarily sorted. Recurse to resort and
2941 // resimplify any operands we just acquired.
2942 if (DeletedMul)
2943 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2944 }
2945
2946 // If there are any add recurrences in the operands list, see if any other
2947 // multiplied values are loop invariant. If so, we can fold them into the
2948 // recurrence.
2949 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2950 ++Idx;
2951
2952 // Scan over all recurrences, trying to fold loop invariants into them.
2953 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2954 // Scan all of the other operands to this mul and add them to the vector
2955 // if they are loop invariant w.r.t. the recurrence.
2956 SmallVector<const SCEV *, 8> LIOps;
2957 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2958 const Loop *AddRecLoop = AddRec->getLoop();
2959 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2960 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
2961 LIOps.push_back(Ops[i]);
2962 Ops.erase(Ops.begin()+i);
2963 --i; --e;
2964 }
2965
2966 // If we found some loop invariants, fold them into the recurrence.
2967 if (!LIOps.empty()) {
2968 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
2969 SmallVector<const SCEV *, 4> NewOps;
2970 NewOps.reserve(AddRec->getNumOperands());
2971 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
2972 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
2973 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
2974 SCEV::FlagAnyWrap, Depth + 1));
2975
2976 // Build the new addrec. Propagate the NUW and NSW flags if both the
2977 // outer mul and the inner addrec are guaranteed to have no overflow.
2978 //
2979 // No self-wrap cannot be guaranteed after changing the step size, but
2980 // will be inferred if either NUW or NSW is true.
2981 SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec});
2982 const SCEV *NewRec = getAddRecExpr(
2983 NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags));
2984
2985 // If all of the other operands were loop invariant, we are done.
2986 if (Ops.size() == 1) return NewRec;
2987
2988 // Otherwise, multiply the folded AddRec by the non-invariant parts.
2989 for (unsigned i = 0;; ++i)
2990 if (Ops[i] == AddRec) {
2991 Ops[i] = NewRec;
2992 break;
2993 }
2994 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2995 }
2996
2997 // Okay, if there weren't any loop invariants to be folded, check to see
2998 // if there are multiple AddRec's with the same loop induction variable
2999 // being multiplied together. If so, we can fold them.
3000
3001 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
3002 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
3003 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
3004 // ]]],+,...up to x=2n}.
3005 // Note that the arguments to choose() are always integers with values
3006 // known at compile time, never SCEV objects.
3007 //
3008 // The implementation avoids pointless extra computations when the two
3009 // addrec's are of different length (mathematically, it's equivalent to
3010 // an infinite stream of zeros on the right).
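// For example, multiplying two affine recurrences yields a quadratic one:
// {A,+,B}<L> * {C,+,D}<L> = {A*C,+,A*D+B*C+B*D,+,2*B*D}<L>.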
3011 bool OpsModified = false;
3012 for (unsigned OtherIdx = Idx+1;
3013 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3014 ++OtherIdx) {
3015 const SCEVAddRecExpr *OtherAddRec =
3016 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3017 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
3018 continue;
3019
3020 // Limit max number of arguments to avoid creation of unreasonably big
3021 // SCEVAddRecs with very complex operands.
3022 if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
3023 MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
3024 continue;
3025
3026 bool Overflow = false;
3027 Type *Ty = AddRec->getType();
3028 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
3029 SmallVector<const SCEV*, 7> AddRecOps;
3030 for (int x = 0, xe = AddRec->getNumOperands() +
3031 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
3032 SmallVector <const SCEV *, 7> SumOps;
3033 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3034 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
3035 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
3036 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
3037 z < ze && !Overflow; ++z) {
3038 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
3039 uint64_t Coeff;
3040 if (LargerThan64Bits)
3041 Coeff = umul_ov(Coeff1, Coeff2, Overflow);
3042 else
3043 Coeff = Coeff1*Coeff2;
3044 const SCEV *CoeffTerm = getConstant(Ty, Coeff);
3045 const SCEV *Term1 = AddRec->getOperand(y-z);
3046 const SCEV *Term2 = OtherAddRec->getOperand(z);
3047 SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
3048 SCEV::FlagAnyWrap, Depth + 1));
3049 }
3050 }
3051 if (SumOps.empty())
3052 SumOps.push_back(getZero(Ty));
3053 AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
3054 }
3055 if (!Overflow) {
3056 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
3057 SCEV::FlagAnyWrap);
3058 if (Ops.size() == 2) return NewAddRec;
3059 Ops[Idx] = NewAddRec;
3060 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
3061 OpsModified = true;
3062 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3063 if (!AddRec)
3064 break;
3065 }
3066 }
3067 if (OpsModified)
3068 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3069
3070 // Otherwise couldn't fold anything into this recurrence. Move onto the
3071 // next one.
3072 }
3073
3074 // Okay, it looks like we really DO need a mul expr. Check to see if we
3075 // already have one, otherwise create a new one.
3076 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3077 }
3078
3079 /// Represents an unsigned remainder expression based on unsigned division.
3080 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
3081 const SCEV *RHS) {
3082 assert(getEffectiveSCEVType(LHS->getType()) ==
3083 getEffectiveSCEVType(RHS->getType()) &&
3084 "SCEVURemExpr operand types don't match!");
3085
3086 // Short-circuit easy cases
3087 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3088 // If constant is one, the result is trivial
3089 if (RHSC->getValue()->isOne())
3090 return getZero(LHS->getType()); // X urem 1 --> 0
3091
3092 // If constant is a power of two, fold into a zext(trunc(LHS)).
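// e.g. X urem 8 folds to zext(trunc(X to i3)) back to the type of X.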
3093 if (RHSC->getAPInt().isPowerOf2()) {
3094 Type *FullTy = LHS->getType();
3095 Type *TruncTy =
3096 IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
3097 return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
3098 }
3099 }
3100
3101 // Fall back to %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y).
3102 const SCEV *UDiv = getUDivExpr(LHS, RHS);
3103 const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
3104 return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
3105 }
3106
3107 /// Get a canonical unsigned division expression, or something simpler if
3108 /// possible.
3109 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
3110 const SCEV *RHS) {
3111 assert(getEffectiveSCEVType(LHS->getType()) ==
3112 getEffectiveSCEVType(RHS->getType()) &&
3113 "SCEVUDivExpr operand types don't match!");
3114
3115 FoldingSetNodeID ID;
3116 ID.AddInteger(scUDivExpr);
3117 ID.AddPointer(LHS);
3118 ID.AddPointer(RHS);
3119 void *IP = nullptr;
3120 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3121 return S;
3122
3123 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3124 if (RHSC->getValue()->isOne())
3125 return LHS; // X udiv 1 --> x
3126 // If the denominator is zero, the result of the udiv is undefined. Don't
3127 // try to analyze it, because the resolution chosen here may differ from
3128 // the resolution chosen in other parts of the compiler.
3129 if (!RHSC->getValue()->isZero()) {
3130 // Determine if the division can be folded into the operands of
3131 // the LHS expression.
3132 // TODO: Generalize this to non-constants by using known-bits information.
3133 Type *Ty = LHS->getType();
3134 unsigned LZ = RHSC->getAPInt().countLeadingZeros();
3135 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
3136 // For non-power-of-two values, effectively round the value up to the
3137 // nearest power of two.
3138 if (!RHSC->getAPInt().isPowerOf2())
3139 ++MaxShiftAmt;
3140 IntegerType *ExtTy =
3141 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
3142 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
3143 if (const SCEVConstant *Step =
3144 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
3145 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
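// e.g. {X,+,8}/u 4 --> {X /u 4,+,2}, once the zero-extend identity below
// proves the recurrence cannot wrap out of the extended type.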
3146 const APInt &StepInt = Step->getAPInt();
3147 const APInt &DivInt = RHSC->getAPInt();
3148 if (!StepInt.urem(DivInt) &&
3149 getZeroExtendExpr(AR, ExtTy) ==
3150 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3151 getZeroExtendExpr(Step, ExtTy),
3152 AR->getLoop(), SCEV::FlagAnyWrap)) {
3153 SmallVector<const SCEV *, 4> Operands;
3154 for (const SCEV *Op : AR->operands())
3155 Operands.push_back(getUDivExpr(Op, RHS));
3156 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
3157 }
3158 // Get a canonical UDivExpr for a recurrence.
3159 // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
3160 // We can currently only fold X%N if X is constant.
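// e.g. {5,+,2}/u 4 is rewritten as {4,+,2}/u 4, using Y = 5 - (5 % 2) = 4.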
3161 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3162 if (StartC && !DivInt.urem(StepInt) &&
3163 getZeroExtendExpr(AR, ExtTy) ==
3164 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3165 getZeroExtendExpr(Step, ExtTy),
3166 AR->getLoop(), SCEV::FlagAnyWrap)) {
3167 const APInt &StartInt = StartC->getAPInt();
3168 const APInt &StartRem = StartInt.urem(StepInt);
3169 if (StartRem != 0) {
3170 const SCEV *NewLHS =
3171 getAddRecExpr(getConstant(StartInt - StartRem), Step,
3172 AR->getLoop(), SCEV::FlagNW);
3173 if (LHS != NewLHS) {
3174 LHS = NewLHS;
3175
3176 // Reset the ID to include the new LHS, and check if it is
3177 // already cached.
3178 ID.clear();
3179 ID.AddInteger(scUDivExpr);
3180 ID.AddPointer(LHS);
3181 ID.AddPointer(RHS);
3182 IP = nullptr;
3183 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3184 return S;
3185 }
3186 }
3187 }
3188 }
3189 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
3190 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
3191 SmallVector<const SCEV *, 4> Operands;
3192 for (const SCEV *Op : M->operands())
3193 Operands.push_back(getZeroExtendExpr(Op, ExtTy));
3194 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
3195 // Find an operand that's safely divisible.
3196 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3197 const SCEV *Op = M->getOperand(i);
3198 const SCEV *Div = getUDivExpr(Op, RHSC);
3199 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
3200 Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
3201 M->op_end());
3202 Operands[i] = Div;
3203 return getMulExpr(Operands);
3204 }
3205 }
3206 }
3207
3208 // (A/B)/C --> A/(B*C) if safe and B*C can be folded.
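// e.g. (X /u 4) /u 2 --> X /u 8. If B*C overflows the type, the combined
// divisor exceeds any possible dividend, so the quotient folds to 0.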
3209 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) {
3210 if (auto *DivisorConstant =
3211 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
3212 bool Overflow = false;
3213 APInt NewRHS =
3214 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow);
3215 if (Overflow) {
3216 return getConstant(RHSC->getType(), 0, false);
3217 }
3218 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS));
3219 }
3220 }
3221
3222 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
3223 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
3224 SmallVector<const SCEV *, 4> Operands;
3225 for (const SCEV *Op : A->operands())
3226 Operands.push_back(getZeroExtendExpr(Op, ExtTy));
3227 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
3228 Operands.clear();
3229 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
3230 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
3231 if (isa<SCEVUDivExpr>(Op) ||
3232 getMulExpr(Op, RHS) != A->getOperand(i))
3233 break;
3234 Operands.push_back(Op);
3235 }
3236 if (Operands.size() == A->getNumOperands())
3237 return getAddExpr(Operands);
3238 }
3239 }
3240
3241 // Fold if both operands are constant.
3242 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
3243 Constant *LHSCV = LHSC->getValue();
3244 Constant *RHSCV = RHSC->getValue();
3245 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
3246 RHSCV)));
3247 }
3248 }
3249 }
3250
3251 // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs
3252 // changes). Make sure we get a new one.
3253 IP = nullptr;
3254 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
3255 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
3256 LHS, RHS);
3257 UniqueSCEVs.InsertNode(S, IP);
3258 addToLoopUseLists(S);
3259 return S;
3260 }
3261
3262 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
3263 APInt A = C1->getAPInt().abs();
3264 APInt B = C2->getAPInt().abs();
3265 uint32_t ABW = A.getBitWidth();
3266 uint32_t BBW = B.getBitWidth();
3267
3268 if (ABW > BBW)
3269 B = B.zext(ABW);
3270 else if (ABW < BBW)
3271 A = A.zext(BBW);
3272
3273 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
3274 }
3275
3276 /// Get a canonical unsigned division expression, or something simpler if
3277 /// possible. There is no representation for an exact udiv in SCEV IR, but we
3278 /// can attempt to remove factors from the LHS and RHS. We can't do this when
3279 /// it's not exact because the udiv may be clearing bits.
3280 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
3281 const SCEV *RHS) {
3282 // TODO: we could try to find factors in all sorts of things, but for now we
3283 // just deal with u/exact (multiply, constant). See SCEVDivision towards the
3284 // end of this file for inspiration.
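// e.g. ((8 * X)<nuw>) /u 4 cancels the common factor gcd(8, 4) = 4 and
// folds to 2 * X.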
3285
3286 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
3287 if (!Mul || !Mul->hasNoUnsignedWrap())
3288 return getUDivExpr(LHS, RHS);
3289
3290 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
3291 // If the mulexpr multiplies by a constant, then that constant must be the
3292 // first element of the mulexpr.
3293 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
3294 if (LHSCst == RHSCst) {
3295 SmallVector<const SCEV *, 2> Operands;
3296 Operands.append(Mul->op_begin() + 1, Mul->op_end());
3297 return getMulExpr(Operands);
3298 }
3299
3300 // We can't just assume that LHSCst divides RHSCst cleanly; it could be
3301 // that there's a factor provided by one of the other terms. We need to
3302 // check.
3303 APInt Factor = gcd(LHSCst, RHSCst);
3304 if (!Factor.isIntN(1)) {
3305 LHSCst =
3306 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
3307 RHSCst =
3308 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
3309 SmallVector<const SCEV *, 2> Operands;
3310 Operands.push_back(LHSCst);
3311 Operands.append(Mul->op_begin() + 1, Mul->op_end());
3312 LHS = getMulExpr(Operands);
3313 RHS = RHSCst;
3314 Mul = dyn_cast<SCEVMulExpr>(LHS);
3315 if (!Mul)
3316 return getUDivExactExpr(LHS, RHS);
3317 }
3318 }
3319 }
3320
3321 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
3322 if (Mul->getOperand(i) == RHS) {
3323 SmallVector<const SCEV *, 2> Operands;
3324 Operands.append(Mul->op_begin(), Mul->op_begin() + i);
3325 Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
3326 return getMulExpr(Operands);
3327 }
3328 }
3329
3330 return getUDivExpr(LHS, RHS);
3331 }
3332
3333 /// Get an add recurrence expression for the specified loop. Simplify the
3334 /// expression as much as possible.
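/// A step that is itself an addrec on the same loop is flattened:
/// e.g. {X,+,{Y,+,Z}<L>}<L> becomes {X,+,Y,+,Z}<L>.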
3335 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
3336 const Loop *L,
3337 SCEV::NoWrapFlags Flags) {
3338 SmallVector<const SCEV *, 4> Operands;
3339 Operands.push_back(Start);
3340 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3341 if (StepChrec->getLoop() == L) {
3342 Operands.append(StepChrec->op_begin(), StepChrec->op_end());
3343 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
3344 }
3345
3346 Operands.push_back(Step);
3347 return getAddRecExpr(Operands, L, Flags);
3348 }
3349
3350 /// Get an add recurrence expression for the specified loop. Simplify the
3351 /// expression as much as possible.
3352 const SCEV *
3353 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
3354 const Loop *L, SCEV::NoWrapFlags Flags) {
3355 if (Operands.size() == 1) return Operands[0];
3356 #ifndef NDEBUG
3357 Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
3358 for (unsigned i = 1, e = Operands.size(); i != e; ++i)
3359 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
3360 "SCEVAddRecExpr operand types don't match!");
3361 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
3362 assert(isLoopInvariant(Operands[i], L) &&
3363 "SCEVAddRecExpr operand is not loop-invariant!");
3364 #endif
3365
3366 if (Operands.back()->isZero()) {
3367 Operands.pop_back();
3368 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
3369 }
3370
3371 // It's tempting to call getConstantMaxBackedgeTakenCount here and
3372 // use that information to infer NUW and NSW flags. However, computing a
3373 // BE count requires calling getAddRecExpr, so we may not yet have a
3374 // meaningful BE count at this point (and if we don't, we'd be stuck
3375 // with a SCEVCouldNotCompute as the cached BE count).
3376
3377 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3378
3379 // Canonicalize nested AddRecs by nesting them in order of loop depth.
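// e.g. {{A,+,B}<Inner>,+,C}<Outer> becomes {{A,+,C}<Outer>,+,B}<Inner>
// when Outer is the shallower, dominating loop.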
3380 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3381 const Loop *NestedLoop = NestedAR->getLoop();
3382 if (L->contains(NestedLoop)
3383 ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
3384 : (!NestedLoop->contains(L) &&
3385 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
3386 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
3387 NestedAR->op_end());
3388 Operands[0] = NestedAR->getStart();
3389 // AddRecs require their operands be loop-invariant with respect to their
3390 // loops. Don't perform this transformation if it would break this
3391 // requirement.
3392 bool AllInvariant = all_of(
3393 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
3394
3395 if (AllInvariant) {
3396 // Create a recurrence for the outer loop with the same step size.
3397 //
3398 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
3399 // inner recurrence has the same property.
3400 SCEV::NoWrapFlags OuterFlags =
3401 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
3402
3403 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
3404 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
3405 return isLoopInvariant(Op, NestedLoop);
3406 });
3407
3408 if (AllInvariant) {
3409 // Ok, both add recurrences are valid after the transformation.
3410 //
3411 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
3412 // the outer recurrence has the same property.
3413 SCEV::NoWrapFlags InnerFlags =
3414 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
3415 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3416 }
3417 }
3418 // Reset Operands to its original state.
3419 Operands[0] = NestedAR;
3420 }
3421 }
3422
3423 // Okay, it looks like we really DO need an addrec expr. Check to see if we
3424 // already have one, otherwise create a new one.
3425 return getOrCreateAddRecExpr(Operands, L, Flags);
3426 }
3427
3428 const SCEV *
3429 ScalarEvolution::getGEPExpr(GEPOperator *GEP,
3430 const SmallVectorImpl<const SCEV *> &IndexExprs) {
3431 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
3432 // getSCEV(Base)->getType() has the same address space as Base->getType()
3433 // because SCEV::getType() preserves the address space.
3434 Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
3435 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
3436 // instruction to its SCEV, because the Instruction may be guarded by control
3437 // flow and the no-overflow bits may not be valid for the expression in any
3438 // context. This can be fixed similarly to how these flags are handled for
3439 // adds.
3440 SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW
3441 : SCEV::FlagAnyWrap;
3442
3443 Type *CurTy = GEP->getType();
3444 bool FirstIter = true;
3445 SmallVector<const SCEV *, 4> AddOps{BaseExpr};
3446 for (const SCEV *IndexExpr : IndexExprs) {
3447 // Compute the (potentially symbolic) offset in bytes for this index.
3448 if (StructType *STy = dyn_cast<StructType>(CurTy)) {
3449 // For a struct, add the member offset.
3450 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
3451 unsigned FieldNo = Index->getZExtValue();
3452 const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo);
3453 AddOps.push_back(FieldOffset);
3454
3455 // Update CurTy to the type of the field at Index.
3456 CurTy = STy->getTypeAtIndex(Index);
3457 } else {
3458 // Update CurTy to its element type.
3459 if (FirstIter) {
3460 assert(isa<PointerType>(CurTy) &&
3461 "The first index of a GEP indexes a pointer");
3462 CurTy = GEP->getSourceElementType();
3463 FirstIter = false;
3464 } else {
3465 CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0);
3466 }
3467 // For an array, add the element offset, explicitly scaled.
3468 const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy);
3469 // Getelementptr indices are signed.
3470 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy);
3471
3472 // Multiply the index by the element size to compute the element offset.
3473 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);
3474 AddOps.push_back(LocalOffset);
3475 }
3476 }
3477
3478 // Add the base and all the offsets together.
3479 return getAddExpr(AddOps, Wrap);
3480 }
3481
3482 std::tuple<SCEV *, FoldingSetNodeID, void *>
3483 ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
3484 ArrayRef<const SCEV *> Ops) {
3485 FoldingSetNodeID ID;
3486 void *IP = nullptr;
3487 ID.AddInteger(SCEVType);
3488 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3489 ID.AddPointer(Ops[i]);
3490 return std::tuple<SCEV *, FoldingSetNodeID, void *>(
3491 UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP);
3492 }
3493
3494 const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) {
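// abs(Op) is computed as smax(Op, -Op); IsNSW marks the negation <nsw>.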
3495 SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
3496 return getSMaxExpr(Op, getNegativeSCEV(Op, Flags));
3497 }
3498
3499 const SCEV *ScalarEvolution::getSignumExpr(const SCEV *Op) {
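// signum(Op) is computed as smin(smax(Op, -1), 1), i.e. -1, 0 or 1.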
3500 Type *Ty = Op->getType();
3501 return getSMinExpr(getSMaxExpr(Op, getMinusOne(Ty)), getOne(Ty));
3502 }
3503
3504 const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
3505 SmallVectorImpl<const SCEV *> &Ops) {
3506 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
3507 if (Ops.size() == 1) return Ops[0];
3508 #ifndef NDEBUG
3509 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
3510 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
3511 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
3512 "Operand types don't match!");
3513 #endif
3514
3515 bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr;
3516 bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr;
3517
3518 // Sort by complexity, this groups all similar expression types together.
3519 GroupByComplexity(Ops, &LI, DT);
3520
3521 // Check if we have created the same expression before.
3522 if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) {
3523 return S;
3524 }
3525
3526 // If there are any constants, fold them together.
3527 unsigned Idx = 0;
3528 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3529 ++Idx;
3530 assert(Idx < Ops.size());
3531 auto FoldOp = [&](const APInt &LHS, const APInt &RHS) {
3532 if (Kind == scSMaxExpr)
3533 return APIntOps::smax(LHS, RHS);
3534 else if (Kind == scSMinExpr)
3535 return APIntOps::smin(LHS, RHS);
3536 else if (Kind == scUMaxExpr)
3537 return APIntOps::umax(LHS, RHS);
3538 else if (Kind == scUMinExpr)
3539 return APIntOps::umin(LHS, RHS);
3540 llvm_unreachable("Unknown SCEV min/max opcode");
3541 };
3542
3543 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3544 // We found two constants, fold them together!
3545 ConstantInt *Fold = ConstantInt::get(
3546 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt()));
3547 Ops[0] = getConstant(Fold);
3548 Ops.erase(Ops.begin()+1); // Erase the folded element
3549 if (Ops.size() == 1) return Ops[0];
3550 LHSC = cast<SCEVConstant>(Ops[0]);
3551 }
3552
3553 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned);
3554 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned);
3555
3556 if (IsMax ? IsMinV : IsMaxV) {
3557 // If we are left with a constant minimum(/maximum)-int, strip it off.
3558 Ops.erase(Ops.begin());
3559 --Idx;
3560 } else if (IsMax ? IsMaxV : IsMinV) {
3561 // If we have a max(/min) with a constant maximum(/minimum)-int,
3562 // it will always be the extremum.
3563 return LHSC;
3564 }
3565
3566 if (Ops.size() == 1) return Ops[0];
3567 }
3568
3569 // Find the first operation of the same kind
3570 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind)
3571 ++Idx;
3572
3573 // Check to see if one of the operands is of the same kind. If so, expand its
3574 // operands onto our operand list, and recurse to simplify.
3575 if (Idx < Ops.size()) {
3576 bool DeletedAny = false;
3577 while (Ops[Idx]->getSCEVType() == Kind) {
3578 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]);
3579 Ops.erase(Ops.begin()+Idx);
3580 Ops.append(SMME->op_begin(), SMME->op_end());
3581 DeletedAny = true;
3582 }
3583
3584 if (DeletedAny)
3585 return getMinMaxExpr(Kind, Ops);
3586 }
3587
3588 // Okay, check to see if the same value occurs in the operand list twice. If
3589 // so, delete one. Since we sorted the list, these values are required to
3590 // be adjacent.
3591 llvm::CmpInst::Predicate GEPred =
3592 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
3593 llvm::CmpInst::Predicate LEPred =
3594 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
3595 llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred;
3596 llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred;
3597 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
3598 if (Ops[i] == Ops[i + 1] ||
3599 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
3600 // X op Y op Y --> X op Y
3601 // X op Y --> X, if we know X, Y are ordered appropriately
3602 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
3603 --i;
3604 --e;
3605 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
3606 Ops[i + 1])) {
3607 // X op Y --> Y, if we know X, Y are ordered appropriately
3608 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
3609 --i;
3610 --e;
3611 }
3612 }
3613
3614 if (Ops.size() == 1) return Ops[0];
3615
3616 assert(!Ops.empty() && "Reduced smax down to nothing!");
3617
3618 // Okay, it looks like we really DO need an expr. Check to see if we
3619 // already have one, otherwise create a new one.
3620 const SCEV *ExistingSCEV;
3621 FoldingSetNodeID ID;
3622 void *IP;
3623 std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops);
3624 if (ExistingSCEV)
3625 return ExistingSCEV;
3626 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3627 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3628 SCEV *S = new (SCEVAllocator)
3629 SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
3630
3631 UniqueSCEVs.InsertNode(S, IP);
3632 addToLoopUseLists(S);
3633 return S;
3634 }
3635
3636 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
3637 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3638 return getSMaxExpr(Ops);
3639 }
3640
3641 const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3642 return getMinMaxExpr(scSMaxExpr, Ops);
3643 }
3644
3645 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
3646 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3647 return getUMaxExpr(Ops);
3648 }
3649
3650 const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3651 return getMinMaxExpr(scUMaxExpr, Ops);
3652 }
3653
3654 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
3655 const SCEV *RHS) {
3656 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
3657 return getSMinExpr(Ops);
3658 }
3659
3660 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
3661 return getMinMaxExpr(scSMinExpr, Ops);
3662 }
3663
3664 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
3665 const SCEV *RHS) {
3666 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
3667 return getUMinExpr(Ops);
3668 }
3669
3670 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
3671 return getMinMaxExpr(scUMinExpr, Ops);
3672 }
3673
3674 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
3675 if (isa<ScalableVectorType>(AllocTy)) {
3676 Constant *NullPtr = Constant::getNullValue(AllocTy->getPointerTo());
3677 Constant *One = ConstantInt::get(IntTy, 1);
3678 Constant *GEP = ConstantExpr::getGetElementPtr(AllocTy, NullPtr, One);
3679 // Note that the expression we created is the final expression; we don't
3680 // want to simplify it any further. Also, if we call a normal getSCEV(),
3681 // we'll end up in endless recursion. So just create an SCEVUnknown.
3682 return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy));
3683 }
3684 // We can bypass creating a target-independent
3685 // constant expression and then folding it back into a ConstantInt.
3686 // This is just a compile-time optimization.
3687 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
3688 }
3689
3690 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
3691 StructType *STy,
3692 unsigned FieldNo) {
3693 // We can bypass creating a target-independent
3694 // constant expression and then folding it back into a ConstantInt.
3695 // This is just a compile-time optimization.
3696 return getConstant(
3697 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
3698 }
3699
3700 const SCEV *ScalarEvolution::getUnknown(Value *V) {
3701 // Don't attempt to do anything other than create a SCEVUnknown object
3702 // here. createSCEV only calls getUnknown after checking for all other
3703 // interesting possibilities, and any other code that calls getUnknown
3704 // is doing so in order to hide a value from SCEV canonicalization.
3705
3706 FoldingSetNodeID ID;
3707 ID.AddInteger(scUnknown);
3708 ID.AddPointer(V);
3709 void *IP = nullptr;
3710 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
3711 assert(cast<SCEVUnknown>(S)->getValue() == V &&
3712 "Stale SCEVUnknown in uniquing map!");
3713 return S;
3714 }
3715 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
3716 FirstUnknown);
3717 FirstUnknown = cast<SCEVUnknown>(S);
3718 UniqueSCEVs.InsertNode(S, IP);
3719 return S;
3720 }
3721
3722 //===----------------------------------------------------------------------===//
3723 // Basic SCEV Analysis and PHI Idiom Recognition Code
3724 //
3725
3726 /// Test if values of the given type are analyzable within the SCEV
3727 /// framework. This primarily includes integer types, and it can optionally
3728 /// include pointer types if the ScalarEvolution class has access to
3729 /// target-specific information.
3730 bool ScalarEvolution::isSCEVable(Type *Ty) const {
3731 // Integers and pointers are always SCEVable.
3732 return Ty->isIntOrPtrTy();
3733 }
3734
3735 /// Return the size in bits of the specified type, for which isSCEVable must
3736 /// return true.
3737 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
3738 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3739 if (Ty->isPointerTy())
3740 return getDataLayout().getIndexTypeSizeInBits(Ty);
3741 return getDataLayout().getTypeSizeInBits(Ty);
3742 }
3743
3744 /// Return a type with the same bitwidth as the given type and which represents
3745 /// how SCEV will treat the given type, for which isSCEVable must return
3746 /// true. For pointer types, this is the pointer index sized integer type.
3747 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
3748 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3749
3750 if (Ty->isIntegerTy())
3751 return Ty;
3752
3753 // The only other supported type is pointer.
3754 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
3755 return getDataLayout().getIndexType(Ty);
3756 }
3757
3758 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
3759 return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
3760 }
3761
3762 const SCEV *ScalarEvolution::getCouldNotCompute() {
3763 return CouldNotCompute.get();
3764 }
3765
3766 bool ScalarEvolution::checkValidity(const SCEV *S) const {
3767 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
3768 auto *SU = dyn_cast<SCEVUnknown>(S);
3769 return SU && SU->getValue() == nullptr;
3770 });
3771
3772 return !ContainsNulls;
3773 }
3774
3775 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
3776 HasRecMapType::iterator I = HasRecMap.find(S);
3777 if (I != HasRecMap.end())
3778 return I->second;
3779
3780 bool FoundAddRec =
3781 SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); });
3782 HasRecMap.insert({S, FoundAddRec});
3783 return FoundAddRec;
3784 }
3785
3786 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
3787 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
3788 /// offset I, then return {S', I}, else return {\p S, nullptr}.
3789 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
3790 const auto *Add = dyn_cast<SCEVAddExpr>(S);
3791 if (!Add)
3792 return {S, nullptr};
3793
3794 if (Add->getNumOperands() != 2)
3795 return {S, nullptr};
3796
3797 auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
3798 if (!ConstOp)
3799 return {S, nullptr};
3800
3801 return {Add->getOperand(1), ConstOp->getValue()};
3802 }
3803
3804 /// Return the ValueOffsetPair set for \p S. \p S can be represented
3805 /// by the value and offset from any ValueOffsetPair in the set.
3806 SetVector<ScalarEvolution::ValueOffsetPair> *
3807 ScalarEvolution::getSCEVValues(const SCEV *S) {
3808 ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
3809 if (SI == ExprValueMap.end())
3810 return nullptr;
3811 #ifndef NDEBUG
3812 if (VerifySCEVMap) {
3813 // Check there is no dangling Value in the set returned.
3814 for (const auto &VE : SI->second)
3815 assert(ValueExprMap.count(VE.first));
3816 }
3817 #endif
3818 return &SI->second;
3819 }
3820
3821 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
3822 /// cannot be used separately. eraseValueFromMap should be used to remove
3823 /// V from ValueExprMap and ExprValueMap at the same time.
3824 void ScalarEvolution::eraseValueFromMap(Value *V) {
3825 ValueExprMapType::iterator I = ValueExprMap.find_as(V);
3826 if (I != ValueExprMap.end()) {
3827 const SCEV *S = I->second;
3828 // Remove {V, 0} from the set of ExprValueMap[S]
3829 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
3830 SV->remove({V, nullptr});
3831
3832 // Remove {V, Offset} from the set of ExprValueMap[Stripped]
3833 const SCEV *Stripped;
3834 ConstantInt *Offset;
3835 std::tie(Stripped, Offset) = splitAddExpr(S);
3836 if (Offset != nullptr) {
3837 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
3838 SV->remove({V, Offset});
3839 }
3840 ValueExprMap.erase(V);
3841 }
3842 }
3843
3844 /// Check whether value has nuw/nsw/exact set but SCEV does not.
3845 /// TODO: In reality it is better to check for poison recursively, but this
3846 /// is better than nothing.
3847 static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
3848 if (auto *I = dyn_cast<Instruction>(V)) {
3849 if (isa<OverflowingBinaryOperator>(I)) {
3850 if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
3851 if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
3852 return true;
3853 if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
3854 return true;
3855 }
3856 } else if (isa<PossiblyExactOperator>(I) && I->isExact())
3857 return true;
3858 }
3859 return false;
3860 }
3861
3862 /// Return an existing SCEV if it exists, otherwise analyze the expression and
3863 /// create a new one.
3864 const SCEV *ScalarEvolution::getSCEV(Value *V) {
3865 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
3866
3867 const SCEV *S = getExistingSCEV(V);
3868 if (S == nullptr) {
3869 S = createSCEV(V);
3870 // During PHI resolution, it is possible to create two SCEVs for the same
3871 // V, so we need to double-check that V->S has been inserted into
3872 // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
3873 std::pair<ValueExprMapType::iterator, bool> Pair =
3874 ValueExprMap.insert({SCEVCallbackVH(V, this), S});
3875 if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
3876 ExprValueMap[S].insert({V, nullptr});
3877
3878 // If S == Stripped + Offset, add Stripped -> {V, Offset} into
3879 // ExprValueMap.
3880 const SCEV *Stripped = S;
3881 ConstantInt *Offset = nullptr;
3882 std::tie(Stripped, Offset) = splitAddExpr(S);
3883 // If Stripped is a SCEVUnknown, don't bother to save
3884 // Stripped -> {V, offset}. It doesn't simplify and sometimes even
3885 // increases the complexity of the expansion code.
3886 // If V is a GetElementPtrInst, don't save Stripped -> {V, offset}
3887 // because it may generate add/sub instead of GEP in SCEV expansion.
3888 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) &&
3889 !isa<GetElementPtrInst>(V))
3890 ExprValueMap[Stripped].insert({V, Offset});
3891 }
3892 }
3893 return S;
3894 }
3895
3896 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
3897 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
3898
3899 ValueExprMapType::iterator I = ValueExprMap.find_as(V);
3900 if (I != ValueExprMap.end()) {
3901 const SCEV *S = I->second;
3902 if (checkValidity(S))
3903 return S;
3904 eraseValueFromMap(V);
3905 forgetMemoizedResults(S);
3906 }
3907 return nullptr;
3908 }
3909
3910 /// Return a SCEV corresponding to -V = -1*V
3911 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
3912 SCEV::NoWrapFlags Flags) {
3913 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
3914 return getConstant(
3915 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
3916
3917 Type *Ty = V->getType();
3918 Ty = getEffectiveSCEVType(Ty);
3919 return getMulExpr(V, getMinusOne(Ty), Flags);
3920 }
3921
3922 /// If Expr computes ~A, return A; otherwise return nullptr.
3923 static const SCEV *MatchNotExpr(const SCEV *Expr) {
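// SCEV canonicalizes ~A as (-1) + (-1)*A, so match that add/mul shape.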
3924 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
3925 if (!Add || Add->getNumOperands() != 2 ||
3926 !Add->getOperand(0)->isAllOnesValue())
3927 return nullptr;
3928
3929 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
3930 if (!AddRHS || AddRHS->getNumOperands() != 2 ||
3931 !AddRHS->getOperand(0)->isAllOnesValue())
3932 return nullptr;
3933
3934 return AddRHS->getOperand(1);
3935 }
3936
3937 /// Return a SCEV corresponding to ~V = -1-V
3938 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
3939 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
3940 return getConstant(
3941 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
3942
3943 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y)
3944 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) {
3945 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) {
3946 SmallVector<const SCEV *, 2> MatchedOperands;
3947 for (const SCEV *Operand : MME->operands()) {
3948 const SCEV *Matched = MatchNotExpr(Operand);
3949 if (!Matched)
3950 return (const SCEV *)nullptr;
3951 MatchedOperands.push_back(Matched);
3952 }
3953 return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()),
3954 MatchedOperands);
3955 };
3956 if (const SCEV *Replaced = MatchMinMaxNegation(MME))
3957 return Replaced;
3958 }
3959
3960 Type *Ty = V->getType();
3961 Ty = getEffectiveSCEVType(Ty);
3962 return getMinusSCEV(getMinusOne(Ty), V);
3963 }
3964
3965 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
3966 SCEV::NoWrapFlags Flags,
3967 unsigned Depth) {
3968 // Fast path: X - X --> 0.
3969 if (LHS == RHS)
3970 return getZero(LHS->getType());
3971
3972 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
3973 // makes it so that we cannot make much use of NUW.
3974 auto AddFlags = SCEV::FlagAnyWrap;
3975 const bool RHSIsNotMinSigned =
3976 !getSignedRangeMin(RHS).isMinSignedValue();
3977 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) {
3978 // Let M be the minimum representable signed value. Then (-1)*RHS
3979 // signed-wraps if and only if RHS is M. That can happen even for
3980 // a NSW subtraction because e.g. (-1)*M signed-wraps even though
3981 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
3982 // (-1)*RHS, we need to prove that RHS != M.
3983 //
3984 // If LHS is non-negative and we know that LHS - RHS does not
3985 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
3986 // either by proving that RHS > M or that LHS >= 0.
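// e.g. in i8, M = -128: (-1) * (-128) wraps back to -128, while
// -1 - (-128) = 127 does not.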
3987 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
3988 AddFlags = SCEV::FlagNSW;
3989 }
3990 }
3991
3992 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
3993 // RHS is NSW and LHS >= 0.
3994 //
3995 // The difficulty here is that the NSW flag may have been proven
3996 // relative to a loop that is to be found in a recurrence in LHS and
3997 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
3998 // larger scope than intended.
3999 auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
4000
4001 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
4002 }
4003
4004 const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
4005 unsigned Depth) {
4006 Type *SrcTy = V->getType();
4007 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4008 "Cannot truncate or zero extend with non-integer arguments!");
4009 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4010 return V; // No conversion
4011 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4012 return getTruncateExpr(V, Ty, Depth);
4013 return getZeroExtendExpr(V, Ty, Depth);
4014 }
4015
4016 const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
4017 unsigned Depth) {
4018 Type *SrcTy = V->getType();
4019 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4020 "Cannot truncate or zero extend with non-integer arguments!");
4021 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4022 return V; // No conversion
4023 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4024 return getTruncateExpr(V, Ty, Depth);
4025 return getSignExtendExpr(V, Ty, Depth);
4026 }
4027
4028 const SCEV *
4029 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
4030 Type *SrcTy = V->getType();
4031 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4032 "Cannot noop or zero extend with non-integer arguments!");
4033 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4034 "getNoopOrZeroExtend cannot truncate!");
4035 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4036 return V; // No conversion
4037 return getZeroExtendExpr(V, Ty);
4038 }
4039
4040 const SCEV *
4041 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
4042 Type *SrcTy = V->getType();
4043 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4044 "Cannot noop or sign extend with non-integer arguments!");
4045 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4046 "getNoopOrSignExtend cannot truncate!");
4047 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4048 return V; // No conversion
4049 return getSignExtendExpr(V, Ty);
4050 }
4051
4052 const SCEV *
4053 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
4054 Type *SrcTy = V->getType();
4055 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4056 "Cannot noop or any extend with non-integer arguments!");
4057 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4058 "getNoopOrAnyExtend cannot truncate!");
4059 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4060 return V; // No conversion
4061 return getAnyExtendExpr(V, Ty);
4062 }
4063
4064 const SCEV *
4065 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
4066 Type *SrcTy = V->getType();
4067 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4068 "Cannot truncate or noop with non-integer arguments!");
4069 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
4070 "getTruncateOrNoop cannot extend!");
4071 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4072 return V; // No conversion
4073 return getTruncateExpr(V, Ty);
4074 }
4075
4076 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
4077 const SCEV *RHS) {
4078 const SCEV *PromotedLHS = LHS;
4079 const SCEV *PromotedRHS = RHS;
4080
4081 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
4082 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
4083 else
4084 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
4085
4086 return getUMaxExpr(PromotedLHS, PromotedRHS);
4087 }
4088
4089 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
4090 const SCEV *RHS) {
4091 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
4092 return getUMinFromMismatchedTypes(Ops);
4093 }
4094
4095 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(
4096 SmallVectorImpl<const SCEV *> &Ops) {
4097 assert(!Ops.empty() && "At least one operand must be!");
4098 // Trivial case.
4099 if (Ops.size() == 1)
4100 return Ops[0];
4101
4102 // Find the max type first.
4103 Type *MaxType = nullptr;
4104 for (auto *S : Ops)
4105 if (MaxType)
4106 MaxType = getWiderType(MaxType, S->getType());
4107 else
4108 MaxType = S->getType();
4109 assert(MaxType && "Failed to find maximum type!");
4110
4111 // Extend all ops to max type.
4112 SmallVector<const SCEV *, 2> PromotedOps;
4113 for (auto *S : Ops)
4114 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));
4115
4116 // Generate umin.
4117 return getUMinExpr(PromotedOps);
4118 }
4119
4120 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
4121 // A pointer operand may evaluate to a nonpointer expression, such as null.
4122 if (!V->getType()->isPointerTy())
4123 return V;
4124
4125 while (true) {
4126 if (const SCEVIntegralCastExpr *Cast = dyn_cast<SCEVIntegralCastExpr>(V)) {
4127 V = Cast->getOperand();
4128 } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
4129 const SCEV *PtrOp = nullptr;
4130 for (const SCEV *NAryOp : NAry->operands()) {
4131 if (NAryOp->getType()->isPointerTy()) {
4132 // Cannot find the base of an expression with multiple pointer ops.
4133 if (PtrOp)
4134 return V;
4135 PtrOp = NAryOp;
4136 }
4137 }
4138 if (!PtrOp) // All operands were non-pointer.
4139 return V;
4140 V = PtrOp;
4141 } else // Not something we can look further into.
4142 return V;
4143 }
4144 }
4145
4146 /// Push users of the given Instruction onto the given Worklist.
4147 static void
4148 PushDefUseChildren(Instruction *I,
4149 SmallVectorImpl<Instruction *> &Worklist) {
4150 // Push the def-use children onto the Worklist stack.
4151 for (User *U : I->users())
4152 Worklist.push_back(cast<Instruction>(U));
4153 }
4154
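/// Invalidate any cached SCEVs that still refer to SymName, the symbolic
/// placeholder created for PN while its real SCEV was being computed.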
4155 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
4156 SmallVector<Instruction *, 16> Worklist;
4157 PushDefUseChildren(PN, Worklist);
4158
4159 SmallPtrSet<Instruction *, 8> Visited;
4160 Visited.insert(PN);
4161 while (!Worklist.empty()) {
4162 Instruction *I = Worklist.pop_back_val();
4163 if (!Visited.insert(I).second)
4164 continue;
4165
4166 auto It = ValueExprMap.find_as(static_cast<Value *>(I));
4167 if (It != ValueExprMap.end()) {
4168 const SCEV *Old = It->second;
4169
4170 // Short-circuit the def-use traversal if the symbolic name
4171 // ceases to appear in expressions.
4172 if (Old != SymName && !hasOperand(Old, SymName))
4173 continue;
4174
4175 // SCEVUnknown for a PHI either means that it has an unrecognized
4176 // structure, it's a PHI that's in the process of being computed
4177 // by createNodeForPHI, or it's a single-value PHI. In the first case,
4178 // additional loop trip count information isn't going to change anything.
4179 // In the second case, createNodeForPHI will perform the necessary
4180 // updates on its own when it gets to that point. In the third, we do
4181 // want to forget the SCEVUnknown.
4182 if (!isa<PHINode>(I) ||
4183 !isa<SCEVUnknown>(Old) ||
4184 (I != PN && Old == SymName)) {
4185 eraseValueFromMap(It->first);
4186 forgetMemoizedResults(Old);
4187 }
4188 }
4189
4190 PushDefUseChildren(I, Worklist);
4191 }
4192 }
4193
4194 namespace {
4195
4196 /// Takes SCEV S and Loop L. For each AddRec sub-expression whose loop is L,
4197 /// use its start expression. If the sub-expression's loop is not L, then
4198 /// use the AddRec itself if IgnoreOtherLoops is true;
4199 /// otherwise the rewrite cannot be done.
4200 /// If S contains a non-invariant unknown SCEV, the rewrite cannot be done.
4201 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
4202 public:
4203 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
4204 bool IgnoreOtherLoops = true) {
4205 SCEVInitRewriter Rewriter(L, SE);
4206 const SCEV *Result = Rewriter.visit(S);
4207 if (Rewriter.hasSeenLoopVariantSCEVUnknown())
4208 return SE.getCouldNotCompute();
4209 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4210 ? SE.getCouldNotCompute()
4211 : Result;
4212 }
4213
4214 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4215 if (!SE.isLoopInvariant(Expr, L))
4216 SeenLoopVariantSCEVUnknown = true;
4217 return Expr;
4218 }
4219
4220 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4221 // Only re-write AddRecExprs for this loop.
4222 if (Expr->getLoop() == L)
4223 return Expr->getStart();
4224 SeenOtherLoops = true;
4225 return Expr;
4226 }
4227
4228 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }
4229
4230 bool hasSeenOtherLoops() { return SeenOtherLoops; }
4231
4232 private:
4233 explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
4234 : SCEVRewriteVisitor(SE), L(L) {}
4235
4236 const Loop *L;
4237 bool SeenLoopVariantSCEVUnknown = false;
4238 bool SeenOtherLoops = false;
4239 };
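// Illustration (hypothetical values): rewriting {%a,+,%s}<L> with respect to
// L yields the start expression %a. An AddRec on a different loop is either
// kept as-is (IgnoreOtherLoops == true) or makes the whole rewrite fail with
// SCEVCouldNotCompute, as does any loop-variant SCEVUnknown.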
4240
4241 /// Takes SCEV S and Loop L. For each AddRec sub-expression whose loop is L,
4242 /// use its post-increment expression. If the sub-expression's loop is not L,
4243 /// use the AddRec itself.
4244 /// If S contains a non-invariant unknown SCEV, the rewrite cannot be done.
4245 class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
4246 public:
4247 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
4248 SCEVPostIncRewriter Rewriter(L, SE);
4249 const SCEV *Result = Rewriter.visit(S);
4250 return Rewriter.hasSeenLoopVariantSCEVUnknown()
4251 ? SE.getCouldNotCompute()
4252 : Result;
4253 }
4254
4255 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4256 if (!SE.isLoopInvariant(Expr, L))
4257 SeenLoopVariantSCEVUnknown = true;
4258 return Expr;
4259 }
4260
4261 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4262 // Only re-write AddRecExprs for this loop.
4263 if (Expr->getLoop() == L)
4264 return Expr->getPostIncExpr(SE);
4265 SeenOtherLoops = true;
4266 return Expr;
4267 }
4268
4269 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }
4270
4271 bool hasSeenOtherLoops() { return SeenOtherLoops; }
4272
4273 private:
4274 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
4275 : SCEVRewriteVisitor(SE), L(L) {}
4276
4277 const Loop *L;
4278 bool SeenLoopVariantSCEVUnknown = false;
4279 bool SeenOtherLoops = false;
4280 };
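// Illustration (hypothetical values): rewriting {%a,+,%s}<L> with respect to
// L yields the post-increment recurrence {(%a + %s),+,%s}<L>; AddRecs on
// other loops are left untouched.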
4281
4282 /// This class evaluates the compare condition by matching it against the
4283 /// condition of the loop latch. If there is a match, we assume a true value
4284 /// for the condition while building SCEV nodes.
4285 class SCEVBackedgeConditionFolder
4286 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
4287 public:
4288 static const SCEV *rewrite(const SCEV *S, const Loop *L,
4289 ScalarEvolution &SE) {
4290 bool IsPosBECond = false;
4291 Value *BECond = nullptr;
4292 if (BasicBlock *Latch = L->getLoopLatch()) {
4293 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
4294 if (BI && BI->isConditional()) {
4295 assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
4296 "Both outgoing branches should not target same header!");
4297 BECond = BI->getCondition();
4298 IsPosBECond = BI->getSuccessor(0) == L->getHeader();
4299 } else {
4300 return S;
4301 }
4302 }
4303 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
4304 return Rewriter.visit(S);
4305 }
4306
4307 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4308 const SCEV *Result = Expr;
4309 bool InvariantF = SE.isLoopInvariant(Expr, L);
4310
4311 if (!InvariantF) {
4312 Instruction *I = cast<Instruction>(Expr->getValue());
4313 switch (I->getOpcode()) {
4314 case Instruction::Select: {
4315 SelectInst *SI = cast<SelectInst>(I);
4316 Optional<const SCEV *> Res =
4317 compareWithBackedgeCondition(SI->getCondition());
4318 if (Res.hasValue()) {
4319 bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
4320 Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
4321 }
4322 break;
4323 }
4324 default: {
4325 Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
4326 if (Res.hasValue())
4327 Result = Res.getValue();
4328 break;
4329 }
4330 }
4331 }
4332 return Result;
4333 }
4334
4335 private:
4336 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
4337 bool IsPosBECond, ScalarEvolution &SE)
4338 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
4339 IsPositiveBECond(IsPosBECond) {}
4340
4341 Optional<const SCEV *> compareWithBackedgeCondition(Value *IC);
4342
4343 const Loop *L;
4344 /// Loop back condition.
4345 Value *BackedgeCond = nullptr;
4346 /// Set to true if the loop backedge is taken on the positive branch condition.
4347 bool IsPositiveBECond;
4348 };
4349
4350 Optional<const SCEV *>
4351 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {
4352
4353 // If the value matches the backedge condition of the loop latch,
4354 // return a constant evolution node based on whether the loopback
4355 // branch is taken.
4356 if (BackedgeCond == IC)
4357 return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
4358 : SE.getZero(Type::getInt1Ty(SE.getContext()));
4359 return None;
4360 }
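// Illustration (a sketch, not from the original source): if the latch of L
// ends in
//   %cmp = icmp slt i32 %iv, %n
//   br i1 %cmp, label %header, label %exit
// then, while building SCEVs for L, a select guarded by the same condition,
//   %v = select i1 %cmp, i32 %x, i32 %y
// folds to the SCEV of %x, because %cmp must be true whenever the backedge
// is taken.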
4361
4362 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
4363 public:
4364 static const SCEV *rewrite(const SCEV *S, const Loop *L,
4365 ScalarEvolution &SE) {
4366 SCEVShiftRewriter Rewriter(L, SE);
4367 const SCEV *Result = Rewriter.visit(S);
4368 return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
4369 }
4370
4371 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4372 // Only allow AddRecExprs for this loop.
4373 if (!SE.isLoopInvariant(Expr, L))
4374 Valid = false;
4375 return Expr;
4376 }
4377
4378 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4379 if (Expr->getLoop() == L && Expr->isAffine())
4380 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
4381 Valid = false;
4382 return Expr;
4383 }
4384
4385 bool isValid() { return Valid; }
4386
4387 private:
4388 explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
4389 : SCEVRewriteVisitor(SE), L(L) {}
4390
4391 const Loop *L;
4392 bool Valid = true;
4393 };
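// Illustration (hypothetical values): SCEVShiftRewriter maps the affine
// AddRec {%a,+,%s}<L> to {(%a - %s),+,%s}<L>, i.e. the same evolution
// shifted back by one iteration. Anything that is not an affine AddRec on L
// or a loop-invariant value marks the rewrite invalid.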
4394
4395 } // end anonymous namespace
4396
4397 SCEV::NoWrapFlags
4398 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
4399 if (!AR->isAffine())
4400 return SCEV::FlagAnyWrap;
4401
4402 using OBO = OverflowingBinaryOperator;
4403
4404 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;
4405
4406 if (!AR->hasNoSignedWrap()) {
4407 ConstantRange AddRecRange = getSignedRange(AR);
4408 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));
4409
4410 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4411 Instruction::Add, IncRange, OBO::NoSignedWrap);
4412 if (NSWRegion.contains(AddRecRange))
4413 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
4414 }
4415
4416 if (!AR->hasNoUnsignedWrap()) {
4417 ConstantRange AddRecRange = getUnsignedRange(AR);
4418 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));
4419
4420 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4421 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
4422 if (NUWRegion.contains(AddRecRange))
4423 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
4424 }
4425
4426 return Result;
4427 }
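// Worked example (hypothetical ranges): for an affine AddRec whose unsigned
// range is [0, 100) and whose step has range [1, 2),
// makeGuaranteedNoWrapRegion(Add, [1, 2), NoUnsignedWrap) is [0, UINT_MAX),
// which contains [0, 100), so the recurrence can be marked <nuw> even though
// the IR carried no such flag.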
4428
4429 namespace {
4430
4431 /// Represents an abstract binary operation. This may exist as a
4432 /// normal instruction or constant expression, or may have been
4433 /// derived from an expression tree.
4434 struct BinaryOp {
4435 unsigned Opcode;
4436 Value *LHS;
4437 Value *RHS;
4438 bool IsNSW = false;
4439 bool IsNUW = false;
4440 bool IsExact = false;
4441
4442 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
4443 /// constant expression.
4444 Operator *Op = nullptr;
4445
4446 explicit BinaryOp(Operator *Op)
4447 : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
4448 Op(Op) {
4449 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
4450 IsNSW = OBO->hasNoSignedWrap();
4451 IsNUW = OBO->hasNoUnsignedWrap();
4452 }
4453 if (auto *PEO = dyn_cast<PossiblyExactOperator>(Op))
4454 IsExact = PEO->isExact();
4455 }
4456
4457 explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
4458 bool IsNUW = false, bool IsExact = false)
4459 : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW),
4460 IsExact(IsExact) {}
4461 };
4462
4463 } // end anonymous namespace
4464
4465 /// Try to map \p V into a BinaryOp, and return \c None on failure.
4466 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
4467 auto *Op = dyn_cast<Operator>(V);
4468 if (!Op)
4469 return None;
4470
4471 // Implementation detail: all the cleverness here should happen without
4472 // creating new SCEV expressions -- our caller knows tricks to avoid creating
4473 // SCEV expressions when possible, and we should not break that.
4474
4475 switch (Op->getOpcode()) {
4476 case Instruction::Add:
4477 case Instruction::Sub:
4478 case Instruction::Mul:
4479 case Instruction::UDiv:
4480 case Instruction::URem:
4481 case Instruction::And:
4482 case Instruction::Or:
4483 case Instruction::AShr:
4484 case Instruction::Shl:
4485 return BinaryOp(Op);
4486
4487 case Instruction::Xor:
4488 if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
4489 // If the RHS of the xor is a signmask, then this is just an add.
4490 // Instcombine turns add of signmask into xor as a strength reduction step.
4491 if (RHSC->getValue().isSignMask())
4492 return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
4493 return BinaryOp(Op);
4494
4495 case Instruction::LShr:
4496 // Turn a logical shift right by a constant into an unsigned divide.
4497 if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
4498 uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();
4499
4500 // If the shift count is not less than the bitwidth, the result of
4501 // the shift is undefined. Don't try to analyze it, because the
4502 // resolution chosen here may differ from the resolution chosen in
4503 // other parts of the compiler.
4504 if (SA->getValue().ult(BitWidth)) {
4505 Constant *X =
4506 ConstantInt::get(SA->getContext(),
4507 APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
4508 return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
4509 }
4510 }
4511 return BinaryOp(Op);
4512
4513 case Instruction::ExtractValue: {
4514 auto *EVI = cast<ExtractValueInst>(Op);
4515 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
4516 break;
4517
4518 auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
4519 if (!WO)
4520 break;
4521
4522 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4523 bool Signed = WO->isSigned();
4524 // TODO: Should add nuw/nsw flags for mul as well.
4525 if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
4526 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
4527
4528 // Now that we know that all uses of the arithmetic-result component of
4529 // WO are guarded by the overflow check, we can go ahead and pretend
4530 // that the arithmetic is non-overflowing.
4531 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
4532 /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
4533 }
4534
4535 default:
4536 break;
4537 }
4538
4539 // Recognise the intrinsic loop.decrement.reg; as this has exactly the same
4540 // semantics as a Sub, return a binary sub expression.
4541 if (auto *II = dyn_cast<IntrinsicInst>(V))
4542 if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
4543 return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1));
4544
4545 return None;
4546 }
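// Illustration (a sketch, not from the original source): MatchBinaryOp
// normalizes a few idioms so the phi analysis below only sees plain
// arithmetic, e.g.
//   %y = lshr i32 %x, 3           --> BinaryOp(UDiv, %x, 8)
//   %y = xor i32 %x, -2147483648  --> BinaryOp(Add, %x, signmask)
// and the arithmetic result of a checked add such as
//   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
// becomes BinaryOp(Add, %a, %b), with IsNSW set when every use of the sum is
// guarded by the overflow bit.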
4547
4548 /// Helper function for createAddRecFromPHIWithCasts. We have a phi
4549 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
4550 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
4551 /// way. This function checks if \p Op, an operand of this SCEVAddExpr,
4552 /// follows one of the following patterns:
4553 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
4554 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
4555 /// If the SCEV expression of \p Op conforms with one of the expected patterns
4556 /// we return the type of the truncation operation, and indicate whether the
4557 /// truncated type should be treated as signed/unsigned by setting
4558 /// \p Signed to true/false, respectively.
4559 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
4560 bool &Signed, ScalarEvolution &SE) {
4561 // The case where Op == SymbolicPHI (that is, with no type conversions on
4562 // the way) is handled by the regular add recurrence creating logic and
4563 // would have already been triggered in createAddRecForPHI. Reaching it here
4564 // means that createAddRecFromPHI had failed for this PHI before (e.g.,
4565 // because one of the other operands of the SCEVAddExpr updating this PHI is
4566 // not invariant).
4567 //
4568 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
4569 // this case predicates that allow us to prove that Op == SymbolicPHI will
4570 // be added.
4571 if (Op == SymbolicPHI)
4572 return nullptr;
4573
4574 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
4575 unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
4576 if (SourceBits != NewBits)
4577 return nullptr;
4578
4579 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
4580 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
4581 if (!SExt && !ZExt)
4582 return nullptr;
4583 const SCEVTruncateExpr *Trunc =
4584 SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
4585 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
4586 if (!Trunc)
4587 return nullptr;
4588 const SCEV *X = Trunc->getOperand();
4589 if (X != SymbolicPHI)
4590 return nullptr;
4591 Signed = SExt != nullptr;
4592 return Trunc->getType();
4593 }
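// Illustration (hypothetical values): with %X an i64 phi, the operand
//   (sext i32 (trunc i64 %X to i32) to i64)
// matches the pattern, so isSimpleCastedPHI returns the i32 truncation type
// and sets Signed to true. A plain %X, or an extend back to any width other
// than i64, returns nullptr.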
4594
4595 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
4596 if (!PN->getType()->isIntegerTy())
4597 return nullptr;
4598 const Loop *L = LI.getLoopFor(PN->getParent());
4599 if (!L || L->getHeader() != PN->getParent())
4600 return nullptr;
4601 return L;
4602 }
4603
4604 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check whether
4605 // the computation that updates the phi matches the following pattern:
4606 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
4607 // which corresponds to a phi->trunc->sext/zext->add->phi update chain.
4608 // If so, try to see if it can be rewritten as an AddRecExpr under some
4609 // Predicates. If successful, return them as a pair. Also cache the results
4610 // of the analysis.
4611 //
4612 // Example usage scenario:
4613 // Say the Rewriter is called for the following SCEV:
4614 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4615 // where:
4616 // %X = phi i64 (%Start, %BEValue)
4617 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
4618 // and call this function with %SymbolicPHI = %X.
4619 //
4620 // The analysis will find that the value coming around the backedge has
4621 // the following SCEV:
4622 // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4623 // Upon concluding that this matches the desired pattern, the function
4624 // will return the pair {NewAddRec, SmallPredsVec} where:
4625 // NewAddRec = {%Start,+,%Step}
4626 // SmallPredsVec = {P1, P2, P3} as follows:
4627 // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
4628 // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
4629 // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
4630 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec
4631 // under the predicates {P1,P2,P3}.
4632 // This predicated rewrite will be cached in PredicatedSCEVRewrites:
4633 // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
4634 //
4635 // TODO's:
4636 //
4637 // 1) Extend the Induction descriptor to also support inductions that involve
4638 // casts: When needed (namely, when we are called in the context of the
4639 // vectorizer induction analysis), a Set of cast instructions will be
4640 // populated by this method, and provided back to isInductionPHI. This is
4641 // needed to allow the vectorizer to properly record them to be ignored by
4642 // the cost model and to avoid vectorizing them (otherwise these casts,
4643 // which are redundant under the runtime overflow checks, will be
4644 // vectorized, which can be costly).
4645 //
4646 // 2) Support additional induction/PHISCEV patterns: We also want to support
4647 // inductions where the sext-trunc / zext-trunc operations (partly) occur
4648 // after the induction update operation (the induction increment):
4649 //
4650 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
4651 // which corresponds to a phi->add->trunc->sext/zext->phi update chain.
4652 //
4653 // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
4654 // which corresponds to a phi->trunc->add->sext/zext->phi update chain.
4655 //
4656 // 3) Outline common code with createAddRecFromPHI to avoid duplication.
4657 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4658 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
4659 SmallVector<const SCEVPredicate *, 3> Predicates;
4660
4661 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
4662 // return an AddRec expression under some predicate.
4663
4664 auto *PN = cast<PHINode>(SymbolicPHI->getValue());
4665 const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
4666 assert(L && "Expecting an integer loop header phi");
4667
4668 // The loop may have multiple entrances or multiple exits; we can analyze
4669 // this phi as an addrec if it has a unique entry value and a unique
4670 // backedge value.
4671 Value *BEValueV = nullptr, *StartValueV = nullptr;
4672 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
4673 Value *V = PN->getIncomingValue(i);
4674 if (L->contains(PN->getIncomingBlock(i))) {
4675 if (!BEValueV) {
4676 BEValueV = V;
4677 } else if (BEValueV != V) {
4678 BEValueV = nullptr;
4679 break;
4680 }
4681 } else if (!StartValueV) {
4682 StartValueV = V;
4683 } else if (StartValueV != V) {
4684 StartValueV = nullptr;
4685 break;
4686 }
4687 }
4688 if (!BEValueV || !StartValueV)
4689 return None;
4690
4691 const SCEV *BEValue = getSCEV(BEValueV);
4692
4693 // If the value coming around the backedge is an add with the symbolic
4694 // value we just inserted, possibly with casts that we can ignore under
4695 // an appropriate runtime guard, then we found a simple induction variable!
4696 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue);
4697 if (!Add)
4698 return None;
4699
4700 // If there is a single occurrence of the symbolic value, possibly
4701 // casted, replace it with a recurrence.
4702 unsigned FoundIndex = Add->getNumOperands();
4703 Type *TruncTy = nullptr;
4704 bool Signed;
4705 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
4706 if ((TruncTy =
4707 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this)))
4708 if (FoundIndex == e) {
4709 FoundIndex = i;
4710 break;
4711 }
4712
4713 if (FoundIndex == Add->getNumOperands())
4714 return None;
4715
4716 // Create an add with everything but the specified operand.
4717 SmallVector<const SCEV *, 8> Ops;
4718 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
4719 if (i != FoundIndex)
4720 Ops.push_back(Add->getOperand(i));
4721 const SCEV *Accum = getAddExpr(Ops);
4722
4723 // The runtime checks will not be valid if the step amount is
4724 // varying inside the loop.
4725 if (!isLoopInvariant(Accum, L))
4726 return None;
4727
4728 // *** Part2: Create the predicates
4729
4730 // Analysis was successful: we have a phi-with-cast pattern for which we
4731 // can return an AddRec expression under the following predicates:
4732 //
4733 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum)
4734 // fits within the truncated type (does not overflow) for i = 0 to n-1.
4735 // P2: An Equal predicate that guarantees that
4736 // Start = (Ext ix (Trunc iy (Start) to ix) to iy)
4737 // P3: An Equal predicate that guarantees that
4738 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
4739 //
4740 // As we next prove, the above predicates guarantee that:
4741 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
4742 //
4743 //
4744 // More formally, we want to prove that:
4745 // Expr(i+1) = Start + (i+1) * Accum
4746 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
4747 //
4748 // Given that:
4749 // 1) Expr(0) = Start
4750 // 2) Expr(1) = Start + Accum
4751 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
4752 // 3) Induction hypothesis (step i):
4753 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
4754 //
4755 // Proof:
4756 // Expr(i+1) =
4757 // = Start + (i+1)*Accum
4758 // = (Start + i*Accum) + Accum
4759 // = Expr(i) + Accum
4760 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
4761 // :: from step i
4762 //
4763 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum
4764 //
4765 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
4766 // + (Ext ix (Trunc iy (Accum) to ix) to iy)
4767 // + Accum :: from P3
4768 //
4769 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy)
4770 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
4771 //
4772 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
4773 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
4774 //
4775 // By induction, the same applies to all iterations 1<=i<n:
4776 //
4777
4778 // Create a truncated addrec for which we will add a no overflow check (P1).
4779 const SCEV *StartVal = getSCEV(StartValueV);
4780 const SCEV *PHISCEV =
4781 getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
4782 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);
4783
4784 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr.
4785 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV
4786 // will be constant.
4787 //
4788 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't
4789 // add P1.
4790 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
4791 SCEVWrapPredicate::IncrementWrapFlags AddedFlags =
4792 Signed ? SCEVWrapPredicate::IncrementNSSW
4793 : SCEVWrapPredicate::IncrementNUSW;
4794 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
4795 Predicates.push_back(AddRecPred);
4796 }
4797
4798 // Create the Equal Predicates P2,P3:
4799
4800 // It is possible that the predicates P2 and/or P3 are computable at
4801 // compile time due to StartVal and/or Accum being constants.
4802 // If either one is, then we can check that now and escape if either P2
4803 // or P3 is false.
4804
4805 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
4806 // for each of StartVal and Accum
4807 auto getExtendedExpr = [&](const SCEV *Expr,
4808 bool CreateSignExtend) -> const SCEV * {
4809 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
4810 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
4811 const SCEV *ExtendedExpr =
4812 CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType())
4813 : getZeroExtendExpr(TruncatedExpr, Expr->getType());
4814 return ExtendedExpr;
4815 };
4816
4817 // Given:
4818 // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
4819 // = getExtendedExpr(Expr)
4820 // Determine whether the predicate P: Expr == ExtendedExpr
4821 // is known to be false at compile time
4822 auto PredIsKnownFalse = [&](const SCEV *Expr,
4823 const SCEV *ExtendedExpr) -> bool {
4824 return Expr != ExtendedExpr &&
4825 isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
4826 };
4827
4828 const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
4829 if (PredIsKnownFalse(StartVal, StartExtended)) {
4830 LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
4831 return None;
4832 }
4833
4834 // The Step is always Signed (because the overflow checks are either
4835 // NSSW or NUSW)
4836 const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
4837 if (PredIsKnownFalse(Accum, AccumExtended)) {
4838 LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
4839 return None;
4840 }
4841
4842 auto AppendPredicate = [&](const SCEV *Expr,
4843 const SCEV *ExtendedExpr) -> void {
4844 if (Expr != ExtendedExpr &&
4845 !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
4846 const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
4847 LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
4848 Predicates.push_back(Pred);
4849 }
4850 };
4851
4852 AppendPredicate(StartVal, StartExtended);
4853 AppendPredicate(Accum, AccumExtended);
4854
4855 // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
4856 // which the casts had been folded away. The caller can rewrite SymbolicPHI
4857 // into NewAR if it will also add the runtime overflow checks specified in
4858 // Predicates.
4859 auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
4860
4861 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
4862 std::make_pair(NewAR, Predicates);
4863 // Remember the result of the analysis for this SCEV at this location.
4864 PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
4865 return PredRewrite;
4866 }
4867
4868 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4869 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
4870 auto *PN = cast<PHINode>(SymbolicPHI->getValue());
4871 const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
4872 if (!L)
4873 return None;
4874
4875 // Check to see if we already analyzed this PHI.
4876 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
4877 if (I != PredicatedSCEVRewrites.end()) {
4878 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
4879 I->second;
4880 // Analysis was done before and failed to create an AddRec:
4881 if (Rewrite.first == SymbolicPHI)
4882 return None;
4883 // Analysis was done before and succeeded in creating an AddRec under
4884 // a predicate:
4885 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
4886 assert(!(Rewrite.second).empty() && "Expected to find Predicates");
4887 return Rewrite;
4888 }
4889
4890 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4891 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
4892
4893 // Record in the cache that the analysis failed
4894 if (!Rewrite) {
4895 SmallVector<const SCEVPredicate *, 3> Predicates;
4896 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
4897 return None;
4898 }
4899
4900 return Rewrite;
4901 }
4902
4903 // FIXME: This utility is currently required because the Rewriter currently
4904 // does not rewrite this expression:
4905 // {0, +, (sext ix (trunc iy to ix) to iy)}
4906 // into {0, +, %step},
4907 // even when the following Equal predicate exists:
4908 // "%step == (sext ix (trunc iy to ix) to iy)".
4909 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
4910 const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
4911 if (AR1 == AR2)
4912 return true;
4913
4914 auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
4915 if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
4916 !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
4917 return false;
4918 return true;
4919 };
4920
4921 if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
4922 !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
4923 return false;
4924 return true;
4925 }
4926
4927 /// A helper function for createAddRecFromPHI to handle simple cases.
4928 ///
4929 /// This function tries to find an AddRec expression for the simplest (yet most
4930 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
4931 /// If it fails, createAddRecFromPHI will use a more general, but slow,
4932 /// technique for finding the AddRec expression.
4933 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
4934 Value *BEValueV,
4935 Value *StartValueV) {
4936 const Loop *L = LI.getLoopFor(PN->getParent());
4937 assert(L && L->getHeader() == PN->getParent());
4938 assert(BEValueV && StartValueV);
4939
4940 auto BO = MatchBinaryOp(BEValueV, DT);
4941 if (!BO)
4942 return nullptr;
4943
4944 if (BO->Opcode != Instruction::Add)
4945 return nullptr;
4946
4947 const SCEV *Accum = nullptr;
4948 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
4949 Accum = getSCEV(BO->RHS);
4950 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
4951 Accum = getSCEV(BO->LHS);
4952
4953 if (!Accum)
4954 return nullptr;
4955
4956 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
4957 if (BO->IsNUW)
4958 Flags = setFlags(Flags, SCEV::FlagNUW);
4959 if (BO->IsNSW)
4960 Flags = setFlags(Flags, SCEV::FlagNSW);
4961
4962 const SCEV *StartVal = getSCEV(StartValueV);
4963 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
4964
4965 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
4966
4967 // We can add Flags to the post-inc expression only if we
4968 // know that it is *undefined behavior* for BEValueV to
4969 // overflow.
4970 if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
4971 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
4972 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
4973
4974 return PHISCEV;
4975 }
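// Illustration (a sketch, not from the original source): this fast path
// covers the canonical induction variable
//   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add nuw nsw i64 %iv, 1
// which becomes {0,+,1}<nuw><nsw><%loop> without going through the symbolic
// SCEVUnknown machinery used by the more general createAddRecFromPHI below.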
4976
4977 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
4978 const Loop *L = LI.getLoopFor(PN->getParent());
4979 if (!L || L->getHeader() != PN->getParent())
4980 return nullptr;
4981
4982 // The loop may have multiple entrances or multiple exits; we can analyze
4983 // this phi as an addrec if it has a unique entry value and a unique
4984 // backedge value.
4985 Value *BEValueV = nullptr, *StartValueV = nullptr;
4986 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
4987 Value *V = PN->getIncomingValue(i);
4988 if (L->contains(PN->getIncomingBlock(i))) {
4989 if (!BEValueV) {
4990 BEValueV = V;
4991 } else if (BEValueV != V) {
4992 BEValueV = nullptr;
4993 break;
4994 }
4995 } else if (!StartValueV) {
4996 StartValueV = V;
4997 } else if (StartValueV != V) {
4998 StartValueV = nullptr;
4999 break;
5000 }
5001 }
5002 if (!BEValueV || !StartValueV)
5003 return nullptr;
5004
5005 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5006 "PHI node already processed?");
5007
5008 // First, try to find an AddRec expression without creating a fictitious
5009 // value for PN.
5010 if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5011 return S;
5012
5013 // Handle PHI node value symbolically.
5014 const SCEV *SymbolicName = getUnknown(PN);
5015 ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
5016
5017 // Using this symbolic name for the PHI, analyze the value coming around
5018 // the back-edge.
5019 const SCEV *BEValue = getSCEV(BEValueV);
5020
5021 // NOTE: If BEValue is loop invariant, we know that the PHI node just
5022 // has a special value for the first iteration of the loop.
5023
5024 // If the value coming around the backedge is an add with the symbolic
5025 // value we just inserted, then we found a simple induction variable!
5026 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
5027 // If there is a single occurrence of the symbolic value, replace it
5028 // with a recurrence.
5029 unsigned FoundIndex = Add->getNumOperands();
5030 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5031 if (Add->getOperand(i) == SymbolicName)
5032 if (FoundIndex == e) {
5033 FoundIndex = i;
5034 break;
5035 }
5036
5037 if (FoundIndex != Add->getNumOperands()) {
5038 // Create an add with everything but the specified operand.
5039 SmallVector<const SCEV *, 8> Ops;
5040 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5041 if (i != FoundIndex)
5042 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
5043 L, *this));
5044 const SCEV *Accum = getAddExpr(Ops);
5045
5046 // This is not a valid addrec if the step amount is varying each
5047 // loop iteration, but is not itself an addrec in this loop.
5048 if (isLoopInvariant(Accum, L) ||
5049 (isa<SCEVAddRecExpr>(Accum) &&
5050 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
5051 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5052
5053 if (auto BO = MatchBinaryOp(BEValueV, DT)) {
5054 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5055 if (BO->IsNUW)
5056 Flags = setFlags(Flags, SCEV::FlagNUW);
5057 if (BO->IsNSW)
5058 Flags = setFlags(Flags, SCEV::FlagNSW);
5059 }
5060 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
5061 // If the increment is an inbounds GEP, then we know the address
5062 // space cannot be wrapped around. We cannot make any guarantee
5063 // about signed or unsigned overflow because pointers are
5064 // unsigned but we may have a negative index from the base
5065 // pointer. We can guarantee that no unsigned wrap occurs if the
5066 // indices form a positive value.
5067 if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
5068 Flags = setFlags(Flags, SCEV::FlagNW);
5069
5070 const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
5071 if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
5072 Flags = setFlags(Flags, SCEV::FlagNUW);
5073 }
5074
5075 // We cannot transfer nuw and nsw flags from subtraction
5076 // operations -- sub nuw X, Y is not the same as add nuw X, -Y
5077 // for instance.
5078 }
5079
5080 const SCEV *StartVal = getSCEV(StartValueV);
5081 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5082
5083 // Okay, for the entire analysis of this edge we assumed the PHI
5084 // to be symbolic. We now need to go back and purge all of the
5085 // entries for the scalars that use the symbolic expression.
5086 forgetSymbolicName(PN, SymbolicName);
5087 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
5088
5089 // We can add Flags to the post-inc expression only if we
5090 // know that it is *undefined behavior* for BEValueV to
5091 // overflow.
5092 if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
5093 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
5094 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5095
5096 return PHISCEV;
5097 }
5098 }
5099 } else {
5100 // Otherwise, this could be a loop like this:
5101 // i = 0; for (j = 1; ..; ++j) { .... i = j; }
5102 // In this case, j = {1,+,1} and BEValue is j.
5103 // Because the other in-value of i (0) fits the evolution of BEValue,
5104 // i really is an addrec evolution.
5105 //
5106 // We can generalize this saying that i is the shifted value of BEValue
5107 // by one iteration:
5108 // PHI(f(0), f({1,+,1})) --> f({0,+,1})
5109 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
5110 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
5111 if (Shifted != getCouldNotCompute() &&
5112 Start != getCouldNotCompute()) {
5113 const SCEV *StartVal = getSCEV(StartValueV);
5114 if (Start == StartVal) {
5115 // Okay, for the entire analysis of this edge we assumed the PHI
5116 // to be symbolic. We now need to go back and purge all of the
5117 // entries for the scalars that use the symbolic expression.
5118 forgetSymbolicName(PN, SymbolicName);
5119 ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
5120 return Shifted;
5121 }
5122 }
5123 }
5124
5125 // Remove the temporary PHI node SCEV that has been inserted while intending
5126 // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
5127 // as it would prevent later (possibly simpler) SCEV expressions from being
5128 // added to the ValueExprMap.
5129 eraseValueFromMap(PN);
5130
5131 return nullptr;
5132 }
5133
5134 // Checks if the SCEV S is available at BB. S is considered available at BB
5135 // if S can be materialized at BB without introducing a fault.
5136 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
5137 BasicBlock *BB) {
5138 struct CheckAvailable {
5139 bool TraversalDone = false;
5140 bool Available = true;
5141
5142 const Loop *L = nullptr; // The loop BB is in (can be nullptr)
5143 BasicBlock *BB = nullptr;
5144 DominatorTree &DT;
5145
5146 CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
5147 : L(L), BB(BB), DT(DT) {}
5148
5149 bool setUnavailable() {
5150 TraversalDone = true;
5151 Available = false;
5152 return false;
5153 }
5154
5155 bool follow(const SCEV *S) {
5156 switch (S->getSCEVType()) {
5157 case scConstant:
5158 case scPtrToInt:
5159 case scTruncate:
5160 case scZeroExtend:
5161 case scSignExtend:
5162 case scAddExpr:
5163 case scMulExpr:
5164 case scUMaxExpr:
5165 case scSMaxExpr:
5166 case scUMinExpr:
5167 case scSMinExpr:
5168 // These expressions are available if their operand(s) is/are.
5169 return true;
5170
5171 case scAddRecExpr: {
5172 // We allow add recurrences that are on the loop that BB is in, or some
5173 // outer loop. This guarantees availability because the value of the
5174 // add recurrence at BB is simply the "current" value of the induction
5175 // variable. We can relax this in the future; for instance an add
5176 // recurrence on a sibling dominating loop is also available at BB.
5177 const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
5178 if (L && (ARLoop == L || ARLoop->contains(L)))
5179 return true;
5180
5181 return setUnavailable();
5182 }
5183
5184 case scUnknown: {
5185 // For SCEVUnknown, we check for simple dominance.
5186 const auto *SU = cast<SCEVUnknown>(S);
5187 Value *V = SU->getValue();
5188
5189 if (isa<Argument>(V))
5190 return false;
5191
5192 if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
5193 return false;
5194
5195 return setUnavailable();
5196 }
5197
5198 case scUDivExpr:
5199 case scCouldNotCompute:
5200 // We do not try to be smart about these at all.
5201 return setUnavailable();
5202 }
5203 llvm_unreachable("Unknown SCEV kind!");
5204 }
5205
5206 bool isDone() { return TraversalDone; }
5207 };
5208
5209 CheckAvailable CA(L, BB, DT);
5210 SCEVTraversal<CheckAvailable> ST(CA);
5211
5212 ST.visitAll(S);
5213 return CA.Available;
5214 }
5215
5216 // Try to match a control flow sequence that branches out at BI and merges back
5217 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
5218 // match.
5219 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
5220 Value *&C, Value *&LHS, Value *&RHS) {
5221 C = BI->getCondition();
5222
5223 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
5224 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));
5225
5226 if (!LeftEdge.isSingleEdge())
5227 return false;
5228
5229 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()");
5230
5231 Use &LeftUse = Merge->getOperandUse(0);
5232 Use &RightUse = Merge->getOperandUse(1);
5233
5234 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
5235 LHS = LeftUse;
5236 RHS = RightUse;
5237 return true;
5238 }
5239
5240 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
5241 LHS = RightUse;
5242 RHS = LeftUse;
5243 return true;
5244 }
5245
5246 return false;
5247 }
5248
5249 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
5250 auto IsReachable =
5251 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
5252 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
5253 const Loop *L = LI.getLoopFor(PN->getParent());
5254
5255 // We don't want to break LCSSA, even in a SCEV expression tree.
5256 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
5257 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L)
5258 return nullptr;
5259
5260 // Try to match
5261 //
5262 // br %cond, label %left, label %right
5263 // left:
5264 // br label %merge
5265 // right:
5266 // br label %merge
5267 // merge:
5268 // V = phi [ %x, %left ], [ %y, %right ]
5269 //
5270 // as "select %cond, %x, %y"
5271
5272 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
5273 assert(IDom && "At least the entry block should dominate PN");
5274
5275 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
5276 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;
5277
5278 if (BI && BI->isConditional() &&
5279 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
5280 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) &&
5281 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent()))
5282 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
5283 }
5284
5285 return nullptr;
5286 }
5287
5288 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
5289 if (const SCEV *S = createAddRecFromPHI(PN))
5290 return S;
5291
5292 if (const SCEV *S = createNodeFromSelectLikePHI(PN))
5293 return S;
5294
5295 // If the PHI has a single incoming value, follow that value, unless the
5296 // PHI's incoming blocks are in a different loop, in which case doing so
5297 // risks breaking LCSSA form. Instcombine would normally zap these, but
5298 // it doesn't have DominatorTree information, so it may miss cases.
5299 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
5300 if (LI.replacementPreservesLCSSAForm(PN, V))
5301 return getSCEV(V);
5302
5303 // If it's not a loop phi, we can't handle it yet.
5304 return getUnknown(PN);
5305 }
5306
5307 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I,
5308 Value *Cond,
5309 Value *TrueVal,
5310 Value *FalseVal) {
5311 // Handle "constant" branch or select. This can occur for instance when a
5312 // loop pass transforms an inner loop and moves on to process the outer loop.
5313 if (auto *CI = dyn_cast<ConstantInt>(Cond))
5314 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
5315
5316 // Try to match some simple smax or umax patterns.
5317 auto *ICI = dyn_cast<ICmpInst>(Cond);
5318 if (!ICI)
5319 return getUnknown(I);
5320
5321 Value *LHS = ICI->getOperand(0);
5322 Value *RHS = ICI->getOperand(1);
5323
5324 switch (ICI->getPredicate()) {
5325 case ICmpInst::ICMP_SLT:
5326 case ICmpInst::ICMP_SLE:
5327 std::swap(LHS, RHS);
5328 LLVM_FALLTHROUGH;
5329 case ICmpInst::ICMP_SGT:
5330 case ICmpInst::ICMP_SGE:
5331 // a >s b ? a+x : b+x -> smax(a, b)+x
5332 // a >s b ? b+x : a+x -> smin(a, b)+x
5333 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
5334 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType());
5335 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType());
5336 const SCEV *LA = getSCEV(TrueVal);
5337 const SCEV *RA = getSCEV(FalseVal);
5338 const SCEV *LDiff = getMinusSCEV(LA, LS);
5339 const SCEV *RDiff = getMinusSCEV(RA, RS);
5340 if (LDiff == RDiff)
5341 return getAddExpr(getSMaxExpr(LS, RS), LDiff);
5342 LDiff = getMinusSCEV(LA, RS);
5343 RDiff = getMinusSCEV(RA, LS);
5344 if (LDiff == RDiff)
5345 return getAddExpr(getSMinExpr(LS, RS), LDiff);
5346 }
5347 break;
5348 case ICmpInst::ICMP_ULT:
5349 case ICmpInst::ICMP_ULE:
5350 std::swap(LHS, RHS);
5351 LLVM_FALLTHROUGH;
5352 case ICmpInst::ICMP_UGT:
5353 case ICmpInst::ICMP_UGE:
5354 // a >u b ? a+x : b+x -> umax(a, b)+x
5355 // a >u b ? b+x : a+x -> umin(a, b)+x
5356 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
5357 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
5358 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType());
5359 const SCEV *LA = getSCEV(TrueVal);
5360 const SCEV *RA = getSCEV(FalseVal);
5361 const SCEV *LDiff = getMinusSCEV(LA, LS);
5362 const SCEV *RDiff = getMinusSCEV(RA, RS);
5363 if (LDiff == RDiff)
5364 return getAddExpr(getUMaxExpr(LS, RS), LDiff);
5365 LDiff = getMinusSCEV(LA, RS);
5366 RDiff = getMinusSCEV(RA, LS);
5367 if (LDiff == RDiff)
5368 return getAddExpr(getUMinExpr(LS, RS), LDiff);
5369 }
5370 break;
5371 case ICmpInst::ICMP_NE:
5372 // n != 0 ? n+x : 1+x -> umax(n, 1)+x
5373 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
5374 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
5375 const SCEV *One = getOne(I->getType());
5376 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
5377 const SCEV *LA = getSCEV(TrueVal);
5378 const SCEV *RA = getSCEV(FalseVal);
5379 const SCEV *LDiff = getMinusSCEV(LA, LS);
5380 const SCEV *RDiff = getMinusSCEV(RA, One);
5381 if (LDiff == RDiff)
5382 return getAddExpr(getUMaxExpr(One, LS), LDiff);
5383 }
5384 break;
5385 case ICmpInst::ICMP_EQ:
5386 // n == 0 ? 1+x : n+x -> umax(n, 1)+x
5387 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
5388 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
5389 const SCEV *One = getOne(I->getType());
5390 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
5391 const SCEV *LA = getSCEV(TrueVal);
5392 const SCEV *RA = getSCEV(FalseVal);
5393 const SCEV *LDiff = getMinusSCEV(LA, One);
5394 const SCEV *RDiff = getMinusSCEV(RA, LS);
5395 if (LDiff == RDiff)
5396 return getAddExpr(getUMaxExpr(One, LS), LDiff);
5397 }
5398 break;
5399 default:
5400 break;
5401 }
5402
5403 return getUnknown(I);
5404 }
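// Worked example (hypothetical values): for
//   %c = icmp sgt i32 %a, %b
//   %r = select i1 %c, i32 %a.plus.x, i32 %b.plus.x
// where %a.plus.x = %a + %x and %b.plus.x = %b + %x, both differences
// LA - LS and RA - RS equal %x, so the select is modeled as
// (smax(%a, %b) + %x).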
5405
5406 /// Expand GEP instructions into add and multiply operations. This allows them
5407 /// to be analyzed by regular SCEV code.
5408 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
5409 // Don't attempt to analyze GEPs over unsized objects.
5410 if (!GEP->getSourceElementType()->isSized())
5411 return getUnknown(GEP);
5412
5413 SmallVector<const SCEV *, 4> IndexExprs;
5414 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
5415 IndexExprs.push_back(getSCEV(*Index));
5416 return getGEPExpr(GEP, IndexExprs);
5417 }
5418
5419 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
5420 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
5421 return C->getAPInt().countTrailingZeros();
5422
5423 if (const SCEVPtrToIntExpr *I = dyn_cast<SCEVPtrToIntExpr>(S))
5424 return GetMinTrailingZeros(I->getOperand());
5425
5426 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
5427 return std::min(GetMinTrailingZeros(T->getOperand()),
5428 (uint32_t)getTypeSizeInBits(T->getType()));
5429
5430 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
5431 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
5432 return OpRes == getTypeSizeInBits(E->getOperand()->getType())
5433 ? getTypeSizeInBits(E->getType())
5434 : OpRes;
5435 }
5436
5437 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
5438 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
5439 return OpRes == getTypeSizeInBits(E->getOperand()->getType())
5440 ? getTypeSizeInBits(E->getType())
5441 : OpRes;
5442 }
5443
5444 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
5445 // The result is the min of all operands' results.
5446 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
5447 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
5448 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
5449 return MinOpRes;
5450 }
5451
5452 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
5453 // The result is the sum of all operands' results.
5454 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
5455 uint32_t BitWidth = getTypeSizeInBits(M->getType());
5456 for (unsigned i = 1, e = M->getNumOperands();
5457 SumOpRes != BitWidth && i != e; ++i)
5458 SumOpRes =
5459 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
5460 return SumOpRes;
5461 }
5462
5463 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
5464 // The result is the min of all operands' results.
5465 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
5466 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
5467 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
5468 return MinOpRes;
5469 }
5470
5471 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
5472 // The result is the min of all operands' results.
5473 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
5474 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
5475 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
5476 return MinOpRes;
5477 }
5478
5479 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
5480 // The result is the min of all operands' results.
5481 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
5482 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
5483 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
5484 return MinOpRes;
5485 }
5486
5487 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
5488 // For a SCEVUnknown, ask ValueTracking.
5489 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
5490 return Known.countMinTrailingZeros();
5491 }
5492
5493 // SCEVUDivExpr
5494 return 0;
5495 }
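// Worked example (hypothetical values, assuming %x has no known trailing
// zeros): for S = (8 * %x) the mul case sums the operands' counts,
// 3 (from the constant 8) + 0 (from %x), giving 3; for S = (%a + (8 * %x))
// the add case takes the minimum of its operands' counts,
// min(tz(%a), 3).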
5496
5497 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
5498 auto I = MinTrailingZerosCache.find(S);
5499 if (I != MinTrailingZerosCache.end())
5500 return I->second;
5501
5502 uint32_t Result = GetMinTrailingZerosImpl(S);
5503 auto InsertPair = MinTrailingZerosCache.insert({S, Result});
5504 assert(InsertPair.second && "Should insert a new key");
5505 return InsertPair.first->second;
5506 }
5507
5508 /// Helper method to assign a range to V from metadata present in the IR.
5509 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
5510 if (Instruction *I = dyn_cast<Instruction>(V))
5511 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
5512 return getConstantRangeFromMetadata(*MD);
5513
5514 return None;
5515 }
5516
5517 void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec,
5518 SCEV::NoWrapFlags Flags) {
5519 if (AddRec->getNoWrapFlags(Flags) != Flags) {
5520 AddRec->setNoWrapFlags(Flags);
5521 UnsignedRanges.erase(AddRec);
5522 SignedRanges.erase(AddRec);
5523 }
5524 }
5525
5526 /// Determine the range for a particular SCEV. If SignHint is
5527 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
5528 /// with a "cleaner" unsigned (resp. signed) representation.
5529 const ConstantRange &
5530 ScalarEvolution::getRangeRef(const SCEV *S,
5531 ScalarEvolution::RangeSignHint SignHint) {
5532 DenseMap<const SCEV *, ConstantRange> &Cache =
5533 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
5534 : SignedRanges;
5535 ConstantRange::PreferredRangeType RangeType =
5536 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED
5537 ? ConstantRange::Unsigned : ConstantRange::Signed;
5538
5539 // See if we've computed this range already.
5540 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
5541 if (I != Cache.end())
5542 return I->second;
5543
5544 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
5545 return setRange(C, SignHint, ConstantRange(C->getAPInt()));
5546
5547 unsigned BitWidth = getTypeSizeInBits(S->getType());
5548 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
5549 using OBO = OverflowingBinaryOperator;
5550
5551 // If the value has known zeros, the maximum value will have those known zeros
5552 // as well.
5553 uint32_t TZ = GetMinTrailingZeros(S);
5554 if (TZ != 0) {
5555 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
5556 ConservativeResult =
5557 ConstantRange(APInt::getMinValue(BitWidth),
5558 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
5559 else
5560 ConservativeResult = ConstantRange(
5561 APInt::getSignedMinValue(BitWidth),
5562 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
5563 }
5564
5565 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
5566 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint);
5567 unsigned WrapType = OBO::AnyWrap;
5568 if (Add->hasNoSignedWrap())
5569 WrapType |= OBO::NoSignedWrap;
5570 if (Add->hasNoUnsignedWrap())
5571 WrapType |= OBO::NoUnsignedWrap;
5572 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
5573 X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint),
5574 WrapType, RangeType);
5575 return setRange(Add, SignHint,
5576 ConservativeResult.intersectWith(X, RangeType));
5577 }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint);
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint));
    return setRange(Mul, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getRangeRef(SMax->getOperand(i), SignHint));
    return setRange(SMax, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getRangeRef(UMax->getOperand(i), SignHint));
    return setRange(UMax, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) {
    ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint);
    for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i)
      X = X.smin(getRangeRef(SMin->getOperand(i), SignHint));
    return setRange(SMin, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) {
    ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint);
    for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i)
      X = X.umin(getRangeRef(UMin->getOperand(i), SignHint));
    return setRange(UMin, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint);
    ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint);
    return setRange(UDiv, SignHint,
                    ConservativeResult.intersectWith(X.udiv(Y), RangeType));
  }

  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
    ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint);
    return setRange(ZExt, SignHint,
                    ConservativeResult.intersectWith(X.zeroExtend(BitWidth),
                                                     RangeType));
  }

  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
    ConstantRange X = getRangeRef(SExt->getOperand(), SignHint);
    return setRange(SExt, SignHint,
                    ConservativeResult.intersectWith(X.signExtend(BitWidth),
                                                     RangeType));
  }

  if (const SCEVPtrToIntExpr *PtrToInt = dyn_cast<SCEVPtrToIntExpr>(S)) {
    ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint);
    return setRange(PtrToInt, SignHint, X);
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint);
    return setRange(Trunc, SignHint,
                    ConservativeResult.intersectWith(X.truncate(BitWidth),
                                                     RangeType));
  }

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
    if (AddRec->hasNoUnsignedWrap()) {
      APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart());
      if (!UnsignedMinValue.isNullValue())
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType);
    }

    // If there's no signed wrap, and all the operands other than the initial
    // value have the same sign or are zero, the value will never be:
    //  1: smaller than the initial value if the operands are non-negative,
    //  2: bigger than the initial value if the operands are non-positive.
    // In either case, the value cannot cross the signed min/max boundary.
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i)))
          AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i)))
          AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()),
                                       APInt::getSignedMinValue(BitWidth)),
            RangeType);
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(
                APInt::getSignedMinValue(BitWidth),
                getSignedRangeMax(AddRec->getStart()) + 1),
            RangeType);
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const SCEV *MaxBECount =
          getConstantMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        auto RangeFromAffine = getRangeForAffineAR(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromAffine, RangeType);

        auto RangeFromFactoring = getRangeViaFactoring(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
      }

      // Now try a symbolic BE count and more powerful methods.
      if (UseExpensiveRangeSharpening) {
        const SCEV *SymbolicMaxBECount =
            getSymbolicMaxBackedgeTakenCount(AddRec->getLoop());
        if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) &&
            getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
            AddRec->hasNoSelfWrap()) {
          auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
              AddRec, SymbolicMaxBECount, BitWidth, SignHint);
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
        }
      }
    }

    return setRange(AddRec, SignHint, std::move(ConservativeResult));
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // Check if the IR explicitly contains !range metadata.
    Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
    if (MDRange.hasValue())
      ConservativeResult =
          ConservativeResult.intersectWith(MDRange.getValue(), RangeType);

    // Split here to avoid paying the compile-time cost of calling both
    // computeKnownBits and ComputeNumSignBits. This restriction can be lifted
    // if needed.
    const DataLayout &DL = getDataLayout();
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
      // For a SCEVUnknown, ask ValueTracking.
      KnownBits Known =
          computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
      if (Known.getBitWidth() != BitWidth)
        Known = Known.zextOrTrunc(BitWidth);
      // If Known does not result in a full set, intersect with it.
      if (Known.getMinValue() != Known.getMaxValue() + 1)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
            RangeType);
    } else {
      assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
             "generalize as needed!");
      unsigned NS =
          ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
      // If the pointer size is larger than the index size, this can cause NS
      // to be larger than BitWidth. So compensate for this.
      if (U->getType()->isPointerTy()) {
        unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
        int ptrIdxDiff = ptrSize - BitWidth;
        if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
          NS -= ptrIdxDiff;
      }

      if (NS > 1)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                          APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
            RangeType);
    }

    // The range of a Phi is a subset of the union of the ranges of its
    // inputs.
    if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
      // Make sure that we do not run over cyclic Phis.
      if (PendingPhiRanges.insert(Phi).second) {
        ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
        for (auto &Op : Phi->operands()) {
          auto OpRange = getRangeRef(getSCEV(Op), SignHint);
          RangeFromOps = RangeFromOps.unionWith(OpRange);
          // No point in continuing if we already have a full set.
          if (RangeFromOps.isFullSet())
            break;
        }
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromOps, RangeType);
        bool Erased = PendingPhiRanges.erase(Phi);
        assert(Erased && "Failed to erase Phi properly?");
        (void)Erased;
      }
    }

    return setRange(U, SignHint, std::move(ConservativeResult));
  }

  return setRange(S, SignHint, std::move(ConservativeResult));
}

// Given a StartRange, Step and MaxBECount for an expression, compute a range
// of values that the expression can take. Initially, the expression has a
// value from StartRange and then is changed by Step up to MaxBECount times.
// The Signed argument defines whether we treat Step as signed or unsigned.
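//
// For example (illustrative): with Step = 2, StartRange = [10, 20),
// MaxBECount = 3 and Signed = false, the value can change by at most
// Offset = 6, so the computed range is [10, 26).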
static ConstantRange getRangeForAffineARHelper(APInt Step,
                                               const ConstantRange &StartRange,
                                               const APInt &MaxBECount,
                                               unsigned BitWidth, bool Signed) {
  // If either Step or MaxBECount is 0, then the expression won't change, and
  // we just need to return the initial range.
  if (Step == 0 || MaxBECount == 0)
    return StartRange;

  // If we don't know anything about the initial value (i.e. StartRange is
  // FullRange), then we don't know anything about the final range either.
  // Return FullRange.
  if (StartRange.isFullSet())
    return ConstantRange::getFull(BitWidth);

  // If Step is signed and negative, then we use its absolute value, but we
  // also note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
    //   abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // These equations hold true due to the well-defined wrap-around behavior
    // of APInt.
    Step = Step.abs();

  // Check if Offset is more than the full span of BitWidth. If it is, the
  // expression is guaranteed to overflow.
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange::getFull(BitWidth);

  // Offset is by how much the expression can change. The checks above
  // guarantee no overflow here.
  APInt Offset = Step * MaxBECount;

  // The minimum value of the final range will match the minimal value of
  // StartRange if the expression is increasing and will be decreased by
  // Offset otherwise. The maximum value of the final range will match the
  // maximal value of StartRange if the expression is decreasing and will be
  // increased by Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the
  // initial range (due to wrap-around). This means that the expression can
  // take any value in this bitwidth, and we have to return the full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange::getFull(BitWidth);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // No overflow detected, return the [StartLower, StartUpper + Offset + 1)
  // range.
  return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
}

ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const SCEV *MaxBECount,
                                                   unsigned BitWidth) {
  assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
         getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
         "Precondition!");

  MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
  APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);

  // First, consider the step signed.
  ConstantRange StartSRange = getSignedRange(Start);
  ConstantRange StepSRange = getSignedRange(Step);

  // If Step can be both positive and negative, we need to find ranges for the
  // maximum absolute step values in both directions and union them.
  ConstantRange SR =
      getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
                                MaxBECountValue, BitWidth, /* Signed = */ true);
  SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
                                              StartSRange, MaxBECountValue,
                                              BitWidth, /* Signed = */ true));

  // Next, consider the step unsigned.
  ConstantRange UR = getRangeForAffineARHelper(
      getUnsignedRangeMax(Step), getUnsignedRange(Start),
      MaxBECountValue, BitWidth, /* Signed = */ false);

  // Finally, intersect the signed and unsigned ranges.
  return SR.intersectWith(UR, ConstantRange::Smallest);
}

ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
    const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
    ScalarEvolution::RangeSignHint SignHint) {
  assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!\n");
  assert(AddRec->hasNoSelfWrap() &&
         "This only works for non-self-wrapping AddRecs!");
  const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
  const SCEV *Step = AddRec->getStepRecurrence(*this);
  // Only deal with a constant step to save compile time.
  if (!isa<SCEVConstant>(Step))
    return ConstantRange::getFull(BitWidth);
  // Let's make sure that we can prove that we do not self-wrap during
  // MaxBECount iterations. We need this because MaxBECount is a maximum
  // iteration count estimate, and we might infer nw from some exit for which
  // we do not know the max exit count (or any other side reasoning).
  // TODO: Turn into assert at some point.
  MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
  const SCEV *RangeWidth = getMinusOne(AddRec->getType());
  const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
  const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
  if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
                                         MaxItersWithoutWrap))
    return ConstantRange::getFull(BitWidth);

  ICmpInst::Predicate LEPred =
      IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  ICmpInst::Predicate GEPred =
      IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
  const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);

  // We know that there is no self-wrap. Let's take the Start and End values
  // and look at all intermediate values V1, V2, ..., Vn that IndVar takes
  // during the iteration. They either lie inside the range
  // [Min(Start, End), Max(Start, End)] or outside it:
  //
  //   Case 1: RangeMin ... Start V1 ... VN End ... RangeMax;
  //   Case 2: RangeMin Vk ... V1 Start ... End Vn ... Vk + 1 RangeMax;
  //
  // The no-self-wrap flag guarantees that the intermediate values cannot be
  // BOTH outside and inside the range [Min(Start, End), Max(Start, End)].
  // Using that knowledge, let's try to prove that we are dealing with Case 1.
  // It is so if Start <= End and the step is positive, or Start >= End and
  // the step is negative.
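  //
  // E.g. (illustrative): for {0,+,1}<nw> on i8 with MaxBECount = 100 we get
  // Start = 0 and End = 100; the step is positive and 0 <= 100, so Case 1
  // applies and every intermediate value lies within [0, 100].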
  const SCEV *Start = AddRec->getStart();
  ConstantRange StartRange = getRangeRef(Start, SignHint);
  ConstantRange EndRange = getRangeRef(End, SignHint);
  ConstantRange RangeBetween = StartRange.unionWith(EndRange);
  // If they already cover the full iteration space, we will know nothing
  // useful even if we prove what we want to prove.
  if (RangeBetween.isFullSet())
    return RangeBetween;
  // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax).
  bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet()
                               : RangeBetween.isWrappedSet();
  if (IsWrappedSet)
    return ConstantRange::getFull(BitWidth);

  if (isKnownPositive(Step) &&
      isKnownPredicateViaConstantRanges(LEPred, Start, End))
    return RangeBetween;
  else if (isKnownNegative(Step) &&
           isKnownPredicateViaConstantRanges(GEPred, Start, End))
    return RangeBetween;
  return ConstantRange::getFull(BitWidth);
}

ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
                                                    const SCEV *Step,
                                                    const SCEV *MaxBECount,
                                                    unsigned BitWidth) {
  // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
  //                          == RangeOf({A,+,P}) union RangeOf({B,+,Q})
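  //
  // E.g. (illustrative IR):
  //   %start = select i1 %c, i32 0, i32 100
  //   %step  = select i1 %c, i32 1, i32 -1
  // lets {%start,+,%step} be treated as {0,+,1} union {100,+,-1}.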

  struct SelectPattern {
    Value *Condition = nullptr;
    APInt TrueValue;
    APInt FalseValue;

    explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
                           const SCEV *S) {
      Optional<unsigned> CastOp;
      APInt Offset(BitWidth, 0);

      assert(SE.getTypeSizeInBits(S->getType()) == BitWidth &&
             "Should be!");

      // Peel off a constant offset:
      if (auto *SA = dyn_cast<SCEVAddExpr>(S)) {
        // In the future we could consider being smarter here and handle
        // {Start+Step,+,Step} too.
        if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0)))
          return;

        Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt();
        S = SA->getOperand(1);
      }

      // Peel off a cast operation.
      if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) {
        CastOp = SCast->getSCEVType();
        S = SCast->getOperand();
      }

      using namespace llvm::PatternMatch;

      auto *SU = dyn_cast<SCEVUnknown>(S);
      const APInt *TrueVal, *FalseVal;
      if (!SU ||
          !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal),
                                          m_APInt(FalseVal)))) {
        Condition = nullptr;
        return;
      }

      TrueValue = *TrueVal;
      FalseValue = *FalseVal;

      // Re-apply the cast we peeled off earlier.
      if (CastOp.hasValue())
        switch (*CastOp) {
        default:
          llvm_unreachable("Unknown SCEV cast type!");

        case scTruncate:
          TrueValue = TrueValue.trunc(BitWidth);
          FalseValue = FalseValue.trunc(BitWidth);
          break;
        case scZeroExtend:
          TrueValue = TrueValue.zext(BitWidth);
          FalseValue = FalseValue.zext(BitWidth);
          break;
        case scSignExtend:
          TrueValue = TrueValue.sext(BitWidth);
          FalseValue = FalseValue.sext(BitWidth);
          break;
        }

      // Re-apply the constant offset we peeled off earlier.
      TrueValue += Offset;
      FalseValue += Offset;
    }

    bool isRecognized() { return Condition != nullptr; }
  };

  SelectPattern StartPattern(*this, BitWidth, Start);
  if (!StartPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  SelectPattern StepPattern(*this, BitWidth, Step);
  if (!StepPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  if (StartPattern.Condition != StepPattern.Condition) {
    // We don't handle this case today; but we could, by considering four
    // possibilities below instead of two. I'm not sure if there are cases
    // where that will help over what getRange already does, though.
    return ConstantRange::getFull(BitWidth);
  }

  // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to
  // construct arbitrary general SCEV expressions here. This function is called
  // from deep in the call stack, and calling getSCEV (on a sext instruction,
  // say) can end up caching a suboptimal value.

  // FIXME: without the explicit `this` receiver below, MSVC errors out with
  // C2352 and C2512 (otherwise it isn't needed).

  const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue);
  const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue);
  const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue);
  const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue);

  ConstantRange TrueRange =
      this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth);
  ConstantRange FalseRange =
      this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth);

  return TrueRange.unionWith(FalseRange);
}

SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
  if (isa<ConstantExpr>(V))
    return SCEV::FlagAnyWrap;
  const BinaryOperator *BinOp = cast<BinaryOperator>(V);

  // Return early if there are no flags to propagate to the SCEV.
  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BinOp->hasNoUnsignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
  if (BinOp->hasNoSignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
  if (Flags == SCEV::FlagAnyWrap)
    return SCEV::FlagAnyWrap;

  return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
}

bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
  // Here we check that I is in the header of the innermost loop containing I,
  // since we only deal with instructions in the loop header. The actual loop
  // we need to check later will come from an add recurrence, but getting that
  // requires computing the SCEV of the operands, which can be expensive. We
  // can do this check cheaply to rule out some cases early.
  Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent());
  if (InnermostContainingLoop == nullptr ||
      InnermostContainingLoop->getHeader() != I->getParent())
    return false;

  // Only proceed if we can prove that I does not yield poison.
  if (!programUndefinedIfPoison(I))
    return false;

  // At this point we know that if I is executed, then it does not wrap
  // according to at least one of NSW or NUW. If I is not executed, then we do
  // not know if the calculation that I represents would wrap. Multiple
  // instructions can map to the same SCEV. If we apply NSW or NUW from I to
  // the SCEV, we must guarantee no wrapping for that SCEV also when it is
  // derived from other instructions that map to the same SCEV. We cannot make
  // that guarantee for cases where I is not executed. So we need to find the
  // loop that I is considered in relation to and prove that I is executed for
  // every iteration of that loop. That implies that the value that I
  // calculates does not wrap anywhere in the loop, so then we can apply the
  // flags to the SCEV.
  //
  // We check isLoopInvariant to disambiguate in case we are adding recurrences
  // from different loops, so that we know which loop to prove that I is
  // executed in.
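  //
  // For example (illustrative): if %a = add nsw i32 %i, 1 sits in the loop
  // header while %b = add i32 %i, 1 sits in a conditionally executed block,
  // both map to the SCEV (%i + 1). NSW taken from %a is only safe to attach
  // if %a executes on every iteration, since %b gives no guarantee on the
  // iterations where %a would be skipped.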
  for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
    // I could be an extractvalue from a call to an overflow intrinsic.
    // TODO: We can do better here in some cases.
    if (!isSCEVable(I->getOperand(OpIndex)->getType()))
      return false;
    const SCEV *Op = getSCEV(I->getOperand(OpIndex));
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
      bool AllOtherOpsLoopInvariant = true;
      for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
           ++OtherOpIndex) {
        if (OtherOpIndex != OpIndex) {
          const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
          if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
            AllOtherOpsLoopInvariant = false;
            break;
          }
        }
      }
      if (AllOtherOpsLoopInvariant &&
          isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
        return true;
    }
  }
  return false;
}

bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
  // If we know that \c I can never be poison period, then that's enough.
  if (isSCEVExprNeverPoison(I))
    return true;

  // For an add recurrence specifically, we assume that infinite loops without
  // side effects are undefined behavior, and then reason as follows:
  //
  // If the add recurrence is poison in any iteration, it is poison on all
  // future iterations (since incrementing poison yields poison). If the result
  // of the add recurrence is fed into the loop latch condition and the loop
  // does not contain any throws or exiting blocks other than the latch, we now
  // have the ability to "choose" whether the backedge is taken or not (by
  // choosing a sufficiently evil value for the poison feeding into the branch)
  // for every iteration including and after the one in which \p I first became
  // poison. There are two possibilities (let's call the iteration in which
  // \p I first became poison K):
  //
  // 1. In the set of iterations including and after K, the loop body executes
  //    no side effects. In this case executing the backedge an infinite
  //    number of times will yield undefined behavior.
  //
  // 2. In the set of iterations including and after K, the loop body executes
  //    at least one side effect. In this case, that specific instance of side
  //    effect is control dependent on poison, which also yields undefined
  //    behavior.

  auto *ExitingBB = L->getExitingBlock();
  auto *LatchBB = L->getLoopLatch();
  if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
    return false;

  SmallPtrSet<const Instruction *, 16> Pushed;
  SmallVector<const Instruction *, 8> PoisonStack;

  // We start by assuming \c I, the post-inc add recurrence, is poison. Only
  // things that are known to be poison under that assumption go on the
  // PoisonStack.
  Pushed.insert(I);
  PoisonStack.push_back(I);

  bool LatchControlDependentOnPoison = false;
  while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
    const Instruction *Poison = PoisonStack.pop_back_val();

    for (auto *PoisonUser : Poison->users()) {
      if (propagatesPoison(cast<Operator>(PoisonUser))) {
        if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
          PoisonStack.push_back(cast<Instruction>(PoisonUser));
      } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
        assert(BI->isConditional() && "Only possibility!");
        if (BI->getParent() == LatchBB) {
          LatchControlDependentOnPoison = true;
          break;
        }
      }
    }
  }

  return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
}

ScalarEvolution::LoopProperties
ScalarEvolution::getLoopProperties(const Loop *L) {
  using LoopProperties = ScalarEvolution::LoopProperties;

  auto Itr = LoopPropertiesCache.find(L);
  if (Itr == LoopPropertiesCache.end()) {
    auto HasSideEffects = [](Instruction *I) {
      if (auto *SI = dyn_cast<StoreInst>(I))
        return !SI->isSimple();

      return I->mayHaveSideEffects();
    };

    LoopProperties LP = {/* HasNoAbnormalExits */ true,
                         /* HasNoSideEffects */ true};

    for (auto *BB : L->getBlocks())
      for (auto &I : *BB) {
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          LP.HasNoAbnormalExits = false;
        if (HasSideEffects(&I))
          LP.HasNoSideEffects = false;
        if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
          break; // We're already as pessimistic as we can get.
      }

    auto InsertPair = LoopPropertiesCache.insert({L, LP});
    assert(InsertPair.second && "We just checked!");
    Itr = InsertPair.first;
  }

  return Itr->second;
}

const SCEV *ScalarEvolution::createSCEV(Value *V) {
  if (!isSCEVable(V->getType()))
    return getUnknown(V);

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // Don't attempt to analyze instructions in blocks that aren't
    // reachable. Such instructions don't matter, and they aren't required
    // to obey basic rules for definitions dominating uses which this
    // analysis depends on.
    if (!DT.isReachableFromEntry(I->getParent()))
      return getUnknown(UndefValue::get(V->getType()));
  } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
    return getConstant(CI);
  else if (isa<ConstantPointerNull>(V))
    // FIXME: we shouldn't special-case null pointer constant.
    return getZero(V->getType());
  else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee());
  else if (!isa<ConstantExpr>(V))
    return getUnknown(V);

  Operator *U = cast<Operator>(V);
  if (auto BO = MatchBinaryOp(U, DT)) {
    switch (BO->Opcode) {
    case Instruction::Add: {
      // The simple thing to do would be to just call getSCEV on both operands
      // and call getAddExpr with the result. However if we're looking at a
      // bunch of things all added together, this can be quite inefficient,
      // because it leads to N-1 getAddExpr calls for N ultimate operands.
      // Instead, gather up all the operands and make a single getAddExpr call.
      // LLVM IR canonical form means we need only traverse the left operands.
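      //
      // E.g. (illustrative): for (((%a + %b) + %c) + %d) we collect
      // {%d, %c, %b, %a} while walking left and emit one getAddExpr call
      // instead of three nested ones.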
      SmallVector<const SCEV *, 4> AddOps;
      do {
        if (BO->Op) {
          if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
            AddOps.push_back(OpSCEV);
            break;
          }

          // If a NUW or NSW flag can be applied to the SCEV for this
          // addition, then compute the SCEV for this addition by itself
          // with a separate call to getAddExpr. We need to do that
          // instead of pushing the operands of the addition onto AddOps,
          // since the flags are only known to apply to this particular
          // addition - they may not apply to other additions that can be
          // formed with operands from AddOps.
          const SCEV *RHS = getSCEV(BO->RHS);
          SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
          if (Flags != SCEV::FlagAnyWrap) {
            const SCEV *LHS = getSCEV(BO->LHS);
            if (BO->Opcode == Instruction::Sub)
              AddOps.push_back(getMinusSCEV(LHS, RHS, Flags));
            else
              AddOps.push_back(getAddExpr(LHS, RHS, Flags));
            break;
          }
        }

        if (BO->Opcode == Instruction::Sub)
          AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS)));
        else
          AddOps.push_back(getSCEV(BO->RHS));

        auto NewBO = MatchBinaryOp(BO->LHS, DT);
        if (!NewBO || (NewBO->Opcode != Instruction::Add &&
                       NewBO->Opcode != Instruction::Sub)) {
          AddOps.push_back(getSCEV(BO->LHS));
          break;
        }
        BO = NewBO;
      } while (true);

      return getAddExpr(AddOps);
    }

    case Instruction::Mul: {
      SmallVector<const SCEV *, 4> MulOps;
      do {
        if (BO->Op) {
          if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
            MulOps.push_back(OpSCEV);
            break;
          }

          SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
          if (Flags != SCEV::FlagAnyWrap) {
            MulOps.push_back(
                getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags));
            break;
          }
        }

        MulOps.push_back(getSCEV(BO->RHS));
        auto NewBO = MatchBinaryOp(BO->LHS, DT);
        if (!NewBO || NewBO->Opcode != Instruction::Mul) {
          MulOps.push_back(getSCEV(BO->LHS));
          break;
        }
        BO = NewBO;
      } while (true);

      return getMulExpr(MulOps);
    }
    case Instruction::UDiv:
      return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
    case Instruction::URem:
      return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
    case Instruction::Sub: {
      SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
      if (BO->Op)
        Flags = getNoWrapFlagsFromUB(BO->Op);
      return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags);
    }
    case Instruction::And:
      // For an expression like x&255 that merely masks off the high bits,
      // use zext(trunc(x)) as the SCEV expression.
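      // E.g. (illustrative): on i32, x & 255 keeps only the low 8 bits, so
      // it is modeled as a zext to i32 of a trunc of x to i8.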
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        if (CI->isZero())
          return getSCEV(BO->RHS);
        if (CI->isMinusOne())
          return getSCEV(BO->LHS);
        const APInt &A = CI->getValue();

        // Instcombine's ShrinkDemandedConstant may strip bits out of
        // constants, obscuring what would otherwise be a low-bits mask.
        // Use computeKnownBits to compute what ShrinkDemandedConstant
        // knew about to reconstruct a low-bits mask value.
        unsigned LZ = A.countLeadingZeros();
        unsigned TZ = A.countTrailingZeros();
        unsigned BitWidth = A.getBitWidth();
        KnownBits Known(BitWidth);
        computeKnownBits(BO->LHS, Known, getDataLayout(),
                         0, &AC, nullptr, &DT);

        APInt EffectiveMask =
            APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
        if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) {
          const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ));
          const SCEV *LHS = getSCEV(BO->LHS);
          const SCEV *ShiftedLHS = nullptr;
          if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) {
            if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) {
              // For an expression like (x * 8) & 8, simplify the multiply.
              unsigned MulZeros = OpC->getAPInt().countTrailingZeros();
              unsigned GCD = std::min(MulZeros, TZ);
              APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD);
              SmallVector<const SCEV *, 4> MulOps;
              MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD)));
              MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end());
              auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags());
              ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt));
            }
          }
          if (!ShiftedLHS)
            ShiftedLHS = getUDivExpr(LHS, MulCount);
          return getMulExpr(
              getZeroExtendExpr(
                  getTruncateExpr(ShiftedLHS,
                                  IntegerType::get(getContext(),
                                                   BitWidth - LZ - TZ)),
                  BO->LHS->getType()),
              MulCount);
        }
      }
      break;

    case Instruction::Or:
      // If the RHS of the Or is a constant, we may have something like:
      // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
      // optimizations will transparently handle this case.
      //
      // In order for this transformation to be safe, the LHS must be of the
      // form X*(2^n) and the Or constant must be less than 2^n.
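      //
      // E.g. (illustrative IR):
      //   %t = shl i32 %x, 2   ; X*4, so the low two bits are known zero
      //   %r = or i32 %t, 1    ; can be modeled as (4 * %x) + 1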
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        const SCEV *LHS = getSCEV(BO->LHS);
        const APInt &CIVal = CI->getValue();
        if (GetMinTrailingZeros(LHS) >=
            (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
          // Build a plain add SCEV.
          return getAddExpr(LHS, getSCEV(CI),
                            (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW));
        }
      }
      break;

    case Instruction::Xor:
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        // If the RHS of xor is -1, then this is a not operation.
        if (CI->isMinusOne())
          return getNotSCEV(getSCEV(BO->LHS));

        // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
        // This is a variant of the check for xor with -1, and it handles
        // the case where instcombine has trimmed non-demanded bits out
        // of an xor with -1.
        if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS))
          if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1)))
            if (LBO->getOpcode() == Instruction::And &&
                LCI->getValue() == CI->getValue())
              if (const SCEVZeroExtendExpr *Z =
                      dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) {
                Type *UTy = BO->LHS->getType();
                const SCEV *Z0 = Z->getOperand();
                Type *Z0Ty = Z0->getType();
                unsigned Z0TySize = getTypeSizeInBits(Z0Ty);

                // If C is a low-bits mask, the zero extend is serving to
                // mask off the high bits. Complement the operand and
                // re-apply the zext.
                if (CI->getValue().isMask(Z0TySize))
                  return getZeroExtendExpr(getNotSCEV(Z0), UTy);

                // If C is a single bit, it may be in the sign-bit position
                // before the zero-extend. In this case, represent the xor
                // using an add, which is equivalent, and re-apply the zext.
                APInt Trunc = CI->getValue().trunc(Z0TySize);
                if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
                    Trunc.isSignMask())
                  return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
                                           UTy);
              }
      }
      break;

    case Instruction::Shl:
      // Turn shift left of a constant amount into a multiply.
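      // E.g. (illustrative): shl i32 %x, 3 is modeled as (%x * 8), with
      // nuw/nsw carried over only when the rules below show it is safe.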
      if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
        uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();

        // If the shift count is not less than the bitwidth, the result of
        // the shift is undefined. Don't try to analyze it, because the
        // resolution chosen here may differ from the resolution chosen in
        // other parts of the compiler.
        if (SA->getValue().uge(BitWidth))
          break;

        // We can safely preserve the nuw flag in all cases. It's also safe to
        // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation
        // requires special handling. It can be preserved as long as we're not
        // left shifting by bitwidth - 1.
        auto Flags = SCEV::FlagAnyWrap;
        if (BO->Op) {
          auto MulFlags = getNoWrapFlagsFromUB(BO->Op);
          if ((MulFlags & SCEV::FlagNSW) &&
              ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1)))
            Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW);
          if (MulFlags & SCEV::FlagNUW)
            Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW);
        }

        Constant *X = ConstantInt::get(
            getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
      }
      break;

    case Instruction::AShr: {
      // AShr X, C, where C is a constant.
      ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
      if (!CI)
        break;

      Type *OuterTy = BO->LHS->getType();
      uint64_t BitWidth = getTypeSizeInBits(OuterTy);
      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (CI->getValue().uge(BitWidth))
        break;

      if (CI->isZero())
        return getSCEV(BO->LHS); // shift by zero --> noop

      uint64_t AShrAmt = CI->getZExtValue();
      Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);

      Operator *L = dyn_cast<Operator>(BO->LHS);
      if (L && L->getOpcode() == Instruction::Shl) {
        // X = Shl A, n
        // Y = AShr X, m
        // Both n and m are constant.

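        // E.g. (illustrative): with n = m = 24 on i32,
        // (ashr (shl %a, 24), 24) sign-extends the low 8 bits of %a, i.e.
        // sext(trunc(%a) to i8) back to i32.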
        const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
        if (L->getOperand(1) == BO->RHS)
          // For a two-shift sext-inreg, i.e. n = m,
          // use sext(trunc(x)) as the SCEV expression.
          return getSignExtendExpr(
              getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);

        ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
        if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
          uint64_t ShlAmt = ShlAmtCI->getZExtValue();
          if (ShlAmt > AShrAmt) {
            // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
            // expression. We already checked that ShlAmt < BitWidth, so
            // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
            // ShlAmt - AShrAmt < BitWidth - AShrAmt.
            APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
                                            ShlAmt - AShrAmt);
            return getSignExtendExpr(
                getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
                           getConstant(Mul)), OuterTy);
          }
        }
      }
      if (BO->IsExact) {
        // Given exact arithmetic in-bounds right-shift by a constant,
        // we can lower it into: (abs(x) EXACT/u (1<<C)) * signum(x)
        const SCEV *X = getSCEV(BO->LHS);
        const SCEV *AbsX = getAbsExpr(X, /*IsNSW=*/false);
        APInt Mult = APInt::getOneBitSet(BitWidth, AShrAmt);
        const SCEV *Div = getUDivExactExpr(AbsX, getConstant(Mult));
        return getMulExpr(Div, getSignumExpr(X), SCEV::FlagNSW);
      }
      break;
    }
    }
  }

  switch (U->getOpcode()) {
  case Instruction::Trunc:
    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::ZExt:
    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::SExt:
    if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
      // The NSW flag of a subtract does not always survive the conversion to
      // A + (-1)*B. By pushing sign extension onto its operands we are much
      // more likely to preserve NSW and allow later AddRec optimisations.
      //
      // NOTE: This is effectively duplicating this logic from getSignExtend:
      //   sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
      // but by that point the NSW information has potentially been lost.
      if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
        Type *Ty = U->getType();
        auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
        auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
        return getMinusSCEV(V1, V2, SCEV::FlagNSW);
      }
    }
    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::BitCast:
    // BitCasts are no-op casts so we just eliminate the cast.
    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
      return getSCEV(U->getOperand(0));
    break;

  case Instruction::PtrToInt: {
    // A pointer-to-integer cast is straightforward, so do model it.
    Value *Ptr = U->getOperand(0);
    const SCEV *Op = getSCEV(Ptr);
    Type *DstIntTy = U->getType();
    // SCEV doesn't have a constant pointer expression type, but it supports
    // the nullptr constant (and only that one), which is modelled in SCEV as
    // a zero integer constant. So just skip the ptrtoint cast for constants.
    if (isa<SCEVConstant>(Op))
      return getTruncateOrZeroExtend(Op, DstIntTy);
    Type *PtrTy = Ptr->getType();
    Type *IntPtrTy = getDataLayout().getIntPtrType(PtrTy);
    // But only if the effective SCEV (integer) type is wide enough to
    // represent all possible pointer values.
    if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(PtrTy)) !=
        getDataLayout().getTypeSizeInBits(IntPtrTy))
      return getUnknown(V);
    return getPtrToIntExpr(Op, DstIntTy);
  }
  case Instruction::IntToPtr:
    // Just don't deal with inttoptr casts.
    return getUnknown(V);

  case Instruction::SDiv:
    // If both operands are non-negative, this is just an udiv.
    if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
        isKnownNonNegative(getSCEV(U->getOperand(1))))
      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
    break;

  case Instruction::SRem:
    // If both operands are non-negative, this is just an urem.
    if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
        isKnownNonNegative(getSCEV(U->getOperand(1))))
      return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
    break;

  case Instruction::GetElementPtr:
    return createNodeForGEP(cast<GEPOperator>(U));

  case Instruction::PHI:
    return createNodeForPHI(cast<PHINode>(U));

  case Instruction::Select:
    // U can also be a select constant expr, which we let fall through. Since
    // createNodeForSelect only works for a condition that is an `ICmpInst`,
    // and constant expressions cannot have instructions as operands, we'd
    // have returned getUnknown for a select constant expression anyway.
    if (isa<Instruction>(U))
      return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
                                      U->getOperand(1), U->getOperand(2));
    break;

  case Instruction::Call:
  case Instruction::Invoke:
    if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
      return getSCEV(RV);

    if (auto *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::abs:
        return getAbsExpr(
            getSCEV(II->getArgOperand(0)),
            /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne());
      case Intrinsic::umax:
        return getUMaxExpr(getSCEV(II->getArgOperand(0)),
                           getSCEV(II->getArgOperand(1)));
      case Intrinsic::umin:
        return getUMinExpr(getSCEV(II->getArgOperand(0)),
                           getSCEV(II->getArgOperand(1)));
      case Intrinsic::smax:
        return getSMaxExpr(getSCEV(II->getArgOperand(0)),
                           getSCEV(II->getArgOperand(1)));
      case Intrinsic::smin:
        return getSMinExpr(getSCEV(II->getArgOperand(0)),
                           getSCEV(II->getArgOperand(1)));
      case Intrinsic::usub_sat: {
        const SCEV *X = getSCEV(II->getArgOperand(0));
        const SCEV *Y = getSCEV(II->getArgOperand(1));
        const SCEV *ClampedY = getUMinExpr(X, Y);
        return getMinusSCEV(X, ClampedY, SCEV::FlagNUW);
      }
      case Intrinsic::uadd_sat: {
        const SCEV *X = getSCEV(II->getArgOperand(0));
        const SCEV *Y = getSCEV(II->getArgOperand(1));
        const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y));
        return getAddExpr(ClampedX, Y, SCEV::FlagNUW);
      }
      case Intrinsic::start_loop_iterations:
        // A start_loop_iterations is just equivalent to its first operand for
        // SCEV purposes.
        return getSCEV(II->getArgOperand(0));
      default:
        break;
      }
    }
    break;
  }

  return getUnknown(V);
}

//===----------------------------------------------------------------------===//
//                   Iteration Count Computation Code
//

static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
  if (!ExitCount)
    return 0;

  ConstantInt *ExitConst = ExitCount->getValue();

  // Guard against huge trip counts.
  if (ExitConst->getValue().getActiveBits() > 32)
    return 0;

  // In case of integer overflow, this returns 0, which is correct.
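  // E.g. (illustrative): a backedge-taken count of 99 gives a trip count of
  // 100, while a count of 2^32 - 1 wraps the addition below to 0, i.e.
  // "unknown".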
  return ((unsigned)ExitConst->getZExtValue()) + 1;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripCount(L, ExitingBB);

  // No trip count information for multiple exits.
  return 0;
}

unsigned
ScalarEvolution::getSmallConstantTripCount(const Loop *L,
                                           const BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEVConstant *ExitCount =
      dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  return getConstantTripCount(ExitCount);
}

unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
  const auto *MaxExitCount =
      dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
  return getConstantTripCount(MaxExitCount);
}

unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripMultiple(L, ExitingBB);

  // No trip multiple information for multiple exits.
  return 0;
}

/// Returns the largest constant divisor of the trip count of this loop as a
/// normal unsigned value, if possible. This means that the actual trip count
/// is always a multiple of the returned value (don't forget the trip count
/// could very well be zero as well!).
///
/// Returns 1 if the trip count is unknown or not guaranteed to be a multiple
/// of a constant (which is also the case if the trip count is simply
/// constant; use getSmallConstantTripCount for that case). It will also
/// return 1 if the trip count is very large (>= 2^32).
///
/// As explained in the comments for getSmallConstantTripCount, this assumes
/// that control exits the loop via ExitingBlock.
unsigned
ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
                                              const BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEV *ExitCount = getExitCount(L, ExitingBlock);
  if (ExitCount == getCouldNotCompute())
    return 1;

  // Get the trip count from the BE count by adding 1.
  const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));

  const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
  if (!TC)
    // Attempt to factor more general cases. Returns the greatest power-of-two
    // divisor. If overflow happens, the trip count expression is still
    // divisible by the greatest power-of-two divisor returned.
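    // E.g. (illustrative): for TCExpr = (4 * %n), GetMinTrailingZeros
    // reports 2 and the returned multiple is 1 << 2 = 4.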
6770 return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr));
6771
6772 ConstantInt *Result = TC->getValue();
6773
6774 // Guard against huge trip counts (this requires checking
6775 // for zero to handle the case where the trip count == -1 and the
6776 // addition wraps).
6777 if (!Result || Result->getValue().getActiveBits() > 32 ||
6778 Result->getValue().getActiveBits() == 0)
6779 return 1;
6780
6781 return (unsigned)Result->getZExtValue();
6782 }
6783
getExitCount(const Loop * L,const BasicBlock * ExitingBlock,ExitCountKind Kind)6784 const SCEV *ScalarEvolution::getExitCount(const Loop *L,
6785 const BasicBlock *ExitingBlock,
6786 ExitCountKind Kind) {
6787 switch (Kind) {
6788 case Exact:
6789 case SymbolicMaximum:
6790 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
6791 case ConstantMaximum:
6792 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this);
6793 };
6794 llvm_unreachable("Invalid ExitCountKind!");
6795 }
6796
6797 const SCEV *
getPredicatedBackedgeTakenCount(const Loop * L,SCEVUnionPredicate & Preds)6798 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
6799 SCEVUnionPredicate &Preds) {
6800 return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
6801 }
6802
getBackedgeTakenCount(const Loop * L,ExitCountKind Kind)6803 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
6804 ExitCountKind Kind) {
6805 switch (Kind) {
6806 case Exact:
6807 return getBackedgeTakenInfo(L).getExact(L, this);
6808 case ConstantMaximum:
6809 return getBackedgeTakenInfo(L).getConstantMax(this);
6810 case SymbolicMaximum:
6811 return getBackedgeTakenInfo(L).getSymbolicMax(L, this);
6812 };
6813 llvm_unreachable("Invalid ExitCountKind!");
6814 }
6815
isBackedgeTakenCountMaxOrZero(const Loop * L)6816 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
6817 return getBackedgeTakenInfo(L).isConstantMaxOrZero(this);
6818 }
6819
6820 /// Push PHI nodes in the header of the given loop onto the given Worklist.
6821 static void
PushLoopPHIs(const Loop * L,SmallVectorImpl<Instruction * > & Worklist)6822 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
6823 BasicBlock *Header = L->getHeader();
6824
6825 // Push all Loop-header PHIs onto the Worklist stack.
6826 for (PHINode &PN : Header->phis())
6827 Worklist.push_back(&PN);
6828 }
6829
6830 const ScalarEvolution::BackedgeTakenInfo &
getPredicatedBackedgeTakenInfo(const Loop * L)6831 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
6832 auto &BTI = getBackedgeTakenInfo(L);
6833 if (BTI.hasFullInfo())
6834 return BTI;
6835
6836 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
6837
6838 if (!Pair.second)
6839 return Pair.first->second;
6840
6841 BackedgeTakenInfo Result =
6842 computeBackedgeTakenCount(L, /*AllowPredicates=*/true);
6843
6844 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
6845 }
6846
6847 ScalarEvolution::BackedgeTakenInfo &
getBackedgeTakenInfo(const Loop * L)6848 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
6849 // Initially insert an invalid entry for this loop. If the insertion
6850 // succeeds, proceed to actually compute a backedge-taken count and
6851 // update the value. The temporary CouldNotCompute value tells SCEV
6852 // code elsewhere that it shouldn't attempt to request a new
6853 // backedge-taken count, which could result in infinite recursion.
6854 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
6855 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
6856 if (!Pair.second)
6857 return Pair.first->second;
6858
6859 // computeBackedgeTakenCount may allocate memory for its result. Inserting it
6860 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
6861 // must be cleared in this scope.
6862 BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
6863
6864 // In product build, there are no usage of statistic.
6865 (void)NumTripCountsComputed;
6866 (void)NumTripCountsNotComputed;
6867 #if LLVM_ENABLE_STATS || !defined(NDEBUG)
6868 const SCEV *BEExact = Result.getExact(L, this);
6869 if (BEExact != getCouldNotCompute()) {
6870 assert(isLoopInvariant(BEExact, L) &&
6871 isLoopInvariant(Result.getConstantMax(this), L) &&
6872 "Computed backedge-taken count isn't loop invariant for loop!");
6873 ++NumTripCountsComputed;
6874 } else if (Result.getConstantMax(this) == getCouldNotCompute() &&
6875 isa<PHINode>(L->getHeader()->begin())) {
6876 // Only count loops that have phi nodes as not being computable.
6877 ++NumTripCountsNotComputed;
6878 }
6879 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG)
6880
6881 // Now that we know more about the trip count for this loop, forget any
6882 // existing SCEV values for PHI nodes in this loop since they are only
6883 // conservative estimates made without the benefit of trip count
6884 // information. This is similar to the code in forgetLoop, except that
6885 // it handles SCEVUnknown PHI nodes specially.
6886 if (Result.hasAnyInfo()) {
6887 SmallVector<Instruction *, 16> Worklist;
6888 PushLoopPHIs(L, Worklist);
6889
6890 SmallPtrSet<Instruction *, 8> Discovered;
6891 while (!Worklist.empty()) {
6892 Instruction *I = Worklist.pop_back_val();
6893
6894 ValueExprMapType::iterator It =
6895 ValueExprMap.find_as(static_cast<Value *>(I));
6896 if (It != ValueExprMap.end()) {
6897 const SCEV *Old = It->second;
6898
6899 // SCEVUnknown for a PHI either means that it has an unrecognized
6900 // structure, or it's a PHI that's in the progress of being computed
6901 // by createNodeForPHI. In the former case, additional loop trip
6902 // count information isn't going to change anything. In the later
6903 // case, createNodeForPHI will perform the necessary updates on its
6904 // own when it gets to that point.
6905 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
6906 eraseValueFromMap(It->first);
6907 forgetMemoizedResults(Old);
6908 }
6909 if (PHINode *PN = dyn_cast<PHINode>(I))
6910 ConstantEvolutionLoopExitValue.erase(PN);
6911 }
6912
6913 // Since we don't need to invalidate anything for correctness and we're
6914 // only invalidating to make SCEV's results more precise, we get to stop
6915 // early to avoid invalidating too much. This is especially important in
6916 // cases like:
6917 //
6918 // %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
6919 // loop0:
6920 // %pn0 = phi
6921 // ...
6922 // loop1:
6923 // %pn1 = phi
6924 // ...
6925 //
6926 // where both loop0 and loop1's backedge taken count uses the SCEV
6927 // expression for %v. If we don't have the early stop below then in cases
6928 // like the above, getBackedgeTakenInfo(loop1) will clear out the trip
6929 // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip
6930 // count for loop1, effectively nullifying SCEV's trip count cache.
6931 for (auto *U : I->users())
6932 if (auto *I = dyn_cast<Instruction>(U)) {
6933 auto *LoopForUser = LI.getLoopFor(I->getParent());
6934 if (LoopForUser && L->contains(LoopForUser) &&
6935 Discovered.insert(I).second)
6936 Worklist.push_back(I);
6937 }
6938 }
6939 }
6940
6941 // Re-lookup the insert position, since the call to
6942 // computeBackedgeTakenCount above could result in a
6943 // recusive call to getBackedgeTakenInfo (on a different
6944 // loop), which would invalidate the iterator computed
6945 // earlier.
6946 return BackedgeTakenCounts.find(L)->second = std::move(Result);
6947 }
6948
forgetAllLoops()6949 void ScalarEvolution::forgetAllLoops() {
6950 // This method is intended to forget all info about loops. It should
6951 // invalidate caches as if the following happened:
6952 // - The trip counts of all loops have changed arbitrarily
6953 // - Every llvm::Value has been updated in place to produce a different
6954 // result.
6955 BackedgeTakenCounts.clear();
6956 PredicatedBackedgeTakenCounts.clear();
6957 LoopPropertiesCache.clear();
6958 ConstantEvolutionLoopExitValue.clear();
6959 ValueExprMap.clear();
6960 ValuesAtScopes.clear();
6961 LoopDispositions.clear();
6962 BlockDispositions.clear();
6963 UnsignedRanges.clear();
6964 SignedRanges.clear();
6965 ExprValueMap.clear();
6966 HasRecMap.clear();
6967 MinTrailingZerosCache.clear();
6968 PredicatedSCEVRewrites.clear();
6969 }
6970
6971 void ScalarEvolution::forgetLoop(const Loop *L) {
6972 // Drop any stored trip count value.
6973 auto RemoveLoopFromBackedgeMap =
6974 [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) {
6975 auto BTCPos = Map.find(L);
6976 if (BTCPos != Map.end()) {
6977 BTCPos->second.clear();
6978 Map.erase(BTCPos);
6979 }
6980 };
6981
6982 SmallVector<const Loop *, 16> LoopWorklist(1, L);
6983 SmallVector<Instruction *, 32> Worklist;
6984 SmallPtrSet<Instruction *, 16> Visited;
6985
6986 // Iterate over all the loops and sub-loops to drop SCEV information.
6987 while (!LoopWorklist.empty()) {
6988 auto *CurrL = LoopWorklist.pop_back_val();
6989
6990 RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL);
6991 RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL);
6992
6993 // Drop information about predicated SCEV rewrites for this loop.
6994 for (auto I = PredicatedSCEVRewrites.begin();
6995 I != PredicatedSCEVRewrites.end();) {
6996 std::pair<const SCEV *, const Loop *> Entry = I->first;
6997 if (Entry.second == CurrL)
6998 PredicatedSCEVRewrites.erase(I++);
6999 else
7000 ++I;
7001 }
7002
7003 auto LoopUsersItr = LoopUsers.find(CurrL);
7004 if (LoopUsersItr != LoopUsers.end()) {
7005 for (auto *S : LoopUsersItr->second)
7006 forgetMemoizedResults(S);
7007 LoopUsers.erase(LoopUsersItr);
7008 }
7009
7010 // Drop information about expressions based on loop-header PHIs.
7011 PushLoopPHIs(CurrL, Worklist);
7012
7013 while (!Worklist.empty()) {
7014 Instruction *I = Worklist.pop_back_val();
7015 if (!Visited.insert(I).second)
7016 continue;
7017
7018 ValueExprMapType::iterator It =
7019 ValueExprMap.find_as(static_cast<Value *>(I));
7020 if (It != ValueExprMap.end()) {
7021 eraseValueFromMap(It->first);
7022 forgetMemoizedResults(It->second);
7023 if (PHINode *PN = dyn_cast<PHINode>(I))
7024 ConstantEvolutionLoopExitValue.erase(PN);
7025 }
7026
7027 PushDefUseChildren(I, Worklist);
7028 }
7029
7030 LoopPropertiesCache.erase(CurrL);
7031 // Forget all contained loops too, to avoid dangling entries in the
7032 // ValuesAtScopes map.
7033 LoopWorklist.append(CurrL->begin(), CurrL->end());
7034 }
7035 }
7036
7037 void ScalarEvolution::forgetTopmostLoop(const Loop *L) {
7038 while (Loop *Parent = L->getParentLoop())
7039 L = Parent;
7040 forgetLoop(L);
7041 }
7042
7043 void ScalarEvolution::forgetValue(Value *V) {
7044 Instruction *I = dyn_cast<Instruction>(V);
7045 if (!I) return;
7046
7047 // Drop information about expressions based on loop-header PHIs.
7048 SmallVector<Instruction *, 16> Worklist;
7049 Worklist.push_back(I);
7050
7051 SmallPtrSet<Instruction *, 8> Visited;
7052 while (!Worklist.empty()) {
7053 I = Worklist.pop_back_val();
7054 if (!Visited.insert(I).second)
7055 continue;
7056
7057 ValueExprMapType::iterator It =
7058 ValueExprMap.find_as(static_cast<Value *>(I));
7059 if (It != ValueExprMap.end()) {
7060 eraseValueFromMap(It->first);
7061 forgetMemoizedResults(It->second);
7062 if (PHINode *PN = dyn_cast<PHINode>(I))
7063 ConstantEvolutionLoopExitValue.erase(PN);
7064 }
7065
7066 PushDefUseChildren(I, Worklist);
7067 }
7068 }
7069
7070 void ScalarEvolution::forgetLoopDispositions(const Loop *L) {
7071 LoopDispositions.clear();
7072 }
7073
7074 /// Get the exact loop backedge taken count considering all loop exits. A
7075 /// computable result can only be returned for loops with all exiting blocks
7076 /// dominating the latch. howFarToZero assumes that the limit of each loop test
7077 /// is never skipped. This is a valid assumption as long as the loop exits via
7078 /// that test. For precise results, it is the caller's responsibility to specify
7079 /// the relevant loop exiting block using getExact(ExitingBlock, SE).
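/// As an illustrative sketch (not from the source): in a loop with two
/// computable exits that both dominate the latch, one taken after n
/// iterations and the other after m, the exact backedge-taken count is
/// umin(n, m) -- control leaves through whichever exit is reached first.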
7080 const SCEV *
7081 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE,
7082 SCEVUnionPredicate *Preds) const {
7083 // If any exits were not computable, the loop is not computable.
7084 if (!isComplete() || ExitNotTaken.empty())
7085 return SE->getCouldNotCompute();
7086
7087 const BasicBlock *Latch = L->getLoopLatch();
7088 // All exiting blocks we have collected must dominate the only backedge.
7089 if (!Latch)
7090 return SE->getCouldNotCompute();
7091
7092 // All exiting blocks we have gathered dominate the loop's latch, so the
7093 // exact trip count is simply the minimum of all these calculated exit counts.
7094 SmallVector<const SCEV *, 2> Ops;
7095 for (auto &ENT : ExitNotTaken) {
7096 const SCEV *BECount = ENT.ExactNotTaken;
7097 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
7098 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
7099 "We should only have known counts for exiting blocks that dominate "
7100 "latch!");
7101
7102 Ops.push_back(BECount);
7103
7104 if (Preds && !ENT.hasAlwaysTruePredicate())
7105 Preds->add(ENT.Predicate.get());
7106
7107 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
7108 "Predicate should be always true!");
7109 }
7110
7111 return SE->getUMinFromMismatchedTypes(Ops);
7112 }
7113
7114 /// Get the exact not taken count for this loop exit.
7115 const SCEV *
7116 ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock,
7117 ScalarEvolution *SE) const {
7118 for (auto &ENT : ExitNotTaken)
7119 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
7120 return ENT.ExactNotTaken;
7121
7122 return SE->getCouldNotCompute();
7123 }
7124
7125 const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
7126 const BasicBlock *ExitingBlock, ScalarEvolution *SE) const {
7127 for (auto &ENT : ExitNotTaken)
7128 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
7129 return ENT.MaxNotTaken;
7130
7131 return SE->getCouldNotCompute();
7132 }
7133
7134 /// getConstantMax - Get the constant max backedge taken count for the loop.
7135 const SCEV *
7136 ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const {
7137 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
7138 return !ENT.hasAlwaysTruePredicate();
7139 };
7140
7141 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getConstantMax())
7142 return SE->getCouldNotCompute();
7143
7144 assert((isa<SCEVCouldNotCompute>(getConstantMax()) ||
7145 isa<SCEVConstant>(getConstantMax())) &&
7146 "No point in having a non-constant max backedge taken count!");
7147 return getConstantMax();
7148 }
7149
7150 const SCEV *
7151 ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L,
7152 ScalarEvolution *SE) {
7153 if (!SymbolicMax)
7154 SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L);
7155 return SymbolicMax;
7156 }
7157
7158 bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
7159 ScalarEvolution *SE) const {
7160 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
7161 return !ENT.hasAlwaysTruePredicate();
7162 };
7163 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue);
7164 }
7165
7166 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S,
7167 ScalarEvolution *SE) const {
7168 if (getConstantMax() && getConstantMax() != SE->getCouldNotCompute() &&
7169 SE->hasOperand(getConstantMax(), S))
7170 return true;
7171
7172 for (auto &ENT : ExitNotTaken)
7173 if (ENT.ExactNotTaken != SE->getCouldNotCompute() &&
7174 SE->hasOperand(ENT.ExactNotTaken, S))
7175 return true;
7176
7177 return false;
7178 }
7179
7180 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
7181 : ExactNotTaken(E), MaxNotTaken(E) {
7182 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
7183 isa<SCEVConstant>(MaxNotTaken)) &&
7184 "No point in having a non-constant max backedge taken count!");
7185 }
7186
7187 ScalarEvolution::ExitLimit::ExitLimit(
7188 const SCEV *E, const SCEV *M, bool MaxOrZero,
7189 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList)
7190 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) {
7191 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) ||
7192 !isa<SCEVCouldNotCompute>(MaxNotTaken)) &&
7193 "Exact is not allowed to be less precise than Max");
7194 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
7195 isa<SCEVConstant>(MaxNotTaken)) &&
7196 "No point in having a non-constant max backedge taken count!");
7197 for (auto *PredSet : PredSetList)
7198 for (auto *P : *PredSet)
7199 addPredicate(P);
7200 }
7201
7202 ScalarEvolution::ExitLimit::ExitLimit(
7203 const SCEV *E, const SCEV *M, bool MaxOrZero,
7204 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet)
7205 : ExitLimit(E, M, MaxOrZero, {&PredSet}) {
7206 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
7207 isa<SCEVConstant>(MaxNotTaken)) &&
7208 "No point in having a non-constant max backedge taken count!");
7209 }
7210
7211 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M,
7212 bool MaxOrZero)
7213 : ExitLimit(E, M, MaxOrZero, None) {
7214 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
7215 isa<SCEVConstant>(MaxNotTaken)) &&
7216 "No point in having a non-constant max backedge taken count!");
7217 }
7218
7219 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
7220 /// computable exit into a persistent ExitNotTakenInfo array.
7221 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
7222 ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts,
7223 bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero)
7224 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
7225 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
7226
7227 ExitNotTaken.reserve(ExitCounts.size());
7228 std::transform(
7229 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
7230 [&](const EdgeExitInfo &EEI) {
7231 BasicBlock *ExitBB = EEI.first;
7232 const ExitLimit &EL = EEI.second;
7233 if (EL.Predicates.empty())
7234 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
7235 nullptr);
7236
7237 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
7238 for (auto *Pred : EL.Predicates)
7239 Predicate->add(Pred);
7240
7241 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
7242 std::move(Predicate));
7243 });
7244 assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
7245 isa<SCEVConstant>(ConstantMax)) &&
7246 "No point in having a non-constant max backedge taken count!");
7247 }
7248
7249 /// Invalidate this result and free the ExitNotTakenInfo array.
7250 void ScalarEvolution::BackedgeTakenInfo::clear() {
7251 ExitNotTaken.clear();
7252 }
7253
7254 /// Compute the number of times the backedge of the specified loop will execute.
7255 ScalarEvolution::BackedgeTakenInfo
7256 ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
7257 bool AllowPredicates) {
7258 SmallVector<BasicBlock *, 8> ExitingBlocks;
7259 L->getExitingBlocks(ExitingBlocks);
7260
7261 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
7262
7263 SmallVector<EdgeExitInfo, 4> ExitCounts;
7264 bool CouldComputeBECount = true;
7265 BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
7266 const SCEV *MustExitMaxBECount = nullptr;
7267 const SCEV *MayExitMaxBECount = nullptr;
7268 bool MustExitMaxOrZero = false;
7269
7270 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
7271 // and compute maxBECount.
7272 // Do a union of all the predicates here.
7273 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
7274 BasicBlock *ExitBB = ExitingBlocks[i];
7275
7276 // We canonicalize untaken exits to br (constant); ignore them so that
7277 // proving an exit untaken doesn't negatively impact our ability to reason
7278 // about the loop as a whole.
7279 if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
7280 if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
7281 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
7282 if ((ExitIfTrue && CI->isZero()) || (!ExitIfTrue && CI->isOne()))
7283 continue;
7284 }
7285
7286 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);
7287
7288 assert((AllowPredicates || EL.Predicates.empty()) &&
7289 "Predicated exit limit when predicates are not allowed!");
7290
7291 // 1. For each exit that can be computed, add an entry to ExitCounts.
7292 // CouldComputeBECount is true only if all exits can be computed.
7293 if (EL.ExactNotTaken == getCouldNotCompute())
7294 // We couldn't compute an exact value for this exit, so
7295 // we won't be able to compute an exact value for the loop.
7296 CouldComputeBECount = false;
7297 else
7298 ExitCounts.emplace_back(ExitBB, EL);
7299
7300 // 2. Derive the loop's MaxBECount from each exit's max number of
7301 // non-exiting iterations. Partition the loop exits into two kinds:
7302 // LoopMustExits and LoopMayExits.
7303 //
7304 // If the exit dominates the loop latch, it is a LoopMustExit otherwise it
7305 // is a LoopMayExit. If any computable LoopMustExit is found, then
7306 // MaxBECount is the minimum EL.MaxNotTaken of computable
7307 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
7308 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
7309 // computable EL.MaxNotTaken.
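// For instance (illustrative only): a must-exit bounded by at most 10
// iterations and a may-exit bounded by 100 give MaxBECount = 10; if only
// may-exits are computable, we must take their maximum instead, since a
// may-exit is not guaranteed to be tested on every iteration.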
7310 if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
7311 DT.dominates(ExitBB, Latch)) {
7312 if (!MustExitMaxBECount) {
7313 MustExitMaxBECount = EL.MaxNotTaken;
7314 MustExitMaxOrZero = EL.MaxOrZero;
7315 } else {
7316 MustExitMaxBECount =
7317 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
7318 }
7319 } else if (MayExitMaxBECount != getCouldNotCompute()) {
7320 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
7321 MayExitMaxBECount = EL.MaxNotTaken;
7322 else {
7323 MayExitMaxBECount =
7324 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
7325 }
7326 }
7327 }
7328 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
7329 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
7330 // The loop backedge will be taken the maximum or zero times if there's
7331 // a single exit that must be taken the maximum or zero times.
7332 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
7333 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
7334 MaxBECount, MaxOrZero);
7335 }
7336
7337 ScalarEvolution::ExitLimit
7338 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
7339 bool AllowPredicates) {
7340 assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
7341 // If our exiting block does not dominate the latch, then its connection with
7342 // the loop's exit limit may be far from trivial.
7343 const BasicBlock *Latch = L->getLoopLatch();
7344 if (!Latch || !DT.dominates(ExitingBlock, Latch))
7345 return getCouldNotCompute();
7346
7347 bool IsOnlyExit = (L->getExitingBlock() != nullptr);
7348 Instruction *Term = ExitingBlock->getTerminator();
7349 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
7350 assert(BI->isConditional() && "If unconditional, it can't be in loop!");
7351 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
7352 assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
7353 "It should have one successor in loop and one exit block!");
7354 // Proceed to the next level to examine the exit condition expression.
7355 return computeExitLimitFromCond(
7356 L, BI->getCondition(), ExitIfTrue,
7357 /*ControlsExit=*/IsOnlyExit, AllowPredicates);
7358 }
7359
7360 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
7361 // For a switch, make sure that there is a single exit from the loop.
7362 BasicBlock *Exit = nullptr;
7363 for (auto *SBB : successors(ExitingBlock))
7364 if (!L->contains(SBB)) {
7365 if (Exit) // Multiple exit successors.
7366 return getCouldNotCompute();
7367 Exit = SBB;
7368 }
7369 assert(Exit && "Exiting block must have at least one exit");
7370 return computeExitLimitFromSingleExitSwitch(L, SI, Exit,
7371 /*ControlsExit=*/IsOnlyExit);
7372 }
7373
7374 return getCouldNotCompute();
7375 }
7376
7377 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond(
7378 const Loop *L, Value *ExitCond, bool ExitIfTrue,
7379 bool ControlsExit, bool AllowPredicates) {
7380 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
7381 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
7382 ControlsExit, AllowPredicates);
7383 }
7384
7385 Optional<ScalarEvolution::ExitLimit>
7386 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond,
7387 bool ExitIfTrue, bool ControlsExit,
7388 bool AllowPredicates) {
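// The casts below touch the invariant key fields so they are not flagged
// as unused when asserts are compiled out; the assert that follows is
// their only other use.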
7389 (void)this->L;
7390 (void)this->ExitIfTrue;
7391 (void)this->AllowPredicates;
7392
7393 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
7394 this->AllowPredicates == AllowPredicates &&
7395 "Variance in assumed invariant key components!");
7396 auto Itr = TripCountMap.find({ExitCond, ControlsExit});
7397 if (Itr == TripCountMap.end())
7398 return None;
7399 return Itr->second;
7400 }
7401
7402 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond,
7403 bool ExitIfTrue,
7404 bool ControlsExit,
7405 bool AllowPredicates,
7406 const ExitLimit &EL) {
7407 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
7408 this->AllowPredicates == AllowPredicates &&
7409 "Variance in assumed invariant key components!");
7410
7411 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL});
7412 assert(InsertResult.second && "Expected successful insertion!");
7413 (void)InsertResult;
7414 (void)ExitIfTrue;
7415 }
7416
7417 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
7418 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
7419 bool ControlsExit, bool AllowPredicates) {
7420
7421 if (auto MaybeEL =
7422 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
7423 return *MaybeEL;
7424
7425 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue,
7426 ControlsExit, AllowPredicates);
7427 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL);
7428 return EL;
7429 }
7430
7431 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
7432 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
7433 bool ControlsExit, bool AllowPredicates) {
7434 // Check if the controlling expression for this loop is an And or Or.
7435 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
7436 if (BO->getOpcode() == Instruction::And) {
7437 // Recurse on the operands of the and.
7438 bool EitherMayExit = !ExitIfTrue;
7439 ExitLimit EL0 = computeExitLimitFromCondCached(
7440 Cache, L, BO->getOperand(0), ExitIfTrue,
7441 ControlsExit && !EitherMayExit, AllowPredicates);
7442 ExitLimit EL1 = computeExitLimitFromCondCached(
7443 Cache, L, BO->getOperand(1), ExitIfTrue,
7444 ControlsExit && !EitherMayExit, AllowPredicates);
7445 // Be robust against unsimplified IR for the form "and i1 X, true"
7446 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1)))
7447 return CI->isOne() ? EL0 : EL1;
7448 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(0)))
7449 return CI->isOne() ? EL1 : EL0;
7450 const SCEV *BECount = getCouldNotCompute();
7451 const SCEV *MaxBECount = getCouldNotCompute();
7452 if (EitherMayExit) {
7453 // Both conditions must be true for the loop to continue executing.
7454 // Choose the less conservative count.
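// E.g. (an illustrative sketch): for "br i1 (and i1 %a, %b), %body, %exit"
// the loop keeps running only while both %a and %b hold, so it exits as
// soon as either one fails and the two exit counts combine as a umin.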
7455 if (EL0.ExactNotTaken == getCouldNotCompute() ||
7456 EL1.ExactNotTaken == getCouldNotCompute())
7457 BECount = getCouldNotCompute();
7458 else
7459 BECount =
7460 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
7461 if (EL0.MaxNotTaken == getCouldNotCompute())
7462 MaxBECount = EL1.MaxNotTaken;
7463 else if (EL1.MaxNotTaken == getCouldNotCompute())
7464 MaxBECount = EL0.MaxNotTaken;
7465 else
7466 MaxBECount =
7467 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
7468 } else {
7469 // Both conditions must be true at the same time for the loop to exit.
7470 // For now, be conservative.
7471 if (EL0.MaxNotTaken == EL1.MaxNotTaken)
7472 MaxBECount = EL0.MaxNotTaken;
7473 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
7474 BECount = EL0.ExactNotTaken;
7475 }
7476
7477 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
7478 // to be more aggressive when computing BECount than when computing
7479 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and
7480 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken
7481 // to not.
7482 if (isa<SCEVCouldNotCompute>(MaxBECount) &&
7483 !isa<SCEVCouldNotCompute>(BECount))
7484 MaxBECount = getConstant(getUnsignedRangeMax(BECount));
7485
7486 return ExitLimit(BECount, MaxBECount, false,
7487 {&EL0.Predicates, &EL1.Predicates});
7488 }
7489 if (BO->getOpcode() == Instruction::Or) {
7490 // Recurse on the operands of the or.
7491 bool EitherMayExit = ExitIfTrue;
7492 ExitLimit EL0 = computeExitLimitFromCondCached(
7493 Cache, L, BO->getOperand(0), ExitIfTrue,
7494 ControlsExit && !EitherMayExit, AllowPredicates);
7495 ExitLimit EL1 = computeExitLimitFromCondCached(
7496 Cache, L, BO->getOperand(1), ExitIfTrue,
7497 ControlsExit && !EitherMayExit, AllowPredicates);
7498 // Be robust against unsimplified IR for the form "or i1 X, true"
7499 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1)))
7500 return CI->isZero() ? EL0 : EL1;
7501 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(0)))
7502 return CI->isZero() ? EL1 : EL0;
7503 const SCEV *BECount = getCouldNotCompute();
7504 const SCEV *MaxBECount = getCouldNotCompute();
7505 if (EitherMayExit) {
7506 // Both conditions must be false for the loop to continue executing.
7507 // Choose the less conservative count.
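// E.g. (an illustrative sketch): for "br i1 (or i1 %a, %b), %exit, %body"
// the loop exits as soon as either %a or %b becomes true, so again the
// two exit counts combine as a umin.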
7508 if (EL0.ExactNotTaken == getCouldNotCompute() ||
7509 EL1.ExactNotTaken == getCouldNotCompute())
7510 BECount = getCouldNotCompute();
7511 else
7512 BECount =
7513 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
7514 if (EL0.MaxNotTaken == getCouldNotCompute())
7515 MaxBECount = EL1.MaxNotTaken;
7516 else if (EL1.MaxNotTaken == getCouldNotCompute())
7517 MaxBECount = EL0.MaxNotTaken;
7518 else
7519 MaxBECount =
7520 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
7521 } else {
7522 // Both conditions must be false at the same time for the loop to exit.
7523 // For now, be conservative.
7524 if (EL0.MaxNotTaken == EL1.MaxNotTaken)
7525 MaxBECount = EL0.MaxNotTaken;
7526 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
7527 BECount = EL0.ExactNotTaken;
7528 }
7529 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
7530 // to be more aggressive when computing BECount than when computing
7531 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and
7532 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken
7533 // to not.
7534 if (isa<SCEVCouldNotCompute>(MaxBECount) &&
7535 !isa<SCEVCouldNotCompute>(BECount))
7536 MaxBECount = getConstant(getUnsignedRangeMax(BECount));
7537
7538 return ExitLimit(BECount, MaxBECount, false,
7539 {&EL0.Predicates, &EL1.Predicates});
7540 }
7541 }
7542
7543 // With an icmp, it may be feasible to compute an exact backedge-taken count.
7544 // Proceed to the next level to examine the icmp.
7545 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
7546 ExitLimit EL =
7547 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit);
7548 if (EL.hasFullInfo() || !AllowPredicates)
7549 return EL;
7550
7551 // Try again, but use SCEV predicates this time.
7552 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit,
7553 /*AllowPredicates=*/true);
7554 }
7555
7556 // Check for a constant condition. These are normally stripped out by
7557 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
7558 // preserve the CFG and is temporarily leaving constant conditions
7559 // in place.
7560 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
7561 if (ExitIfTrue == !CI->getZExtValue())
7562 // The backedge is always taken.
7563 return getCouldNotCompute();
7564 else
7565 // The backedge is never taken.
7566 return getZero(CI->getType());
7567 }
7568
7569 // If it's not an integer or pointer comparison then compute it the hard way.
7570 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
7571 }
7572
7573 ScalarEvolution::ExitLimit
7574 ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
7575 ICmpInst *ExitCond,
7576 bool ExitIfTrue,
7577 bool ControlsExit,
7578 bool AllowPredicates) {
7579 // If the condition was exit on true, convert the condition to exit on false.
7580 ICmpInst::Predicate Pred;
7581 if (!ExitIfTrue)
7582 Pred = ExitCond->getPredicate();
7583 else
7584 Pred = ExitCond->getInversePredicate();
7585 const ICmpInst::Predicate OriginalPred = Pred;
7586
7587 // Handle common loops like: for (X = "string"; *X; ++X)
7588 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
7589 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
7590 ExitLimit ItCnt =
7591 computeLoadConstantCompareExitLimit(LI, RHS, L, Pred);
7592 if (ItCnt.hasAnyInfo())
7593 return ItCnt;
7594 }
7595
7596 const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
7597 const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
7598
7599 // Try to evaluate any dependencies out of the loop.
7600 LHS = getSCEVAtScope(LHS, L);
7601 RHS = getSCEVAtScope(RHS, L);
7602
7603 // At this point, we would like to compute how many iterations of the
7604 // loop the predicate will return true for these inputs.
7605 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
7606 // If there is a loop-invariant operand, force it into the RHS.
7607 std::swap(LHS, RHS);
7608 Pred = ICmpInst::getSwappedPredicate(Pred);
7609 }
7610
7611 // Simplify the operands before analyzing them.
7612 (void)SimplifyICmpOperands(Pred, LHS, RHS);
7613
7614 // If we have a comparison of a chrec against a constant, try to use value
7615 // ranges to answer this query.
7616 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
7617 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
7618 if (AddRec->getLoop() == L) {
7619 // Form the constant range.
7620 ConstantRange CompRange =
7621 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt());
7622
7623 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
7624 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
7625 }
7626
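// A sketch of the common case handled below: an exit condition "i != n"
// with i = {0,+,1}<L> becomes howFarToZero({-n,+,1}<L>), which can often
// be solved in closed form as n.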
7627 switch (Pred) {
7628 case ICmpInst::ICMP_NE: { // while (X != Y)
7629 // Convert to: while (X-Y != 0)
7630 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit,
7631 AllowPredicates);
7632 if (EL.hasAnyInfo()) return EL;
7633 break;
7634 }
7635 case ICmpInst::ICMP_EQ: { // while (X == Y)
7636 // Convert to: while (X-Y == 0)
7637 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
7638 if (EL.hasAnyInfo()) return EL;
7639 break;
7640 }
7641 case ICmpInst::ICMP_SLT:
7642 case ICmpInst::ICMP_ULT: { // while (X < Y)
7643 bool IsSigned = Pred == ICmpInst::ICMP_SLT;
7644 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit,
7645 AllowPredicates);
7646 if (EL.hasAnyInfo()) return EL;
7647 break;
7648 }
7649 case ICmpInst::ICMP_SGT:
7650 case ICmpInst::ICMP_UGT: { // while (X > Y)
7651 bool IsSigned = Pred == ICmpInst::ICMP_SGT;
7652 ExitLimit EL =
7653 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit,
7654 AllowPredicates);
7655 if (EL.hasAnyInfo()) return EL;
7656 break;
7657 }
7658 default:
7659 break;
7660 }
7661
7662 auto *ExhaustiveCount =
7663 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
7664
7665 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
7666 return ExhaustiveCount;
7667
7668 return computeShiftCompareExitLimit(ExitCond->getOperand(0),
7669 ExitCond->getOperand(1), L, OriginalPred);
7670 }
7671
7672 ScalarEvolution::ExitLimit
7673 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
7674 SwitchInst *Switch,
7675 BasicBlock *ExitingBlock,
7676 bool ControlsExit) {
7677 assert(!L->contains(ExitingBlock) && "Not an exiting block!");
7678
7679 // Give up if the exit is the default dest of a switch.
7680 if (Switch->getDefaultDest() == ExitingBlock)
7681 return getCouldNotCompute();
7682
7683 assert(L->contains(Switch->getDefaultDest()) &&
7684 "Default case must not exit the loop!");
7685 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
7686 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));
7687
7688 // while (X != Y) --> while (X-Y != 0)
7689 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
7690 if (EL.hasAnyInfo())
7691 return EL;
7692
7693 return getCouldNotCompute();
7694 }
7695
7696 static ConstantInt *
7697 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
7698 ScalarEvolution &SE) {
7699 const SCEV *InVal = SE.getConstant(C);
7700 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
7701 assert(isa<SCEVConstant>(Val) &&
7702 "Evaluation of SCEV at constant didn't fold correctly?");
7703 return cast<SCEVConstant>(Val)->getValue();
7704 }
7705
7706 /// Given an exit condition of 'icmp op load X, cst', try to see if we can
7707 /// compute the backedge execution count.
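/// A typical shape this matches (illustrative only) is
/// "for (i = 0; table[i] != 0; ++i)" with table a constant global array:
/// each iteration's load folds to a constant, so we can search for the
/// first iteration on which the comparison exits.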
7708 ScalarEvolution::ExitLimit
7709 ScalarEvolution::computeLoadConstantCompareExitLimit(
7710 LoadInst *LI,
7711 Constant *RHS,
7712 const Loop *L,
7713 ICmpInst::Predicate predicate) {
7714 if (LI->isVolatile()) return getCouldNotCompute();
7715
7716 // Check to see if the loaded pointer is a getelementptr of a global.
7717 // TODO: Use SCEV instead of manually grubbing with GEPs.
7718 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
7719 if (!GEP) return getCouldNotCompute();
7720
7721 // Make sure that it is really a constant global we are gepping, with an
7722 // initializer, and make sure the first IDX is really 0.
7723 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
7724 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
7725 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
7726 !cast<Constant>(GEP->getOperand(1))->isNullValue())
7727 return getCouldNotCompute();
7728
7729 // Okay, we allow one non-constant index into the GEP instruction.
7730 Value *VarIdx = nullptr;
7731 std::vector<Constant*> Indexes;
7732 unsigned VarIdxNum = 0;
7733 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
7734 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
7735 Indexes.push_back(CI);
7736 } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
7737 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
7738 VarIdx = GEP->getOperand(i);
7739 VarIdxNum = i-2;
7740 Indexes.push_back(nullptr);
7741 }
7742
7743 // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
7744 if (!VarIdx)
7745 return getCouldNotCompute();
7746
7747 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
7748 // Check to see if X is a loop variant variable value now.
7749 const SCEV *Idx = getSCEV(VarIdx);
7750 Idx = getSCEVAtScope(Idx, L);
7751
7752 // We can only recognize very limited forms of loop index expressions, in
7753 // particular, only affine AddRec's like {C1,+,C2}.
7754 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
7755 if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
7756 !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
7757 !isa<SCEVConstant>(IdxExpr->getOperand(1)))
7758 return getCouldNotCompute();
7759
7760 unsigned MaxSteps = MaxBruteForceIterations;
7761 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
7762 ConstantInt *ItCst = ConstantInt::get(
7763 cast<IntegerType>(IdxExpr->getType()), IterationNum);
7764 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
7765
7766 // Form the GEP offset.
7767 Indexes[VarIdxNum] = Val;
7768
7769 Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
7770 Indexes);
7771 if (!Result) break; // Cannot compute!
7772
7773 // Evaluate the condition for this iteration.
7774 Result = ConstantExpr::getICmp(predicate, Result, RHS);
7775 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
7776 if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
7777 ++NumArrayLenItCounts;
7778 return getConstant(ItCst); // Found terminating iteration!
7779 }
7780 }
7781 return getCouldNotCompute();
7782 }
7783
7784 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
7785 Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
7786 ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
7787 if (!RHS)
7788 return getCouldNotCompute();
7789
7790 const BasicBlock *Latch = L->getLoopLatch();
7791 if (!Latch)
7792 return getCouldNotCompute();
7793
7794 const BasicBlock *Predecessor = L->getLoopPredecessor();
7795 if (!Predecessor)
7796 return getCouldNotCompute();
7797
7798 // Return true if V is of the form "LHS `shift_op` <positive constant>".
7799 // Return LHS in OutLHS and shift_op in OutOpCode.
7800 auto MatchPositiveShift =
7801 [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {
7802
7803 using namespace PatternMatch;
7804
7805 ConstantInt *ShiftAmt;
7806 if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7807 OutOpCode = Instruction::LShr;
7808 else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7809 OutOpCode = Instruction::AShr;
7810 else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7811 OutOpCode = Instruction::Shl;
7812 else
7813 return false;
7814
7815 return ShiftAmt->getValue().isStrictlyPositive();
7816 };
7817
7818 // Recognize a "shift recurrence" of either the form %iv or %iv.shifted in
7819 //
7820 // loop:
7821 // %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
7822 // %iv.shifted = lshr i32 %iv, <positive constant>
7823 //
7824 // Return true on a successful match. Return the corresponding PHI node (%iv
7825 // above) in PNOut and the opcode of the shift operation in OpCodeOut.
7826 auto MatchShiftRecurrence =
7827 [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
7828 Optional<Instruction::BinaryOps> PostShiftOpCode;
7829
7830 {
7831 Instruction::BinaryOps OpC;
7832 Value *V;
7833
7834 // If we encounter a shift instruction, "peel off" the shift operation,
7835 // and remember that we did so. Later when we inspect %iv's backedge
7836 // value, we will make sure that the backedge value uses the same
7837 // operation.
7838 //
7839 // Note: the peeled shift operation does not have to be the same
7840 // instruction as the one feeding into the PHI's backedge value. We only
7841 // really care about it being the same *kind* of shift instruction --
7842 // that's all that is required for our later inferences to hold.
7843 if (MatchPositiveShift(LHS, V, OpC)) {
7844 PostShiftOpCode = OpC;
7845 LHS = V;
7846 }
7847 }
7848
7849 PNOut = dyn_cast<PHINode>(LHS);
7850 if (!PNOut || PNOut->getParent() != L->getHeader())
7851 return false;
7852
7853 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
7854 Value *OpLHS;
7855
7856 return
7857 // The backedge value for the PHI node must be a shift by a positive
7858 // amount
7859 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
7860
7861 // of the PHI node itself
7862 OpLHS == PNOut &&
7863
7864 // and the kind of shift should match the kind of shift we peeled
7865 // off, if any.
7866 (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
7867 };
7868
7869 PHINode *PN;
7870 Instruction::BinaryOps OpCode;
7871 if (!MatchShiftRecurrence(LHS, PN, OpCode))
7872 return getCouldNotCompute();
7873
7874 const DataLayout &DL = getDataLayout();
7875
7876 // The key rationale for this optimization is that for some kinds of shift
7877 // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
7878 // within a finite number of iterations. If the condition guarding the
7879 // backedge (in the sense that the backedge is taken if the condition is true)
7880 // is false for the value the shift recurrence stabilizes to, then we know
7881 // that the backedge is taken only a finite number of times.
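// E.g. (illustrative): {K,ashr,1} stabilizes to 0 if K >= 0 and to -1 if
// K < 0 within bitwidth(K) steps, so a backedge guarded by "%iv != 0" with
// a known-nonnegative initial K can be taken only a bounded number of
// times.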
7882
7883 ConstantInt *StableValue = nullptr;
7884 switch (OpCode) {
7885 default:
7886 llvm_unreachable("Impossible case!");
7887
7888 case Instruction::AShr: {
7889 // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
7890 // bitwidth(K) iterations.
7891 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
7892 KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr,
7893 Predecessor->getTerminator(), &DT);
7894 auto *Ty = cast<IntegerType>(RHS->getType());
7895 if (Known.isNonNegative())
7896 StableValue = ConstantInt::get(Ty, 0);
7897 else if (Known.isNegative())
7898 StableValue = ConstantInt::get(Ty, -1, true);
7899 else
7900 return getCouldNotCompute();
7901
7902 break;
7903 }
7904 case Instruction::LShr:
7905 case Instruction::Shl:
7906 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
7907 // stabilize to 0 in at most bitwidth(K) iterations.
7908 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
7909 break;
7910 }
7911
7912 auto *Result =
7913 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
7914 assert(Result->getType()->isIntegerTy(1) &&
7915 "Otherwise cannot be an operand to a branch instruction");
7916
7917 if (Result->isZeroValue()) {
7918 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
7919 const SCEV *UpperBound =
7920 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
7921 return ExitLimit(getCouldNotCompute(), UpperBound, false);
7922 }
7923
7924 return getCouldNotCompute();
7925 }
7926
7927 /// Return true if we can constant fold an instruction of the specified type,
7928 /// assuming that all operands were constants.
7929 static bool CanConstantFold(const Instruction *I) {
7930 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
7931 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
7932 isa<LoadInst>(I) || isa<ExtractValueInst>(I))
7933 return true;
7934
7935 if (const CallInst *CI = dyn_cast<CallInst>(I))
7936 if (const Function *F = CI->getCalledFunction())
7937 return canConstantFoldCallTo(CI, F);
7938 return false;
7939 }
7940
7941 /// Determine whether this instruction can constant evolve within this loop
7942 /// assuming its operands can all constant evolve.
7943 static bool canConstantEvolve(Instruction *I, const Loop *L) {
7944 // An instruction outside of the loop can't be derived from a loop PHI.
7945 if (!L->contains(I)) return false;
7946
7947 if (isa<PHINode>(I)) {
7948 // We don't currently keep track of the control flow needed to evaluate
7949 // PHIs, so we cannot handle PHIs inside of loops.
7950 return L->getHeader() == I->getParent();
7951 }
7952
7953 // If we won't be able to constant fold this expression even if the operands
7954 // are constants, bail early.
7955 return CanConstantFold(I);
7956 }
7957
7958 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
7959 /// recursing through each instruction operand until reaching a loop header phi.
7960 static PHINode *
7961 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
7962 DenseMap<Instruction *, PHINode *> &PHIMap,
7963 unsigned Depth) {
7964 if (Depth > MaxConstantEvolvingDepth)
7965 return nullptr;
7966
7967 // Otherwise, we can evaluate this instruction if all of its operands are
7968 // constant or derived from a PHI node themselves.
7969 PHINode *PHI = nullptr;
7970 for (Value *Op : UseInst->operands()) {
7971 if (isa<Constant>(Op)) continue;
7972
7973 Instruction *OpInst = dyn_cast<Instruction>(Op);
7974 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;
7975
7976 PHINode *P = dyn_cast<PHINode>(OpInst);
7977 if (!P)
7978 // If this operand is already visited, reuse the prior result.
7979 // We may have P != PHI if this is the deepest point at which the
7980 // inconsistent paths meet.
7981 P = PHIMap.lookup(OpInst);
7982 if (!P) {
7983 // Recurse and memoize the results, whether a phi is found or not.
7984 // This recursive call invalidates pointers into PHIMap.
7985 P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
7986 PHIMap[OpInst] = P;
7987 }
7988 if (!P)
7989 return nullptr; // Not evolving from PHI
7990 if (PHI && PHI != P)
7991 return nullptr; // Evolving from multiple different PHIs.
7992 PHI = P;
7993 }
7994 // This is an expression evolving from a constant PHI!
7995 return PHI;
7996 }
7997
7998 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
7999 /// in the loop that V is derived from. We allow arbitrary operations along the
8000 /// way, but the operands of an operation must either be constants or a value
8001 /// derived from a constant PHI. If this expression does not fit with these
8002 /// constraints, return null.
8003 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
8004 Instruction *I = dyn_cast<Instruction>(V);
8005 if (!I || !canConstantEvolve(I, L)) return nullptr;
8006
8007 if (PHINode *PN = dyn_cast<PHINode>(I))
8008 return PN;
8009
8010 // Record non-constant instructions contained by the loop.
8011 DenseMap<Instruction *, PHINode *> PHIMap;
8012 return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
8013 }
8014
8015 /// EvaluateExpression - Given an expression that passes the
8016 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
8017 /// in the loop has the value PHIVal. If we can't fold this expression for some
8018 /// reason, return null.
8019 static Constant *EvaluateExpression(Value *V, const Loop *L,
8020 DenseMap<Instruction *, Constant *> &Vals,
8021 const DataLayout &DL,
8022 const TargetLibraryInfo *TLI) {
8023 // Convenient constant check, but redundant for recursive calls.
8024 if (Constant *C = dyn_cast<Constant>(V)) return C;
8025 Instruction *I = dyn_cast<Instruction>(V);
8026 if (!I) return nullptr;
8027
8028 if (Constant *C = Vals.lookup(I)) return C;
8029
8030 // An instruction inside the loop depends on a value outside the loop that we
8031 // weren't given a mapping for, or a value such as a call inside the loop.
8032 if (!canConstantEvolve(I, L)) return nullptr;
8033
8034 // An unmapped PHI can be due to a branch or another loop inside this loop,
8035 // or due to this not being the initial iteration through a loop where we
8036 // couldn't compute the evolution of this particular PHI last time.
8037 if (isa<PHINode>(I)) return nullptr;
8038
8039 std::vector<Constant*> Operands(I->getNumOperands());
8040
8041 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
8042 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
8043 if (!Operand) {
8044 Operands[i] = dyn_cast<Constant>(I->getOperand(i));
8045 if (!Operands[i]) return nullptr;
8046 continue;
8047 }
8048 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
8049 Vals[Operand] = C;
8050 if (!C) return nullptr;
8051 Operands[i] = C;
8052 }
8053
8054 if (CmpInst *CI = dyn_cast<CmpInst>(I))
8055 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
8056 Operands[1], DL, TLI);
8057 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
8058 if (!LI->isVolatile())
8059 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
8060 }
8061 return ConstantFoldInstOperands(I, Operands, DL, TLI);
8062 }
8063
8064
8065 // If every incoming value to PN except the one for BB is a specific Constant,
8066 // return that, else return nullptr.
8067 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) {
8068 Constant *IncomingVal = nullptr;
8069
8070 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
8071 if (PN->getIncomingBlock(i) == BB)
8072 continue;
8073
8074 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
8075 if (!CurrentVal)
8076 return nullptr;
8077
8078 if (IncomingVal != CurrentVal) {
8079 if (IncomingVal)
8080 return nullptr;
8081 IncomingVal = CurrentVal;
8082 }
8083 }
8084
8085 return IncomingVal;
8086 }
8087
8088 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
8089 /// in the header of its containing loop, that the loop executes a
8090 /// constant number of times, and that the PHI node is just a recurrence
8091 /// involving constants, fold it.
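/// For example (a sketch, not from the source): for
/// "%i = phi i32 [ 0, %pre ], [ %i.next, %latch ]" with
/// "%i.next = add i32 %i, 3" in a loop whose backedge is taken exactly 4
/// times, symbolic execution yields the exit value 12 without ever building
/// a closed-form SCEV for the recurrence.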
8092 Constant *
8093 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
8094 const APInt &BEs,
8095 const Loop *L) {
8096 auto I = ConstantEvolutionLoopExitValue.find(PN);
8097 if (I != ConstantEvolutionLoopExitValue.end())
8098 return I->second;
8099
8100 if (BEs.ugt(MaxBruteForceIterations))
8101 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it.
8102
8103 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
8104
8105 DenseMap<Instruction *, Constant *> CurrentIterVals;
8106 BasicBlock *Header = L->getHeader();
8107 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
8108
8109 BasicBlock *Latch = L->getLoopLatch();
8110 if (!Latch)
8111 return nullptr;
8112
8113 for (PHINode &PHI : Header->phis()) {
8114 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
8115 CurrentIterVals[&PHI] = StartCST;
8116 }
8117 if (!CurrentIterVals.count(PN))
8118 return RetVal = nullptr;
8119
8120 Value *BEValue = PN->getIncomingValueForBlock(Latch);
8121
8122 // Execute the loop symbolically to determine the exit value.
8123 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) &&
8124 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
8125
8126 unsigned NumIterations = BEs.getZExtValue(); // must be in range
8127 unsigned IterationNum = 0;
8128 const DataLayout &DL = getDataLayout();
8129 for (; ; ++IterationNum) {
8130 if (IterationNum == NumIterations)
8131 return RetVal = CurrentIterVals[PN]; // Got exit value!
8132
8133 // Compute the value of the PHIs for the next iteration.
8134 // EvaluateExpression adds non-phi values to the CurrentIterVals map.
8135 DenseMap<Instruction *, Constant *> NextIterVals;
8136 Constant *NextPHI =
8137 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8138 if (!NextPHI)
8139 return nullptr; // Couldn't evaluate!
8140 NextIterVals[PN] = NextPHI;
8141
8142 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
8143
8144 // Also evaluate the other PHI nodes. However, we don't get to stop if we
8145 // cease to be able to evaluate one of them or if they stop evolving,
8146 // because that doesn't necessarily prevent us from computing PN.
8147 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
8148 for (const auto &I : CurrentIterVals) {
8149 PHINode *PHI = dyn_cast<PHINode>(I.first);
8150 if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
8151 PHIsToCompute.emplace_back(PHI, I.second);
8152 }
8153 // We use two distinct loops because EvaluateExpression may invalidate any
8154 // iterators into CurrentIterVals.
8155 for (const auto &I : PHIsToCompute) {
8156 PHINode *PHI = I.first;
8157 Constant *&NextPHI = NextIterVals[PHI];
8158 if (!NextPHI) { // Not already computed.
8159 Value *BEValue = PHI->getIncomingValueForBlock(Latch);
8160 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8161 }
8162 if (NextPHI != I.second)
8163 StoppedEvolving = false;
8164 }
8165
8166 // If all entries in CurrentIterVals == NextIterVals then we can stop
8167 // iterating, the loop can't continue to change.
8168 if (StoppedEvolving)
8169 return RetVal = CurrentIterVals[PN];
8170
8171 CurrentIterVals.swap(NextIterVals);
8172 }
8173 }
8174
8175 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
8176 Value *Cond,
8177 bool ExitWhen) {
8178 PHINode *PN = getConstantEvolvingPHI(Cond, L);
8179 if (!PN) return getCouldNotCompute();
8180
8181 // If the loop is canonicalized, the PHI will have exactly two entries.
8182 // That's the only form we support here.
8183 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
8184
8185 DenseMap<Instruction *, Constant *> CurrentIterVals;
8186 BasicBlock *Header = L->getHeader();
8187 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
8188
8189 BasicBlock *Latch = L->getLoopLatch();
8190 assert(Latch && "Should follow from NumIncomingValues == 2!");
8191
8192 for (PHINode &PHI : Header->phis()) {
8193 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
8194 CurrentIterVals[&PHI] = StartCST;
8195 }
8196 if (!CurrentIterVals.count(PN))
8197 return getCouldNotCompute();
8198
8199 // Okay, we found a PHI node that defines the trip count of this loop. Execute
8200 // the loop symbolically to determine when the condition gets a value of
8201 // "ExitWhen".
8202 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
8203 const DataLayout &DL = getDataLayout();
8204 for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
8205 auto *CondVal = dyn_cast_or_null<ConstantInt>(
8206 EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));
8207
8208 // Couldn't symbolically evaluate.
8209 if (!CondVal) return getCouldNotCompute();
8210
8211 if (CondVal->getValue() == uint64_t(ExitWhen)) {
8212 ++NumBruteForceTripCountsComputed;
8213 return getConstant(Type::getInt32Ty(getContext()), IterationNum);
8214 }
8215
8216 // Update all the PHI nodes for the next iteration.
8217 DenseMap<Instruction *, Constant *> NextIterVals;
8218
8219 // Create a list of which PHIs we need to compute. We want to do this before
8220 // calling EvaluateExpression on them because that may invalidate iterators
8221 // into CurrentIterVals.
8222 SmallVector<PHINode *, 8> PHIsToCompute;
8223 for (const auto &I : CurrentIterVals) {
8224 PHINode *PHI = dyn_cast<PHINode>(I.first);
8225 if (!PHI || PHI->getParent() != Header) continue;
8226 PHIsToCompute.push_back(PHI);
8227 }
8228 for (PHINode *PHI : PHIsToCompute) {
8229 Constant *&NextPHI = NextIterVals[PHI];
8230 if (NextPHI) continue; // Already computed!
8231
8232 Value *BEValue = PHI->getIncomingValueForBlock(Latch);
8233 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8234 }
8235 CurrentIterVals.swap(NextIterVals);
8236 }
8237
8238 // Too many iterations were needed to evaluate.
8239 return getCouldNotCompute();
8240 }
8241
8242 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
8243 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
8244 ValuesAtScopes[V];
8245 // Check to see if we've folded this expression at this loop before.
8246 for (auto &LS : Values)
8247 if (LS.first == L)
8248 return LS.second ? LS.second : V;
8249
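// Reserve an entry with a null result: if the computation below re-enters
// getSCEVAtScope for the same (V, L) pair, the null entry maps back to V
// itself rather than recursing forever.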
8250 Values.emplace_back(L, nullptr);
8251
8252 // Otherwise compute it.
8253 const SCEV *C = computeSCEVAtScope(V, L);
8254 for (auto &LS : reverse(ValuesAtScopes[V]))
8255 if (LS.first == L) {
8256 LS.second = C;
8257 break;
8258 }
8259 return C;
8260 }
8261
8262 /// This builds up a Constant using the ConstantExpr interface. That way, we
8263 /// will return Constants for objects which aren't represented by a
8264 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
8265 /// Returns NULL if the SCEV isn't representable as a Constant.
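/// For example (illustrative): an scAddExpr over two SCEVConstants folds to
/// the corresponding ConstantInt sum, while any scAddRecExpr yields null,
/// since a recurrence has no single Constant value.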
8266 static Constant *BuildConstantFromSCEV(const SCEV *V) {
8267 switch (V->getSCEVType()) {
8268 case scCouldNotCompute:
8269 case scAddRecExpr:
8270 return nullptr;
8271 case scConstant:
8272 return cast<SCEVConstant>(V)->getValue();
8273 case scUnknown:
8274 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
8275 case scSignExtend: {
8276 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
8277 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
8278 return ConstantExpr::getSExt(CastOp, SS->getType());
8279 return nullptr;
8280 }
8281 case scZeroExtend: {
8282 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
8283 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
8284 return ConstantExpr::getZExt(CastOp, SZ->getType());
8285 return nullptr;
8286 }
8287 case scPtrToInt: {
8288 const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V);
8289 if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand()))
8290 return ConstantExpr::getPtrToInt(CastOp, P2I->getType());
8291
8292 return nullptr;
8293 }
8294 case scTruncate: {
8295 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
8296 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
8297 return ConstantExpr::getTrunc(CastOp, ST->getType());
8298 return nullptr;
8299 }
8300 case scAddExpr: {
8301 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
8302 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
8303 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
8304 unsigned AS = PTy->getAddressSpace();
8305 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
8306 C = ConstantExpr::getBitCast(C, DestPtrTy);
8307 }
8308 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
8309 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
8310 if (!C2)
8311 return nullptr;
8312
8313 // First pointer!
8314 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
8315 unsigned AS = C2->getType()->getPointerAddressSpace();
8316 std::swap(C, C2);
8317 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
8318 // The offsets have been converted to bytes. We can add bytes to an
8319 // i8* by GEP with the byte count in the first index.
8320 C = ConstantExpr::getBitCast(C, DestPtrTy);
8321 }
8322
8323 // Don't bother trying to sum two pointers. We probably can't
8324 // statically compute a load that results from it anyway.
8325 if (C2->getType()->isPointerTy())
8326 return nullptr;
8327
8328 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
8329 if (PTy->getElementType()->isStructTy())
8330 C2 = ConstantExpr::getIntegerCast(
8331 C2, Type::getInt32Ty(C->getContext()), true);
8332 C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
8333 } else
8334 C = ConstantExpr::getAdd(C, C2);
8335 }
8336 return C;
8337 }
8338 return nullptr;
8339 }
8340 case scMulExpr: {
8341 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
8342 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
8343 // Don't bother with pointers at all.
8344 if (C->getType()->isPointerTy())
8345 return nullptr;
8346 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
8347 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
8348 if (!C2 || C2->getType()->isPointerTy())
8349 return nullptr;
8350 C = ConstantExpr::getMul(C, C2);
8351 }
8352 return C;
8353 }
8354 return nullptr;
8355 }
8356 case scUDivExpr: {
8357 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
8358 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
8359 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
8360 if (LHS->getType() == RHS->getType())
8361 return ConstantExpr::getUDiv(LHS, RHS);
8362 return nullptr;
8363 }
8364 case scSMaxExpr:
8365 case scUMaxExpr:
8366 case scSMinExpr:
8367 case scUMinExpr:
8368 return nullptr; // TODO: smax, umax, smin, umax.
8369 }
8370 llvm_unreachable("Unknown SCEV kind!");
8371 }

const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      if (PHINode *PN = dyn_cast<PHINode>(I)) {
        const Loop *CurrLoop = this->LI[I->getParent()];
        // Looking for loop exit value.
        if (CurrLoop && CurrLoop->getParentLoop() == L &&
            PN->getParent() == CurrLoop->getHeader()) {
          // Okay, there is no closed form solution for the PHI node. Check
          // to see if the loop that contains it has a known backedge-taken
          // count. If so, we may be able to force computation of the exit
          // value.
          const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
          // This trivial case can show up in some degenerate cases where
          // the incoming IR has not yet been fully simplified.
          if (BackedgeTakenCount->isZero()) {
            Value *InitValue = nullptr;
            bool MultipleInitValues = false;
            for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
              if (!CurrLoop->contains(PN->getIncomingBlock(i))) {
                if (!InitValue)
                  InitValue = PN->getIncomingValue(i);
                else if (InitValue != PN->getIncomingValue(i)) {
                  MultipleInitValues = true;
                  break;
                }
              }
            }
            if (!MultipleInitValues && InitValue)
              return getSCEV(InitValue);
          }
          // Do we have a loop invariant value flowing around the backedge
          // for a loop which must execute the backedge?
          if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
              isKnownPositive(BackedgeTakenCount) &&
              PN->getNumIncomingValues() == 2) {

            unsigned InLoopPred =
                CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1;
            Value *BackedgeVal = PN->getIncomingValue(InLoopPred);
            if (CurrLoop->isLoopInvariant(BackedgeVal))
              return getSCEV(BackedgeVal);
          }
          if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
            // Okay, we know how many times the containing loop executes. If
            // this is a constant evolving PHI node, get the final value at
            // the specified iteration number.
            Constant *RV = getConstantEvolutionLoopExitValue(
                PN, BTCC->getAPInt(), CurrLoop);
            if (RV) return getSCEV(RV);
          }
        }

        // If there is a single-input Phi, evaluate it at our scope. If we can
        // prove that this replacement does not break LCSSA form, use new value.
        if (PN->getNumOperands() == 1) {
          const SCEV *Input = getSCEV(PN->getOperand(0));
          const SCEV *InputAtScope = getSCEVAtScope(Input, L);
          // TODO: We can generalize it using LI.replacementPreservesLCSSAForm,
          // for the simplest case just support constants.
          if (isa<SCEVConstant>(InputAtScope)) return InputAtScope;
        }
      }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV. Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result. This is particularly useful for computing loop exit values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (Value *Op : I->operands()) {
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If any of the operands is non-constant and if they are
          // non-integer and non-pointer, don't even try to analyze them
          // with SCEV techniques.
          if (!isSCEVable(Op->getType()))
            return V;

          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = BuildConstantFromSCEV(OpV);
          if (!C) return V;
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
        if (MadeImprovement) {
          Constant *C = nullptr;
          const DataLayout &DL = getDataLayout();
          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
            C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                                Operands[1], DL, &TLI);
          else if (const LoadInst *Load = dyn_cast<LoadInst>(I)) {
            if (!Load->isVolatile())
              C = ConstantFoldLoadFromConstPtr(Operands[0], Load->getType(),
                                               DL);
          } else
            C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
          if (!C) return V;
          return getSCEV(C);
        }
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }

  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable. Build a new instance of the folded commutative expression.
        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
                                            Comm->op_begin()+i);
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
          NewOps.push_back(OpAtScope);
        }
        if (isa<SCEVAddExpr>(Comm))
          return getAddExpr(NewOps, Comm->getNoWrapFlags());
        if (isa<SCEVMulExpr>(Comm))
          return getMulExpr(NewOps, Comm->getNoWrapFlags());
        if (isa<SCEVMinMaxExpr>(Comm))
          return getMinMaxExpr(Comm->getSCEVType(), NewOps);
        llvm_unreachable("Unknown commutative SCEV type!");
      }
    }
    // If we got here, all operands are loop invariant.
    return Comm;
  }

  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
    if (LHS == Div->getLHS() && RHS == Div->getRHS())
      return Div;   // must be loop invariant
    return getUDivExpr(LHS, RHS);
  }

  // If this is a loop recurrence for a loop that does not contain L, then we
  // are dealing with the final value computed by the loop.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
    // First, attempt to evaluate each operand.
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
      if (OpAtScope == AddRec->getOperand(i))
        continue;

      // Okay, at least one of these operands is loop variant but might be
      // foldable. Build a new instance of the folded commutative expression.
      SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
                                          AddRec->op_begin()+i);
      NewOps.push_back(OpAtScope);
      for (++i; i != e; ++i)
        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));

      const SCEV *FoldedRec =
          getAddRecExpr(NewOps, AddRec->getLoop(),
                        AddRec->getNoWrapFlags(SCEV::FlagNW));
      AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
      // The addrec may be folded to a nonrecurrence, for example, if the
      // induction variable is multiplied by zero after constant folding. Go
      // ahead and return the folded value.
      if (!AddRec)
        return FoldedRec;
      break;
    }

    // If the scope is outside the addrec's loop, evaluate it by using the
    // loop exit value of the addrec.
    if (!AddRec->getLoop()->contains(L)) {
      // To evaluate this recurrence, we need to know how many times the AddRec
      // loop iterates. Compute this now.
      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;

      // Then, evaluate the AddRec.
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }

  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getZeroExtendExpr(Op, Cast->getType());
  }

  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getSignExtendExpr(Op, Cast->getType());
  }

  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getTruncateExpr(Op, Cast->getType());
  }

  if (const SCEVPtrToIntExpr *Cast = dyn_cast<SCEVPtrToIntExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getPtrToIntExpr(Op, Cast->getType());
  }

  llvm_unreachable("Unknown SCEV type!");
}

const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}

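/// Strip zero-extends and sign-extends from \p S. Both casts are injective,
/// so for callers such as howFarToZero (below) that only care where the
/// expression becomes zero, the stripped expression is an adequate stand-in;
/// e.g. (zext (sext X)) is reduced to X.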
const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S))
    return stripInjectiveFunctions(ZExt->getOperand());
  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S))
    return stripInjectiveFunctions(SExt->getOperand());
  return S;
}

/// Finds the minimum unsigned root of the following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
/// A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
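///
/// A worked example: with BW = 8, solving 4 * X = 8 (mod 256) gives
/// D = gcd(4, 2^8) = 4, I = (A/D)^{-1} mod (N/D) = 1, and the minimum
/// unsigned root X = (I * B mod N) / D = 8 / 4 = 2; indeed 4 * 2 = 8.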
static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
                                                ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == SE.getTypeSizeInBits(B->getType()));
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2. The number of
  // trailing zeros in A is its multiplicity.
  uint32_t Mult2 = A.countTrailingZeros();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
  // is not less than multiplicity of this prime factor for D.
  if (SE.GetMinTrailingZeros(B) < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
  // (N / D) in general. The inverse itself always fits into BW bits, though,
  // so we immediately truncate it.
  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.setBit(BW - Mult2);  // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod).trunc(BW);

  // 4. Compute the minimum unsigned root of the equation:
  //      I * (B / D) mod (N / D)
  //    To simplify the computation, we factor out the divide by D:
  //      (I * B mod N) / D
  const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
  return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}

/// For a given quadratic addrec, generate coefficients of the corresponding
/// quadratic equation, multiplied by a common value to ensure that they are
/// integers.
/// The returned value is a tuple { A, B, C, M, BitWidth }, where
/// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C
/// were multiplied by, and BitWidth is the bit width of the original addrec
/// coefficients.
/// This function returns None if the addrec coefficients are not compile-
/// time constants.
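///
/// For example, the addrec {0,+,1,+,2} evaluates to n^2 after n iterations
/// (0, 1, 4, 9, ...); for it this function returns A = 2, B = 0, C = 0 and
/// M = 2, encoding 2*(n^2) = 0 in the widened bit width.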
static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
GetQuadraticEquation(const SCEVAddRecExpr *AddRec) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
  LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: "
                    << *AddRec << '\n');

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC) {
    LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n");
    return None;
  }

  APInt L = LC->getAPInt();
  APInt M = MC->getAPInt();
  APInt N = NC->getAPInt();
  assert(!N.isNullValue() && "This is not a quadratic addrec");

  unsigned BitWidth = LC->getAPInt().getBitWidth();
  unsigned NewWidth = BitWidth + 1;
  LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: "
                    << BitWidth << '\n');
  // The sign-extension (as opposed to a zero-extension) here matches the
  // extension used in SolveQuadraticEquationWrap (with the same motivation).
  N = N.sext(NewWidth);
  M = M.sext(NewWidth);
  L = L.sext(NewWidth);

  // The increments are M, M+N, M+2N, ..., so the accumulated values are
  //   L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is,
  //   L+M, L+2M+N, L+3M+3N, ...
  // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N.
  //
  // The equation Acc = 0 is then
  //   L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0.
  // In a quadratic form it becomes:
  //   N n^2 + (2M-N) n + 2L = 0.

  APInt A = N;
  APInt B = 2 * M - A;
  APInt C = 2 * L;
  APInt T = APInt(NewWidth, 2);
  LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B
                    << "x + " << C << ", coeff bw: " << NewWidth
                    << ", multiplied by " << T << '\n');
  return std::make_tuple(A, B, C, T, BitWidth);
}

/// Helper function to compare optional APInts:
/// (a) if X and Y both exist, return min(X, Y),
/// (b) if neither X nor Y exist, return None,
/// (c) if exactly one of X and Y exists, return that value.
static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
  if (X.hasValue() && Y.hasValue()) {
    unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
    APInt XW = X->sextOrSelf(W);
    APInt YW = Y->sextOrSelf(W);
    return XW.slt(YW) ? *X : *Y;
  }
  if (!X.hasValue() && !Y.hasValue())
    return None;
  return X.hasValue() ? *X : *Y;
}

/// Helper function to truncate an optional APInt to a given BitWidth.
/// When solving addrec-related equations, it is preferable to return a value
/// that has the same bit width as the original addrec's coefficients. If the
/// solution fits in the original bit width, truncate it (except for i1).
/// Returning a value of a different bit width may inhibit some optimizations.
///
/// In general, a solution to a quadratic equation generated from an addrec
/// may require BW+1 bits, where BW is the bit width of the addrec's
/// coefficients. The reason is that the coefficients of the quadratic
/// equation are BW+1 bits wide (to avoid truncation when converting from
/// the addrec to the equation).
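///
/// For example, for an addrec with 8-bit coefficients, a 9-bit solution of
/// value 5 is truncated back to 8 bits, while a 9-bit solution of value 300
/// does not fit into 8 bits and is returned unchanged.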
static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) {
  if (!X.hasValue())
    return None;
  unsigned W = X->getBitWidth();
  if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth))
    return X->trunc(BitWidth);
  return X;
}

/// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n
/// iterations. The values L, M, N are assumed to be signed, and they
/// should all have the same bit widths.
/// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW,
/// where BW is the bit width of the addrec's coefficients.
/// If the calculated value is a BW-bit integer (for BW > 1), it will be
/// returned as such, otherwise the bit width of the returned value may
/// be greater than BW.
///
/// This function returns None if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases
///     like x^2 = 5, no integer solutions exist, in other cases an integer
///     solution may exist, but SolveQuadraticEquationWrap may fail to find it.
static Optional<APInt>
SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T.hasValue())
    return None;

  std::tie(A, B, C, M, BitWidth) = *T;
  LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n");
  Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1);
  if (!X.hasValue())
    return None;

  ConstantInt *CX = ConstantInt::get(SE.getContext(), *X);
  ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE);
  if (!V->isZero())
    return None;

  return TruncIfPossible(X, BitWidth);
}

/// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n
/// iterations. The values M, N are assumed to be signed, and they
/// should all have the same bit widths.
/// Find the least n such that c(n) does not belong to the given range,
/// while c(n-1) does.
///
/// This function returns None if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution for the
///     bounds of the range.
static Optional<APInt>
SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
                          const ConstantRange &Range, ScalarEvolution &SE) {
  assert(AddRec->getOperand(0)->isZero() &&
         "Starting value of addrec should be 0");
  LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range "
                    << Range << ", addrec " << *AddRec << '\n');
  // This case is handled in getNumIterationsInRange. Here we can assume that
  // we start in the range.
  assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) &&
         "Addrec's initial value should be in range");

  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T.hasValue())
    return None;

  // Be careful about the return value: there can be two reasons for not
  // returning an actual number. First, if no solutions to the equations
  // were found, and second, if the solutions don't leave the given range.
  // The first case means that the actual solution is "unknown", the second
  // means that it's known, but not valid. If the solution is unknown, we
  // cannot make any conclusions.
  // Return a pair: the optional solution and a flag indicating if the
  // solution was found.
  auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> {
    // Solve for signed overflow and unsigned overflow, pick the lower
    // solution.
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary "
                      << Bound << " (before multiplying by " << M << ")\n");
    Bound *= M; // The quadratic equation multiplier.

    Optional<APInt> SO = None;
    if (BitWidth > 1) {
      LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                           "signed overflow\n");
      SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
    }
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                         "unsigned overflow\n");
    Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound,
                                                              BitWidth+1);

    auto LeavesRange = [&] (const APInt &X) {
      ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
      ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
      if (Range.contains(V0->getValue()))
        return false;
      // X should be at least 1, so X-1 is non-negative.
      ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1);
      ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
      if (Range.contains(V1->getValue()))
        return true;
      return false;
    };

    // If SolveQuadraticEquationWrap returns None, it means that there can
    // be a solution, but the function failed to find it. We cannot treat it
    // as "no solution".
    if (!SO.hasValue() || !UO.hasValue())
      return { None, false };

    // Check the smaller value first to see if it leaves the range.
    // At this point, both SO and UO must have values.
    Optional<APInt> Min = MinOptional(SO, UO);
    if (LeavesRange(*Min))
      return { Min, true };
    Optional<APInt> Max = Min == SO ? UO : SO;
    if (LeavesRange(*Max))
      return { Max, true };

    // Solutions were found, but were eliminated, hence the "true".
    return { None, true };
  };

  std::tie(A, B, C, M, BitWidth) = *T;
  // Lower bound is inclusive, subtract 1 to represent the exiting value.
  APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
  APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
  auto SL = SolveForBoundary(Lower);
  auto SU = SolveForBoundary(Upper);
  // If any of the solutions was unknown, no meaningful conclusions can
  // be made.
  if (!SL.second || !SU.second)
    return None;

  // Claim: The correct solution is not some value between Min and Max.
  //
  // Justification: Assuming that Min and Max are different values, one of
  // them is when the first signed overflow happens, the other is when the
  // first unsigned overflow happens. Crossing the range boundary is only
  // possible via an overflow (treating 0 as a special case of it, modeling
  // an overflow as crossing k*2^W for some k).
  //
  // The interesting case here is when Min was eliminated as an invalid
  // solution, but Max was not. The argument is that if there was another
  // overflow between Min and Max, it would also have been eliminated if
  // it was considered.
  //
  // For a given boundary, it is possible to have two overflows of the same
  // type (signed/unsigned) without having the other type in between: this
  // can happen when the vertex of the parabola is between the iterations
  // corresponding to the overflows. This is only possible when the two
  // overflows cross k*2^W for the same k. In such case, if the second one
  // left the range (and was the first one to do so), the first overflow
  // would have to enter the range, which would mean that either we had left
  // the range before or that we started outside of it. Both of these cases
  // are contradictions.
  //
  // Claim: In the case where SolveForBoundary returns None, the correct
  // solution is not some value between the Max for this boundary and the
  // Min of the other boundary.
  //
  // Justification: Assume that we had such Max_A and Min_B corresponding
  // to range boundaries A and B and such that Max_A < Min_B. If there was
  // a solution between Max_A and Min_B, it would have to be caused by an
  // overflow corresponding to either A or B. It cannot correspond to B,
  // since Min_B is the first occurrence of such an overflow. If it
  // corresponded to A, it would have to be either a signed or an unsigned
  // overflow that is larger than both eliminated overflows for A. But
  // between the eliminated overflows and this overflow, the values would
  // cover the entire value space, thus crossing the other boundary, which
  // is a contradiction.

  return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
                              bool AllowPredicates) {

  // This is only used for loops with a "x != y" exit test. The exit condition
  // is now expressed as a single expression, V = x-y. So the exit test is
  // effectively V != 0. We know and take advantage of the fact that this
  // expression is only used in a comparison-with-zero context.

  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // If the value is a constant
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));

  if (!AddRec && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);

  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
    // We can only use this value if the chrec ends up with an exact zero
    // value at this index. When solving for "X*X != 5", for example, we
    // should not accept a root of 2.
    if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
      const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
      return ExitLimit(R, R, false, Predicates);
    }
    return getCouldNotCompute();
  }

  // Otherwise we can only handle this if it is affine.
  if (!AddRec->isAffine())
    return getCouldNotCompute();

  // If this is an affine expression, the execution count of this branch is
  // the minimum unsigned root of the following equation:
  //
  //     Start + Step*N = 0 (mod 2^BW)
  //
  // equivalent to:
  //
  //             Step*N = -Start (mod 2^BW)
  //
  // where BW is the common bit width of Start and Step.
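  //
  // A worked example: with BW = 8, Start = 6 and Step = 2, solving
  // 2*N = -6 (mod 256) gives N = 125, since 6 + 2*125 = 256 = 0 (mod 2^8).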

  // Get the initial value for the loop.
  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());

  // For now we handle only constant steps.
  //
  // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
  // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
  // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
  // We have not yet seen any such cases.
  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  if (!StepC || StepC->getValue()->isZero())
    return getCouldNotCompute();

  // For positive steps (counting up until unsigned overflow):
  //   N = -Start/Step (as unsigned)
  // For negative steps (counting down to zero):
  //   N = Start/-Step
  // First compute the unsigned distance from zero in the direction of Step.
  bool CountDown = StepC->getAPInt().isNegative();
  const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);

  // Handle unitary steps, which cannot wraparound.
  // 1*N = -Start; -1*N = Start (mod 2^BW), so:
  //   N = Distance (as unsigned)
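  // For example, the addrec {8,+,-1} counts down 8, 7, ..., 0, so
  // CountDown is true, Distance = Start = 8, and the backedge is taken
  // exactly 8 times before the value reaches zero.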
  if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
    APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L));
    APInt MaxBECountBase = getUnsignedRangeMax(Distance);
    if (MaxBECountBase.ult(MaxBECount))
      MaxBECount = MaxBECountBase;

    // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is
    // rotated, we end up with a loop whose backedge-taken count is n - 1.
    // Detect this case, and see if we can improve the bound.
    //
    // Explicitly handling this here is necessary because getUnsignedRange
    // isn't context-sensitive; it doesn't know that we only care about the
    // range inside the loop.
    const SCEV *Zero = getZero(Distance->getType());
    const SCEV *One = getOne(Distance->getType());
    const SCEV *DistancePlusOne = getAddExpr(Distance, One);
    if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
      // If Distance + 1 doesn't overflow, we can compute the maximum distance
      // as "unsigned_max(Distance + 1) - 1".
      ConstantRange CR = getUnsignedRange(DistancePlusOne);
      MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
    }
    return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
  }

  // If the condition controls loop exit (the loop exits only if the expression
  // is true) and the addition is no-wrap we can use unsigned divide to
  // compute the backedge count. In this case, the step may not divide the
  // distance, but we don't care because if the condition is "missed" the loop
  // will have undefined behavior due to wrapping.
  if (ControlsExit && AddRec->hasNoSelfWrap() &&
      loopHasNoAbnormalExits(AddRec->getLoop())) {
    const SCEV *Exact =
        getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
    const SCEV *Max =
        Exact == getCouldNotCompute()
            ? Exact
            : getConstant(getUnsignedRangeMax(Exact));
    return ExitLimit(Exact, Max, false, Predicates);
  }

  // Solve the general equation.
  const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
                                               getNegativeSCEV(Start), *this);
  const SCEV *M = E == getCouldNotCompute()
                      ? E
                      : getConstant(getUnsignedRangeMax(E));
  return ExitLimit(E, M, false, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
  // Loops that look like: while (X == 0) are very strange indeed. We don't
  // handle them yet except for the trivial case. This could be expanded in the
  // future as needed.

  // If the value is a constant, check to see if it is known to be non-zero
  // already. If so, the backedge will execute zero times.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    if (!C->getValue()->isZero())
      return getZero(C->getType());
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  // We could implement others, but I really doubt anyone writes loops like
  // this, and if they did, they would already be constant folded.
  return getCouldNotCompute();
}

std::pair<const BasicBlock *, const BasicBlock *>
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB)
    const {
  // If the block has a unique predecessor, then there is no path from the
  // predecessor to the block that does not go through the direct edge
  // from the predecessor to the block.
  if (const BasicBlock *Pred = BB->getSinglePredecessor())
    return {Pred, BB};

  // A loop's header is defined to be a block that dominates the loop.
  // If the header has a unique predecessor outside the loop, it must be
  // a block that has exactly one successor that can reach the loop.
  if (const Loop *L = LI.getLoopFor(BB))
    return {L->getLoopPredecessor(), L->getHeader()};

  return {nullptr, nullptr};
}

/// SCEV structural equivalence is usually sufficient for testing whether two
/// expressions are equal, however for the purposes of looking for a condition
/// guarding a loop, it can be useful to be a little more general, since a
/// front-end may have replicated the controlling expression.
static bool HasSameValue(const SCEV *A, const SCEV *B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
    // Not all instructions that are "identical" compute the same value. For
    // instance, two distinct alloca instructions allocating the same type are
    // identical and do not read memory; but compute distinct values.
    return A->isIdenticalTo(B) &&
           (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
  };

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value. Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (ComputesEqualValues(AI, BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}

bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS,
                                           unsigned Depth) {
  bool Changed = false;
  // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or
  // '0 != 0'.
  auto TrivialCase = [&](bool TriviallyTrue) {
    LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
    Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
    return true;
  };
  // If we hit the max recursion limit bail out.
  if (Depth >= 3)
    return false;

  // Canonicalize a constant to the right side.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
    // Check for both operands constant.
    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
      if (ConstantExpr::getICmp(Pred,
                                LHSC->getValue(),
                                RHSC->getValue())->isNullValue())
        return TrivialCase(false);
      else
        return TrivialCase(true);
    }
    // Otherwise swap the operands to put the constant on the right.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
    Changed = true;
  }

  // If we're comparing an addrec with a value which is loop-invariant in the
  // addrec's loop, put the addrec on the left. Also make a dominance check,
  // as both operands could be addrecs loop-invariant in each other's loop.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
    const Loop *L = AR->getLoop();
    if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
      Changed = true;
    }
  }

  // If there's a constant operand, canonicalize comparisons with boundary
  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
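  // For example, "x u>= 1" becomes "x u> 0" and "x s<= 7" becomes "x s< 8".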
  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
    const APInt &RA = RC->getAPInt();

    bool SimplifiedByConstantRange = false;

    if (!ICmpInst::isEquality(Pred)) {
      ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
      if (ExactCR.isFullSet())
        return TrivialCase(true);
      else if (ExactCR.isEmptySet())
        return TrivialCase(false);

      APInt NewRHS;
      CmpInst::Predicate NewPred;
      if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
          ICmpInst::isEquality(NewPred)) {
        // We were able to convert an inequality to an equality.
        Pred = NewPred;
        RHS = getConstant(NewRHS);
        Changed = SimplifiedByConstantRange = true;
      }
    }

    if (!SimplifiedByConstantRange) {
      switch (Pred) {
      default:
        break;
      case ICmpInst::ICMP_EQ:
      case ICmpInst::ICMP_NE:
        // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
        if (!RA)
          if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
            if (const SCEVMulExpr *ME =
                    dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
              if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
                  ME->getOperand(0)->isAllOnesValue()) {
                RHS = AE->getOperand(1);
                LHS = ME->getOperand(1);
                Changed = true;
              }
        break;

      // The "Should have been caught earlier!" messages refer to the fact
      // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
      // should have fired on the corresponding cases, and canonicalized the
      // check to trivial case.

      case ICmpInst::ICMP_UGE:
        assert(!RA.isMinValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_UGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_ULE:
        assert(!RA.isMaxValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_ULT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SGE:
        assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SLE:
        assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SLT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
    }
  }

  // Check for obvious equality.
  if (HasSameValue(LHS, RHS)) {
    if (ICmpInst::isTrueWhenEqual(Pred))
      return TrivialCase(true);
    if (ICmpInst::isFalseWhenEqual(Pred))
      return TrivialCase(false);
  }

  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
  // adding or subtracting 1 from one of the operands.
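  // For example, "x s<= y" becomes "x s< y+1" when y+1 cannot overflow, or
  // "x-1 s< y" when x-1 cannot underflow.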
  switch (Pred) {
  case ICmpInst::ICMP_SLE:
    if (!getSignedRangeMax(RHS).isMaxSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    } else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_SGE:
    if (!getSignedRangeMin(RHS).isMinSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_ULE:
    if (!getUnsignedRangeMax(RHS).isMaxValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    } else if (!getUnsignedRangeMin(LHS).isMinValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_UGE:
    if (!getUnsignedRangeMin(RHS).isMinValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    } else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    }
    break;
  default:
    break;
  }

  // TODO: More simplifications are possible here.

  // Recursively simplify until we either hit a recursion limit or nothing
  // changes.
  if (Changed)
    return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);

  return Changed;
}

bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  return getSignedRangeMax(S).isNegative();
}

bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  return getSignedRangeMin(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  return !getSignedRangeMin(S).isNegative();
}

bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  return !getSignedRangeMax(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  return isKnownNegative(S) || isKnownPositive(S);
}

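// Split S relative to loop L into its value on entry to L and its value after
// the first increment. As a sketch (assuming the usual rewriter behavior):
// for S = {A,+,B}<L>, the result is the pair (A, {A+B,+,B}<L>).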
std::pair<const SCEV *, const SCEV *>
ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) {
  // Compute SCEV on entry of loop L.
  const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this);
  if (Start == getCouldNotCompute())
    return { Start, Start };
  // Compute post increment SCEV for loop L.
  const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
  assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
  return { Start, PostInc };
}

bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // First collect all loops.
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(LHS, LoopsUsed);
  getUsedLoops(RHS, LoopsUsed);

  if (LoopsUsed.empty())
    return false;

  // Domination relationship must be a linear order on collected loops.
#ifndef NDEBUG
  for (auto *L1 : LoopsUsed)
    for (auto *L2 : LoopsUsed)
      assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
              DT.dominates(L2->getHeader(), L1->getHeader())) &&
             "Domination relationship is not a linear order");
#endif

  const Loop *MDL =
      *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
                        [&](const Loop *L1, const Loop *L2) {
                          return DT.properlyDominates(L1->getHeader(),
                                                      L2->getHeader());
                        });

  // Get init and post increment value for LHS.
  auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
  // If LHS contains an unknown non-invariant SCEV then bail out.
  if (SplitLHS.first == getCouldNotCompute())
    return false;
  assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
  // Get init and post increment value for RHS.
  auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
  // If RHS contains an unknown non-invariant SCEV then bail out.
  if (SplitRHS.first == getCouldNotCompute())
    return false;
  assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
  // It is possible that the init SCEV contains an invariant load which does
  // not dominate MDL and is not available at MDL loop entry, so we should
  // check it here.
  if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
      !isAvailableAtLoopEntry(SplitRHS.first, MDL))
    return false;

  // The backedge guard check seems to be faster than the entry one, so check
  // it first: in some cases it can short-circuit the whole estimation.
  return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
                                     SplitRHS.second) &&
         isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
}

bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS) {
  // Canonicalize the inputs first.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  if (isKnownViaInduction(Pred, LHS, RHS))
    return true;

  if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
    return true;

  // Otherwise see what can be done with some simple reasoning.
  return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
}

bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
                                         const SCEV *LHS, const SCEV *RHS,
                                         const Instruction *Context) {
  // TODO: Analyze guards and assumes from Context's block.
  return isKnownPredicate(Pred, LHS, RHS) ||
         isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS);
}

bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
                                              const SCEVAddRecExpr *LHS,
                                              const SCEV *RHS) {
  const Loop *L = LHS->getLoop();
  return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
         isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
}

Optional<ScalarEvolution::MonotonicPredicateType>
ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
                                           ICmpInst::Predicate Pred) {
  auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);

#ifndef NDEBUG
  // Verify an invariant: inverting the predicate should turn a monotonically
  // increasing change to a monotonically decreasing one, and vice versa.
  if (Result) {
    auto ResultSwapped =
        getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred));

    assert(ResultSwapped.hasValue() && "should be able to analyze both!");
    assert(ResultSwapped.getValue() != Result.getValue() &&
           "monotonicity should flip as we flip the predicate");
  }
#endif

  return Result;
}

Optional<ScalarEvolution::MonotonicPredicateType>
ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
                                               ICmpInst::Predicate Pred) {
  // A zero step value for LHS means the induction variable is essentially a
  // loop invariant value. We don't really depend on the predicate actually
  // flipping from false to true (for increasing predicates, and the other way
  // around for decreasing predicates), all we care about is that *if* the
  // predicate changes then it only changes from false to true.
  //
  // A zero step value in itself is not very useful, but there may be places
  // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
  // as general as possible.

  // Only handle LE/LT/GE/GT predicates.
  if (!ICmpInst::isRelational(Pred))
    return None;

  bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred);
  assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) &&
         "Should be greater or less!");

  // Check that AR does not wrap.
  if (ICmpInst::isUnsigned(Pred)) {
    if (!LHS->hasNoUnsignedWrap())
      return None;
    return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
  } else {
    assert(ICmpInst::isSigned(Pred) &&
           "Relational predicate is either signed or unsigned!");
    if (!LHS->hasNoSignedWrap())
      return None;

    const SCEV *Step = LHS->getStepRecurrence(*this);

    if (isKnownNonNegative(Step))
      return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;

    if (isKnownNonPositive(Step))
      return !IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;

    return None;
  }
}

bool ScalarEvolution::isLoopInvariantPredicate(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
    ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS,
    const SCEV *&InvariantRHS) {

  // If there is a loop-invariant, force it into the RHS, otherwise bail out.
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return false;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!ArLHS || ArLHS->getLoop() != L)
    return false;

  auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred);
  if (!MonotonicType)
    return false;
  // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
  // true as the loop iterates, and the backedge is control dependent on
  // "ArLHS `Pred` RHS" == true then we can reason as follows:
  //
  //   * if the predicate was false in the first iteration then the predicate
  //     is never evaluated again, since the loop exits without taking the
  //     backedge.
  //   * if the predicate was true in the first iteration then it will
  //     continue to be true for all future iterations since it is
  //     monotonically increasing.
  //
  // For both the above possibilities, we can replace the loop varying
  // predicate with its value on the first iteration of the loop (which is
  // loop invariant).
  //
  // A similar reasoning applies for a monotonically decreasing predicate, by
  // replacing true with false and false with true in the above two bullets.
  bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing;
  auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);

  if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
    return false;

  InvariantPred = Pred;
  InvariantLHS = ArLHS->getStart();
  InvariantRHS = RHS;
  return true;
}

bool ScalarEvolution::isLoopInvariantExitCondDuringFirstIterations(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
    const Instruction *Context, const SCEV *MaxIter,
    ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS,
    const SCEV *&InvariantRHS) {
  // Try to prove the following set of facts:
  // - The predicate is monotonic.
  // - If the check does not fail on the 1st iteration:
  //   - No overflow will happen during first MaxIter iterations;
  //   - It will not fail on the MaxIter'th iteration.
  // If the check does fail on the 1st iteration, we leave the loop and no
  // other checks matter.

  // If there is a loop-invariant, force it into the RHS, otherwise bail out.
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return false;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  auto *AR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AR || AR->getLoop() != L)
    return false;

  // The predicate must be relational (i.e. <, <=, >=, >).
  if (!ICmpInst::isRelational(Pred))
    return false;

  const SCEV *Step = AR->getStepRecurrence(*this);
  bool IsStepNonPositive = isKnownNonPositive(Step);
  if (!IsStepNonPositive && !isKnownNonNegative(Step))
    return false;
  bool HasNoSelfWrap = AR->hasNoSelfWrap();
  if (!HasNoSelfWrap)
    // If the iteration count has the same type as the AddRec, and the step is
    // +/- 1, even the max possible number of iterations is not enough to
    // self-wrap.
    if (MaxIter->getType() == AR->getType())
      if (Step == getOne(AR->getType()) || Step == getMinusOne(AR->getType()))
        HasNoSelfWrap = true;
  // Only proceed with non-self-wrapping ARs.
  if (!HasNoSelfWrap)
    return false;

  // Value of IV on suggested last iteration.
  const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this);
  // Does it still meet the requirement?
  if (!isKnownPredicateAt(Pred, Last, RHS, Context))
    return false;
  // We know that the addrec does not have a self-wrap. To prove that there is
  // no signed/unsigned wrap, we need to check that
  // Start <= Last for positive step or Start >= Last for negative step. Either
  // works for zero step.
  ICmpInst::Predicate NoOverflowPred =
      CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  if (IsStepNonPositive)
    NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred);
  const SCEV *Start = AR->getStart();
  if (!isKnownPredicateAt(NoOverflowPred, Start, Last, Context))
    return false;

  // Everything is fine.
  InvariantPred = Pred;
  InvariantLHS = Start;
  InvariantRHS = RHS;
  return true;
}

bool ScalarEvolution::isKnownPredicateViaConstantRanges(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
  if (HasSameValue(LHS, RHS))
    return ICmpInst::isTrueWhenEqual(Pred);

  // This code is split out from isKnownPredicate because it is called from
  // within isLoopEntryGuardedByCond.

  auto CheckRanges =
      [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) {
        return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS)
            .contains(RangeLHS);
      };

  // The check at the top of the function catches the case where the values are
  // known to be equal.
  if (Pred == CmpInst::ICMP_EQ)
    return false;

  if (Pred == CmpInst::ICMP_NE)
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
           CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
           isKnownNonZero(getMinusSCEV(LHS, RHS));

  if (CmpInst::isSigned(Pred))
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));

  return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
}

bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
                                                    const SCEV *LHS,
                                                    const SCEV *RHS) {
  // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer.
  // Return Y via OutY.
  auto MatchBinaryAddToConst =
      [this](const SCEV *Result, const SCEV *X, APInt &OutY,
             SCEV::NoWrapFlags ExpectedFlags) {
        const SCEV *NonConstOp, *ConstOp;
        SCEV::NoWrapFlags FlagsPresent;

        if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
            !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
          return false;

        OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
        return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
      };
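  // Illustration of the rules below on a made-up pair: with LHS == %x and
  // RHS == (%x + 5)<nsw>, MatchBinaryAddToConst(RHS, LHS, C, FlagNSW)
  // succeeds with C == 5, so both %x s<= (%x + 5)<nsw> (C >= 0) and
  // %x s< (%x + 5)<nsw> (C > 0) are known to hold.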

  APInt C;

  switch (Pred) {
  default:
    break;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    // X s<= (X + C)<nsw> if C >= 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
      return true;

    // (X + C)<nsw> s<= X if C <= 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
        !C.isStrictlyPositive())
      return true;
    break;

  case ICmpInst::ICMP_SGT:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLT:
    // X s< (X + C)<nsw> if C > 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
        C.isStrictlyPositive())
      return true;

    // (X + C)<nsw> s< X if C < 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
      return true;
    break;

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    // X u<= (X + C)<nuw> for any C
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW))
      return true;
    break;

  case ICmpInst::ICMP_UGT:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULT:
    // X u< (X + C)<nuw> if C != 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW) && !C.isNullValue())
      return true;
    break;
  }

  return false;
}

bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
                                                   const SCEV *LHS,
                                                   const SCEV *RHS) {
  if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
    return false;

  // Allowing an arbitrary number of activations of
  // isKnownPredicateViaSplitting on the stack can result in exponential time
  // complexity.
  SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);

  // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
  //
  // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
  // isKnownPredicate. isKnownPredicate is more powerful, but also more
  // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
  // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
  // use isKnownPredicate later if needed.
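  //
  // A made-up instance: for I = {0,+,1}<nsw> and L = %len with %len known
  // non-negative, I u< L splits into I s>= 0 (often provable from the nsw
  // recurrence starting at 0) and I s< %len, each of which may be provable
  // even when the unsigned fact is not directly known.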
  return isKnownNonNegative(RHS) &&
         isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
         isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
}

bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // No need to even try if we know the module has no guards.
  if (!HasGuards)
    return false;

  return any_of(*BB, [&](const Instruction &I) {
    using namespace llvm::PatternMatch;

    Value *Condition;
    return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
                         m_Value(Condition))) &&
           isImpliedCond(Pred, LHS, RHS, Condition, false);
  });
}

/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS. This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  if (VerifyIR)
    assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
           "This cannot be done on broken IR!");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
      dyn_cast<BranchInst>(Latch->getTerminator());
  if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
      isImpliedCond(Pred, LHS, RHS,
                    LoopContinuePredicate->getCondition(),
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
    return true;

  // We don't want more than one activation of the following loops on the stack
  // -- that can lead to O(n!) time complexity.
  if (WalkingBEDominatingConds)
    return false;

  SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);

  // See if we can exploit a trip count to prove the predicate.
  const auto &BETakenInfo = getBackedgeTakenInfo(L);
  const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
  if (LatchBECount != getCouldNotCompute()) {
    // We know that Latch branches back to the loop header exactly
    // LatchBECount times. This means the backedge condition at Latch is
    // equivalent to "{0,+,1} u< LatchBECount".
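    //
    // For instance (made-up numbers), if LatchBECount is the constant 10,
    // the implication below feeds the fact "{0,+,1} u< 10" into
    // isImpliedCond, which may in turn prove queries such as
    // "{5,+,1} u< 20" that hold on every iteration.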
    Type *Ty = LatchBECount->getType();
    auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
    const SCEV *LoopCounter =
        getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
    if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
                      LatchBECount))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, Latch->getTerminator()))
      continue;

    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
      return true;
  }

  // If the loop is not reachable from the entry block, we risk running into an
  // infinite loop as we walk up into the dom tree. These loops do not matter
  // anyway, so we just return a conservative answer when we see them.
  if (!DT.isReachableFromEntry(L->getHeader()))
    return false;

  if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
    return true;

  for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
       DTN != HeaderDTN; DTN = DTN->getIDom()) {
    assert(DTN && "should reach the loop header before reaching the root!");

    BasicBlock *BB = DTN->getBlock();
    if (isImpliedViaGuard(BB, Pred, LHS, RHS))
      return true;

    BasicBlock *PBB = BB->getSinglePredecessor();
    if (!PBB)
      continue;

    BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
    if (!ContinuePredicate || !ContinuePredicate->isConditional())
      continue;

    Value *Condition = ContinuePredicate->getCondition();

    // If we have an edge `E` within the loop body that dominates the only
    // latch, the condition guarding `E` also guards the backedge. This
    // reasoning works only for loops with a single latch.

    BasicBlockEdge DominatingEdge(PBB, BB);
    if (DominatingEdge.isSingleEdge()) {
      // We're constructively (and conservatively) enumerating edges within the
      // loop body that dominate the latch. The dominator tree better agree
      // with us on this:
      assert(DT.dominates(DominatingEdge, Latch) && "should be!");

      if (isImpliedCond(Pred, LHS, RHS, Condition,
                        BB != ContinuePredicate->getSuccessor(0)))
        return true;
    }
  }

  return false;
}

bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
                                                     ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS) {
  if (VerifyIR)
    assert(!verifyFunction(*BB->getParent(), &dbgs()) &&
           "This cannot be done on broken IR!");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  // If we cannot prove strict comparison (e.g. a > b), maybe we can prove
  // the facts (a >= b && a != b) separately. A typical situation is when the
  // non-strict comparison is known from ranges and non-equality is known from
  // dominating predicates. If we are proving strict comparison, we always try
  // to prove non-equality and non-strict comparison separately.
  auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred);
  const bool ProvingStrictComparison = (Pred != NonStrictPredicate);
  bool ProvedNonStrictComparison = false;
  bool ProvedNonEquality = false;

  if (ProvingStrictComparison) {
    ProvedNonStrictComparison =
        isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS);
    ProvedNonEquality =
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS);
    if (ProvedNonStrictComparison && ProvedNonEquality)
      return true;
  }

  // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard.
  auto ProveViaGuard = [&](const BasicBlock *Block) {
    if (isImpliedViaGuard(Block, Pred, LHS, RHS))
      return true;
    if (ProvingStrictComparison) {
      if (!ProvedNonStrictComparison)
        ProvedNonStrictComparison =
            isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS);
      if (!ProvedNonEquality)
        ProvedNonEquality =
            isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS);
      if (ProvedNonStrictComparison && ProvedNonEquality)
        return true;
    }
    return false;
  };

  // Try to prove (Pred, LHS, RHS) using isImpliedCond.
  auto ProveViaCond = [&](const Value *Condition, bool Inverse) {
    const Instruction *Context = &BB->front();
    if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, Context))
      return true;
    if (ProvingStrictComparison) {
      if (!ProvedNonStrictComparison)
        ProvedNonStrictComparison = isImpliedCond(NonStrictPredicate, LHS, RHS,
                                                  Condition, Inverse, Context);
      if (!ProvedNonEquality)
        ProvedNonEquality = isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS,
                                          Condition, Inverse, Context);
      if (ProvedNonStrictComparison && ProvedNonEquality)
        return true;
    }
    return false;
  };

  // Starting at the block's predecessor, climb up the predecessor chain, as
  // long as we can find predecessors that have unique successors leading to
  // the original block.
  const Loop *ContainingLoop = LI.getLoopFor(BB);
  const BasicBlock *PredBB;
  if (ContainingLoop && ContainingLoop->getHeader() == BB)
    PredBB = ContainingLoop->getLoopPredecessor();
  else
    PredBB = BB->getSinglePredecessor();
  for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
       Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
    if (ProveViaGuard(Pair.first))
      return true;

    const BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate ||
        LoopEntryPredicate->isUnconditional())
      continue;

    if (ProveViaCond(LoopEntryPredicate->getCondition(),
                     LoopEntryPredicate->getSuccessor(0) != Pair.second))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, BB))
      continue;

    if (ProveViaCond(CI->getArgOperand(0), false))
      return true;
  }

  return false;
}

bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
                                               ICmpInst::Predicate Pred,
                                               const SCEV *LHS,
                                               const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L)
    return false;

  // Both LHS and RHS must be available at loop entry.
  assert(isAvailableAtLoopEntry(LHS, L) &&
         "LHS is not available at Loop Entry");
  assert(isAvailableAtLoopEntry(RHS, L) &&
         "RHS is not available at Loop Entry");
  return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    const Value *FoundCondValue, bool Inverse,
                                    const Instruction *Context) {
  if (!PendingLoopPredicates.insert(FoundCondValue).second)
    return false;

  auto ClearOnExit =
      make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });

  // Recursively handle And and Or conditions.
  if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse,
                             Context) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse,
                             Context);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse,
                             Context) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse,
                             Context);
    }
  }

  const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // We have found a conditional branch that dominates the loop or controls
  // the loop latch. Check to see if it is the comparison we are looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, Context);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS, const SCEV *FoundRHS,
                                    const Instruction *Context) {
  // Balance the types.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    // For unsigned and equality predicates, try to prove that both found
    // operands fit into narrow unsigned range. If so, try to prove facts in
    // narrow types.
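    // For example (illustrative values only): if the found fact is
    // "%a u< %b" in i64 and both %a and %b are known to be u<= 255, the
    // same fact holds after truncation to i8, which may let us prove an i8
    // query without widening it.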
    if (!CmpInst::isSigned(FoundPred)) {
      auto *NarrowType = LHS->getType();
      auto *WideType = FoundLHS->getType();
      auto BitWidth = getTypeSizeInBits(NarrowType);
      const SCEV *MaxValue = getZeroExtendExpr(
          getConstant(APInt::getMaxValue(BitWidth)), WideType);
      if (isKnownPredicate(ICmpInst::ICMP_ULE, FoundLHS, MaxValue) &&
          isKnownPredicate(ICmpInst::ICMP_ULE, FoundRHS, MaxValue)) {
        const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType);
        const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType);
        if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS,
                                       TruncFoundRHS, Context))
          return true;
      }
    }

    if (CmpInst::isSigned(Pred)) {
      LHS = getSignExtendExpr(LHS, FoundLHS->getType());
      RHS = getSignExtendExpr(RHS, FoundLHS->getType());
    } else {
      LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
      RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
    }
  } else if (getTypeSizeInBits(LHS->getType()) >
             getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(FoundPred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }
  return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS,
                                    FoundRHS, Context);
}

bool ScalarEvolution::isImpliedCondBalancedTypes(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS,
    const Instruction *Context) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(FoundLHS->getType()) &&
         "Types should be balanced!");
  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(FoundPred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, Context);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred), RHS,
                                   LHS, FoundLHS, FoundRHS, Context);
  }

  // An unsigned comparison is equivalent to the corresponding signed
  // comparison when both operands are known to be non-negative.
  if (CmpInst::isUnsigned(FoundPred) &&
      CmpInst::getSignedPredicate(FoundPred) == Pred &&
      isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context);

  // Check if we can make progress by sharpening ranges.
  if (FoundPred == ICmpInst::ICMP_NE &&
      (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {

    const SCEVConstant *C = nullptr;
    const SCEV *V = nullptr;

    if (isa<SCEVConstant>(FoundLHS)) {
      C = cast<SCEVConstant>(FoundLHS);
      V = FoundRHS;
    } else {
      C = cast<SCEVConstant>(FoundRHS);
      V = FoundLHS;
    }

    // The guarding predicate tells us that C != V. If the known range
    // of V is [C, t), we can sharpen the range to [C + 1, t). The
    // range we consider has to correspond to same signedness as the
    // predicate we're interested in folding.

    APInt Min = ICmpInst::isSigned(Pred) ?
        getSignedRangeMin(V) : getUnsignedRangeMin(V);

    if (Min == C->getAPInt()) {
      // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
      // This is true even if (Min + 1) wraps around -- in case of
      // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).
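      //
      // A small made-up instance: if the unsigned range of V is [0, 100)
      // and a dominating guard gives V != 0, then V u>= 1, so a query such
      // as "V u> 0" becomes provable from the sharpened minimum.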

      APInt SharperMin = Min + 1;

      switch (Pred) {
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGE:
        // We know V `Pred` SharperMin. If this implies LHS `Pred`
        // RHS, we're done.
        if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
                                  Context))
          return true;
        LLVM_FALLTHROUGH;

      case ICmpInst::ICMP_SGT:
      case ICmpInst::ICMP_UGT:
        // We know from the range information that (V `Pred` Min ||
        // V == Min). We know from the guarding condition that !(V
        // == Min). This gives us
        //
        //   V `Pred` Min || V == Min && !(V == Min)
        // => V `Pred` Min
        //
        // If V `Pred` Min implies LHS `Pred` RHS, we're done.

        if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min),
                                  Context))
          return true;
        break;
      // `LHS < RHS` and `LHS <= RHS` are handled in the same way as
      // `RHS > LHS` and `RHS >= LHS` respectively.
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_ULE:
        if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
                                  LHS, V, getConstant(SharperMin), Context))
          return true;
        LLVM_FALLTHROUGH;

      case ICmpInst::ICMP_SLT:
      case ICmpInst::ICMP_ULT:
        if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
                                  LHS, V, getConstant(Min), Context))
          return true;
        break;

      default:
        // No change
        break;
      }
    }
  }

  // Check whether the actual condition is beyond sufficient.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS,
                                Context))
        return true;

  // Otherwise assume the worst.
  return false;
}

bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
                                     const SCEV *&L, const SCEV *&R,
                                     SCEV::NoWrapFlags &Flags) {
  const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
  if (!AE || AE->getNumOperands() != 2)
    return false;

  L = AE->getOperand(0);
  R = AE->getOperand(1);
  Flags = AE->getNoWrapFlags();
  return true;
}

Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
                                                           const SCEV *Less) {
  // We avoid subtracting expressions here because this function is usually
  // fairly deep in the call stack (i.e. is called many times).
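  //
  // Instead we pattern-match the few shapes handled below. Illustrative
  // (made-up) inputs and results:
  //   More = (%x + 10), Less = (%x + 6)                -->  4
  //   More = {%x + 10,+,1}<L>, Less = {%x + 6,+,1}<L>  -->  4
  //   More = %x, Less = %y                             -->  None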

  // X - X = 0.
  if (More == Less)
    return APInt(getTypeSizeInBits(More->getType()), 0);

  if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
    const auto *LAR = cast<SCEVAddRecExpr>(Less);
    const auto *MAR = cast<SCEVAddRecExpr>(More);

    if (LAR->getLoop() != MAR->getLoop())
      return None;

    // We look at affine expressions only; not for correctness but to keep
    // getStepRecurrence cheap.
    if (!LAR->isAffine() || !MAR->isAffine())
      return None;

    if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
      return None;

    Less = LAR->getStart();
    More = MAR->getStart();

    // fall through
  }

  if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) {
    const auto &M = cast<SCEVConstant>(More)->getAPInt();
    const auto &L = cast<SCEVConstant>(Less)->getAPInt();
    return M - L;
  }

  SCEV::NoWrapFlags Flags;
  const SCEV *LLess = nullptr, *RLess = nullptr;
  const SCEV *LMore = nullptr, *RMore = nullptr;
  const SCEVConstant *C1 = nullptr, *C2 = nullptr;
  // Compare (X + C1) vs X.
  if (splitBinaryAdd(Less, LLess, RLess, Flags))
    if ((C1 = dyn_cast<SCEVConstant>(LLess)))
      if (RLess == More)
        return -(C1->getAPInt());

  // Compare X vs (X + C2).
  if (splitBinaryAdd(More, LMore, RMore, Flags))
    if ((C2 = dyn_cast<SCEVConstant>(LMore)))
      if (RMore == Less)
        return C2->getAPInt();

  // Compare (X + C1) vs (X + C2).
  if (C1 && C2 && RLess == RMore)
    return C2->getAPInt() - C1->getAPInt();

  return None;
}

bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *Context) {
  // Try to recognize the following pattern:
  //
  // FoundRHS = ...
  // ...
  // loop:
  //   FoundLHS = {Start,+,W}
  // context_bb: // Basic block from the same loop
  //   known(Pred, FoundLHS, FoundRHS)
  //
  // If some predicate is known in the context of a loop, it is also known on
  // each iteration of this loop, including the first iteration. Therefore, in
  // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to
  // prove the original pred using this fact.
  if (!Context)
    return false;
  const BasicBlock *ContextBB = Context->getParent();
  // Make sure AR varies in the context block.
  if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) {
    const Loop *L = AR->getLoop();
    // Make sure that context belongs to the loop and executes on 1st iteration
    // (if it ever executes at all).
    if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
      return false;
    if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop()))
      return false;
    return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS);
  }

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) {
    const Loop *L = AR->getLoop();
    // Make sure that context belongs to the loop and executes on 1st iteration
    // (if it ever executes at all).
    if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
      return false;
    if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop()))
      return false;
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart());
  }

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    const SCEV *FoundLHS, const SCEV *FoundRHS) {
  if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
    return false;

  const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRecLHS)
    return false;

  const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
  if (!AddRecFoundLHS)
    return false;

  // We'd like to let SCEV reason about control dependencies, so we constrain
  // both the inequalities to be about add recurrences on the same loop. This
  // way we can use isLoopEntryGuardedByCond later.

  const Loop *L = AddRecFoundLHS->getLoop();
  if (L != AddRecLHS->getLoop())
    return false;

  // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1)
  //
  // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C)
  //                                                                  ... (2)
  //
  // Informal proof for (2), assuming (1) [*]:
  //
  // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**]
  //
  // Then
  //
  //   FoundLHS s< FoundRHS s< INT_MIN - C
  // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C      [ using (3) ]
  // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C)    [ using (1) ]
  // <=> (FoundLHS + INT_MIN + C + INT_MIN) s<
  //                        (FoundRHS + INT_MIN + C + INT_MIN)   [ using (3) ]
  // <=> FoundLHS + C s< FoundRHS + C
  //
  // [*]: (1) can be proved by ruling out overflow.
  //
  // [**]: This can be proved by analyzing all the four possibilities:
  //    (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
  //    (A s>= 0, B s>= 0).
  //
  // Note:
  // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
  // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS
  // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS
  // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is
  // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
  // C)".

  Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
  Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
  if (!LDiff || !RDiff || *LDiff != *RDiff)
    return false;

  if (LDiff->isMinValue())
    return true;

  APInt FoundRHSLimit;

  if (Pred == CmpInst::ICMP_ULT) {
    FoundRHSLimit = -(*RDiff);
  } else {
    assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
    FoundRHSLimit =
        APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
  }

  // Try to prove (1) or (2), as needed.
  return isAvailableAtLoopEntry(FoundRHS, L) &&
         isLoopEntryGuardedByCond(L, Pred, FoundRHS,
                                  getConstant(FoundRHSLimit));
}

bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS,
                                        const SCEV *FoundLHS,
                                        const SCEV *FoundRHS, unsigned Depth) {
  const PHINode *LPhi = nullptr, *RPhi = nullptr;

  auto ClearOnExit = make_scope_exit([&]() {
    if (LPhi) {
      bool Erased = PendingMerges.erase(LPhi);
      assert(Erased && "Failed to erase LPhi!");
      (void)Erased;
    }
    if (RPhi) {
      bool Erased = PendingMerges.erase(RPhi);
      assert(Erased && "Failed to erase RPhi!");
      (void)Erased;
    }
  });
  // Find the respective Phis and check that they are not pending.
  if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
    if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
      if (!PendingMerges.insert(Phi).second)
        return false;
      LPhi = Phi;
    }
  if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
    if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
      // If we detect a loop of Phi nodes being processed by this method, for
      // example:
      //
      // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
      // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
      //
      // we don't want to deal with a case that complex, so return conservative
      // answer false.
      if (!PendingMerges.insert(Phi).second)
        return false;
      RPhi = Phi;
    }

  // If none of LHS, RHS is a Phi, nothing to do here.
  if (!LPhi && !RPhi)
    return false;

  // If there is a SCEVUnknown Phi we are interested in, make it left.
  if (!LPhi) {
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
    std::swap(LPhi, RPhi);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
  const BasicBlock *LBB = LPhi->getParent();
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);

  auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
           isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
           isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
  };

  if (RPhi && RPhi->getParent() == LBB) {
    // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
    // If we compare two Phis from the same block, and the predicate holds for
    // the incoming values from each predecessor block, then it also holds for
    // the Phis themselves.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, R))
        return false;
    }
  } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
    // Case two: RHS is an AddRec whose loop's header is LBB. This means the
    // loop has both an AddRec and a SCEVUnknown Phi in its header, so we can
    // compare the AddRec's incoming values from above the loop and from the
    // latch with the respective incoming values of LPhi.
    // TODO: Generalize to handle loops with many inputs in a header.
    if (LPhi->getNumIncomingValues() != 2) return false;

    auto *RLoop = RAR->getLoop();
    auto *Predecessor = RLoop->getLoopPredecessor();
    assert(Predecessor && "Loop with AddRec with no predecessor?");
    const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
    if (!ProvedEasily(L1, RAR->getStart()))
      return false;
    auto *Latch = RLoop->getLoopLatch();
    assert(Latch && "Loop with AddRec with no latch?");
    const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
    if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
      return false;
  } else {
    // In all other cases go over the inputs of LHS and compare each of them
    // to RHS; the predicate is true for (LHS, RHS) if it is true for all such
    // pairs. At this point RHS is either a non-Phi, or it is a Phi from some
    // block different from LBB.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      // Check that RHS is available in this block.
      if (!dominates(RHS, IncBB))
        return false;
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, RHS))
        return false;
    }
  }
  return true;
}

bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS,
                                            const Instruction *Context) {
  if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS,
                                          Context))
    return true;

  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS) ||
         // ~x < ~y --> x > y
         isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     getNotSCEV(FoundRHS),
                                     getNotSCEV(FoundLHS));
}

/// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
template <typename MinMaxExprType>
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
                                 const SCEV *Candidate) {
  const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
  if (!MinMaxExpr)
    return false;

  return find(MinMaxExpr->operands(), Candidate) != MinMaxExpr->op_end();
}

static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
                                           ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  // If both sides are affine addrecs for the same loop, with equal
  // steps, and we know the recurrences don't wrap, then we only
  // need to check the predicate on the starting values.
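  //
  // For example (hypothetical recurrences): {0,+,4}<nuw> u< {16,+,4}<nuw>
  // on the same loop reduces to the start-value check 0 u< 16, because both
  // sides advance in lockstep and neither is allowed to wrap.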

  if (!ICmpInst::isRelational(Pred))
    return false;

  const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!LAR)
    return false;
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  if (!RAR)
    return false;
  if (LAR->getLoop() != RAR->getLoop())
    return false;
  if (!LAR->isAffine() || !RAR->isAffine())
    return false;

  if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
    return false;

  SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
      SCEV::FlagNSW : SCEV::FlagNUW;
  if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
    return false;

  return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
}

/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
/// expression?
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    return
        // min(A, ...) <= A
        IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    return
        // min(A, ...) <= A
        IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}

bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(RHS->getType()) &&
         "LHS and RHS have different sizes?");
  assert(getTypeSizeInBits(FoundLHS->getType()) ==
             getTypeSizeInBits(FoundRHS->getType()) &&
         "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting the compile time with analysis of too big trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;

  // So far we only handle GT comparisons; rewrite LT queries by swapping the
  // operands and the predicate.
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) {
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }

  // For unsigned, try to reduce it to corresponding signed comparison.
  if (Pred == ICmpInst::ICMP_UGT)
    // We can replace unsigned predicate with its signed counterpart if all
    // involved values are non-negative.
    // TODO: We could have better support for unsigned.
    if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) {
      // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing
      // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us
      // use this fact to prove that LHS and RHS are non-negative.
      const SCEV *MinusOne = getMinusOne(LHS->getType());
      if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
                                FoundRHS) &&
          isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
                                FoundRHS))
        Pred = ICmpInst::ICMP_SGT;
    }

  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigLHS = LHS;
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Whether the SGT predicate can be proved trivially or using the found
  // context.
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV. Since we are
    // going to compare the operands to RHS, we should be certain that we don't
    // need any size extensions for this. So let's decline all cases when the
    // sizes of types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
    if (!LHSAddExpr->hasNoSignedWrap())
      return false;

    auto *LL = LHSAddExpr->getOperand(0);
    auto *LR = LHSAddExpr->getOperand(1);
    auto *MinusOne = getMinusOne(RHS->getType());

    // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
    auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
      return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
    };
    // Try to prove the following rule:
    // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
    // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
    if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
      return true;
  } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
    Value *LL, *LR;
    // FIXME: Once we have SDiv implemented, we can get rid of this matching.

    using namespace llvm::PatternMatch;

    if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
      // Rules for division.
      // We are going to perform some comparisons with Denominator and its
      // derivative expressions. In general case, creating a SCEV for it may
      // lead to a complex analysis of the entire graph, and in particular it
      // can request trip count recalculation for the same loop. This would
      // cache as SCEVCouldNotCompute to avoid the infinite recursion. To avoid
      // this, we only want to create SCEVs that are constants in this section.
      // So we bail if Denominator is not a constant.
      if (!isa<ConstantInt>(LR))
        return false;

      auto *Denominator = cast<SCEVConstant>(getSCEV(LR));

      // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
      // then a SCEV for the numerator already exists and matches with FoundLHS.
      auto *Numerator = getExistingSCEV(LL);
      if (!Numerator || Numerator->getType() != FoundLHS->getType())
        return false;

      // Make sure that the numerator matches with FoundLHS and the denominator
      // is positive.
      if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
        return false;

      auto *DTy = Denominator->getType();
      auto *FRHSTy = FoundRHS->getType();
      if (DTy->isPointerTy() != FRHSTy->isPointerTy())
        // One of types is a pointer and another one is not. We cannot extend
        // them properly to a wider type, so let us just reject this case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
        // to avoid this check.
        return false;

      // Given that:
      // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we
      // divide it by Denominator < 4, the result is at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given that FoundLHS > -3, FoundLHS is at least -2. If we
      // divide it by Denominator > 2, then:
      // 1. If FoundLHS is negative, then the result is 0.
      // 2. If FoundLHS is non-negative, then the result is non-negative.
      // Either way, the result is non-negative.
      auto *MinusOne = getMinusOne(WTy);
      auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
      if (isKnownNegative(RHS) &&
          IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
        return true;
    }
  }

  // If our expression contained SCEVUnknown Phis, and we split it down and now
  // need to prove something for them, try to prove the predicate for every
  // possible incoming value of those Phis.
  if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
    return true;

  return false;
}

static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // zext x u<= sext x, sext x s<= zext x
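  //
  // A concrete illustration with i8 -> i16 (values picked for exposition):
  // for x = -1, zext(x) == 255 and sext(x) == 0xFFFF, so zext(x) u<= sext(x)
  // and, read signed, sext(x) == -1 s<= 255 == zext(x). For non-negative x
  // both extensions agree, so the idiom holds for every x.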
  switch (Pred) {
  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE: {
    // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt.
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE: {
    // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt.
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  default:
    break;
  }
  return false;
}

bool
ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
                                                 const SCEV *LHS,
                                                 const SCEV *RHS) {
  return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
         isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
         IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
         IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
         isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}

bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile time impact of this optimization.
    return false;

  Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
  ConstantRange FoundLHSRange =
      ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);

  // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
  ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));

  // We can also compute the range of values for `LHS` that satisfy the
  // consequent, "`LHS` `Pred` `RHS`":
  const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
  ConstantRange SatisfyingLHSRange =
      ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);

  // The antecedent implies the consequent if every value of `LHS` that
  // satisfies the antecedent also satisfies the consequent.
  return SatisfyingLHSRange.contains(LHSRange);
}

bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  assert(isKnownPositive(Stride) && "Positive stride expected!");

  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MaxRHS = getSignedRangeMax(RHS);
    APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
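    //
    // A numeric sketch in i8 (illustrative values): with MaxRHS = 120 and
    // Stride = 16, MaxStrideMinusOne = 15 and SMaxValue = 127; since
    // 127 - 15 = 112 s< 120, the IV may step past RHS and wrap around, so
    // we conservatively report possible overflow.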
    return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
  }

  APInt MaxRHS = getUnsignedRangeMax(RHS);
  APInt MaxValue = APInt::getMaxValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
  return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
}

bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MinRHS = getSignedRangeMin(RHS);
    APInt MinValue = APInt::getSignedMinValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
    return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
  }

  APInt MinRHS = getUnsignedRangeMin(RHS);
  APInt MinValue = APInt::getMinValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
  return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
}

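// computeBECount folds the usual ceiling-division trip-count formula: with
// Equality == false it computes (Delta + Step - 1) /u Step, and with
// Equality == true it computes (Delta + Step) /u Step. A worked instance
// (numbers are illustrative only): Delta = 9, Step = 2, Equality = false
// gives (9 + 1) /u 2 = 5 backedges.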
const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
                                            bool Equality) {
  const SCEV *One = getOne(Step->getType());
  Delta = Equality ? getAddExpr(Delta, Step)
                   : getAddExpr(Delta, getMinusSCEV(Step, One));
  return getUDivExpr(Delta, Step);
}

const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start,
                                                    const SCEV *Stride,
                                                    const SCEV *End,
                                                    unsigned BitWidth,
                                                    bool IsSigned) {

  assert(!isKnownNonPositive(Stride) &&
         "Stride is expected strictly positive!");
  // Calculate the maximum backedge count based on the range of values
  // permitted by Start, End, and Stride.
  const SCEV *MaxBECount;
  APInt MinStart =
      IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);

  APInt StrideForMaxBECount =
      IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);

  // We already know that the stride is positive, so we paper over conservatism
  // in our range computation by forcing StrideForMaxBECount to be at least one.
  // In theory this is unnecessary, but we expect MaxBECount to be a
  // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV (there
  // is nothing to constant fold it to).
  APInt One(BitWidth, 1, IsSigned);
  StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount);

  APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
                            : APInt::getMaxValue(BitWidth);
  APInt Limit = MaxValue - (StrideForMaxBECount - 1);

  // Although End can be a MAX expression we estimate MaxEnd considering only
  // the case End = RHS of the loop termination condition. This is safe because
  // in the other case (End - Start) is zero, leading to a zero maximum backedge
  // taken count.
  APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
                          : APIntOps::umin(getUnsignedRangeMax(End), Limit);

  MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */,
                              getConstant(StrideForMaxBECount) /* Step */,
                              false /* Equality */);

  return MaxBECount;
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();
11057
11058 bool NoWrap = ControlsExit &&
11059 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
11060
11061 const SCEV *Stride = IV->getStepRecurrence(*this);
11062
11063 bool PositiveStride = isKnownPositive(Stride);
11064
11065 // Avoid negative or zero stride values.
11066 if (!PositiveStride) {
11067 // We can compute the correct backedge taken count for loops with unknown
11068 // strides if we can prove that the loop is not an infinite loop with side
11069 // effects. Here's the loop structure we are trying to handle -
11070 //
11071 // i = start
11072 // do {
11073 // A[i] = i;
11074 // i += s;
11075 // } while (i < end);
11076 //
11077 // The backedge taken count for such loops is evaluated as -
11078 // (max(end, start + stride) - start - 1) /u stride
11079 //
    // The additional preconditions that we need to check to prove the
    // correctness of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) loop is single exit with no side effects.
    //
    //
    // Precondition a) implies that if the stride is negative, this is a single
    // trip loop. The backedge taken count formula reduces to zero in this case.
    //
    // Precondition b) implies that the unknown stride cannot be zero otherwise
    // we have UB.
    //
    // The positive stride case is the same as isKnownPositive(Stride) returning
    // true (original behavior of the function).
    //
    // We want to make sure that the stride is truly unknown as there are edge
    // cases where ScalarEvolution propagates no wrap flags to the
    // post-increment/decrement IV even though the increment/decrement operation
    // itself is wrapping. The computed backedge taken count may be wrong in
    // such cases. This is prevented by checking that the stride is not known to
    // be either positive or non-positive. For example, no wrap flags are
    // propagated to the post-increment IV of this loop with a trip count of 2 -
    //
    //   unsigned char i;
    //   for (i = 127; i < 128; i += 129)
    //     A[i] = i;
    //
    if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
        !loopHasNoSideEffects(L))
      return getCouldNotCompute();
  } else if (!Stride->isOne() &&
             doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow. Relaxed no-overflow
    // conditions exploit NoWrapFlags, allowing optimization in the presence
    // of language-level undefined behavior, as in C.
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
                                      : ICmpInst::ICMP_ULT;
  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  // When the RHS is not invariant, we do not know the end bound of the loop and
  // cannot calculate the ExactBECount needed by ExitLimit. However, we can
  // calculate the MaxBECount, given the start, stride and max value for the end
  // bound of the loop (RHS), and the fact that IV does not overflow (which is
  // checked above).
  if (!isLoopInvariant(RHS, L)) {
    const SCEV *MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
    return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                     false /*MaxOrZero*/, Predicates);
  }
  // If the backedge is taken at least once, then it will be taken
  // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start
  // is the LHS value of the less-than comparison the first time it is evaluated
  // and End is the RHS.
  const SCEV *BECountIfBackedgeTaken =
      computeBECount(getMinusSCEV(End, Start), Stride, false);
  // If the loop entry is guarded by the result of the backedge test of the
  // first loop iteration, then we know the backedge will be taken at least
  // once and so the backedge taken count is as above. If not then we use the
  // expression (max(End,Start)-Start)/Stride to describe the backedge count,
  // as if the backedge is taken at least once max(End,Start) is End and so the
  // result is as above, and if not max(End,Start) is Start so we get a backedge
  // count of zero.
  const SCEV *BECount;
  if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
    BECount = BECountIfBackedgeTaken;
  else {
    // If we know that RHS >= Start in the context of loop, then we know that
    // max(RHS, Start) = RHS at this point.
    if (isLoopEntryGuardedByCond(
            L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, RHS, Start))
      End = RHS;
    else
      End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
    BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
  }

  const SCEV *MaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    MaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else {
    MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
  }

  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
}
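
// To make the guarded/unguarded split above concrete (illustrative only): for
// `for (i = s; i < n; i += 2)` with IV {s,+,2}<L>, Start == s and End == n, so
// BECountIfBackedgeTaken == (n - s + 1) /u 2.  When loop entry does not
// guarantee the backedge is taken, End widens to max(n, s), so a loop whose
// body never runs (n <= s) still folds to a backedge count of zero.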

ScalarEvolution::ExitLimit
ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
                                     const Loop *L, bool IsSigned,
                                     bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV > Invariant
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow. Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing optimization in the presence of
  // language-level undefined behavior, as in C.
  if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
                                      : ICmpInst::ICMP_UGT;

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) {
    // If we know that Start >= RHS in the context of loop, then we know that
    // min(RHS, Start) = RHS at this point.
    if (isLoopEntryGuardedByCond(
            L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS))
      End = RHS;
    else
      End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);
  }

  const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                            : getUnsignedRangeMax(Start);

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression we estimate MinEnd considering only
  // the case End = RHS. This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
      IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
               : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);

  const SCEV *MaxBECount = isa<SCEVConstant>(BECount)
                               ? BECount
                               : computeBECount(getConstant(MaxStart - MinEnd),
                                                getConstant(MinStride), false);

  if (isa<SCEVCouldNotCompute>(MaxBECount))
    MaxBECount = BECount;

  return ExitLimit(BECount, MaxBECount, false, Predicates);
}
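
// The mirror image of the less-than case (illustrative only): for
// `for (i = s; i > n; i -= 3)` the step is -3, so Stride above is 3.  With a
// guarded entry, End == n and the count is (s - n + 2) /u 3; otherwise End
// becomes min(n, s), so a never-running loop (s <= n) folds to zero.  The
// Limit clamp keeps MinEnd at or above INT_MIN + 2 in the signed case, so
// MaxStart - MinEnd cannot wrap.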

const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
      Operands[0] = SE.getZero(SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                             getNoWrapFlags(FlagNW));
      if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
            Range.subtract(SC->getAPInt()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
    return SE.getCouldNotCompute();

  // Okay at this point we know that all elements of the chrec are constants and
  // that the start element is zero.

  // First check to see if the range contains zero.  If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getZero(getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range

    // We know that zero is in the range.  If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value.  Also note that we already checked for a full range.
    APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
    APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value.  If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range.  This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
               ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  }

  if (isQuadratic()) {
    if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
      return SE.getConstant(S.getValue());
  }

  return SE.getCouldNotCompute();
}
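
// Tracing the affine case with small numbers (illustrative only): for
// {0,+,2} and Range == [0, 10), A == 2 and End == 9, so
// ExitVal == (9 + 2) /u 2 == 5.  Evaluating the chrec at 5 gives 10, which is
// outside the range, while the value at 4 is 8, still inside, so the
// recurrence leaves the range on exactly the fifth iteration.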

const SCEVAddRecExpr *
SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
  assert(getNumOperands() > 1 && "AddRec with zero step?");
  // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
  // but in this case we cannot guarantee that the value returned will be an
  // AddRec because SCEV does not have a fixed point where it stops
  // simplification: it is legal to return ({rec1} + {rec2}). For example, it
  // may happen if we reach arithmetic depth limit while simplifying. So we
  // construct the returned value explicitly.
  SmallVector<const SCEV *, 3> Ops;
  // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
  // (this + Step) is {A+B,+,B+C,+...,+,N}.
  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
  // We know that the last operand is not a constant zero (otherwise it would
  // have been popped out earlier). This guarantees us that if the result has
  // the same last operand, then it will also not be popped out, meaning that
  // the returned value will be an AddRec.
  const SCEV *Last = getOperand(getNumOperands() - 1);
  assert(!Last->isZero() && "Recurrence with zero step?");
  Ops.push_back(Last);
  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
                                               SCEV::FlagAnyWrap));
}
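
// A quick sanity check of the pairwise sums above (illustrative only): the
// post-increment form of the quadratic chrec {3,+,5,+,2} is {3+5,+,5+2,+,2}
// == {8,+,7,+,2}, whose value at iteration i equals the original chrec's
// value at iteration i+1 (8 at i=0 vs. 8 at i=1, 15 at i=1 vs. 15 at i=2).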

// Return true when S contains at least an undef value.
static inline bool containsUndefs(const SCEV *S) {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    return false;
  });
}

namespace {

// Collect all steps of SCEV expressions.
struct SCEVCollectStrides {
  ScalarEvolution &SE;
  SmallVectorImpl<const SCEV *> &Strides;

  SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
      : SE(SE), Strides(S) {}

  bool follow(const SCEV *S) {
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      Strides.push_back(AR->getStepRecurrence(SE));
    return true;
  }

  bool isDone() const { return false; }
};

// Collect all SCEVUnknown and SCEVMulExpr expressions.
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
        isa<SCEVSignExtendExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we found an AddRec, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
// parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExpr.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec = false;
          SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
          visitAll(Op, ContainsAddRecVisitor);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

} // end anonymous namespace

/// Find parametric terms in this SCEVAddRecExpr. We first look for parameters
/// in two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
void ScalarEvolution::collectParametricTerms(const SCEV *Expr,
                                             SmallVectorImpl<const SCEV *> &Terms) {
  SmallVector<const SCEV *, 4> Strides;
  SCEVCollectStrides StrideCollector(*this, Strides);
  visitAll(Expr, StrideCollector);

  LLVM_DEBUG({
    dbgs() << "Strides:\n";
    for (const SCEV *S : Strides)
      dbgs() << *S << "\n";
  });

  for (const SCEV *S : Strides) {
    SCEVCollectTerms TermCollector(Terms);
    visitAll(S, TermCollector);
  }

  LLVM_DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  SCEVCollectAddRecMultiplies MulCollector(Terms, *this);
  visitAll(Expr, MulCollector);
}
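
// For the AddRec used in the delinearization example further below,
// {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>, the stride
// collector gathers (8 * %m * %o), (8 * %o) and 8, and the term collector
// then keeps the parametric products (8 * %m * %o) and (8 * %o), from which
// the array dimensions %m and %o can later be recovered.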

static bool findArrayDimensionsRec(ScalarEvolution &SE,
                                   SmallVectorImpl<const SCEV *> &Terms,
                                   SmallVectorImpl<const SCEV *> &Sizes) {
  int Last = Terms.size() - 1;
  const SCEV *Step = Terms[Last];

  // End of recursion.
  if (Last == 0) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
      SmallVector<const SCEV *, 2> Qs;
      for (const SCEV *Op : M->operands())
        if (!isa<SCEVConstant>(Op))
          Qs.push_back(Op);

      Step = SE.getMulExpr(Qs);
    }

    Sizes.push_back(Step);
    return true;
  }

  for (const SCEV *&Term : Terms) {
    // Normalize the terms before the next call to findArrayDimensionsRec.
    const SCEV *Q, *R;
    SCEVDivision::divide(SE, Term, Step, &Q, &R);

    // Bail out when GCD does not evenly divide one of the terms.
    if (!R->isZero())
      return false;

    Term = Q;
  }

  // Remove all SCEVConstants.
  Terms.erase(
      remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }),
      Terms.end());

  if (Terms.size() > 0)
    if (!findArrayDimensionsRec(SE, Terms, Sizes))
      return false;

  Sizes.push_back(Step);
  return true;
}
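
// Walking through one level of the recursion above (illustrative only): with
// Terms == {%m * %o, %o}, the last term %o is taken as Step; dividing
// %m * %o by %o leaves %m with a zero remainder, and the recursive call on
// {%m} pushes %m before %o is appended, yielding Sizes == {%m, %o} -- the
// inner dimensions of the example array in the delinearize comment below.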

// Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter.
static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
  for (const SCEV *T : Terms)
    if (SCEVExprContains(T, [](const SCEV *S) { return isa<SCEVUnknown>(S); }))
      return true;

  return false;
}

// Return the number of product terms in S.
static inline int numberOfTerms(const SCEV *S) {
  if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
    return Expr->getNumOperands();
  return 1;
}

static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
  if (isa<SCEVConstant>(T))
    return nullptr;

  if (isa<SCEVUnknown>(T))
    return T;

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
    SmallVector<const SCEV *, 2> Factors;
    for (const SCEV *Op : M->operands())
      if (!isa<SCEVConstant>(Op))
        Factors.push_back(Op);

    return SE.getMulExpr(Factors);
  }

  return T;
}

/// Return the size of an element read or written by Inst.
const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
  Type *Ty;
  if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
    Ty = Store->getValueOperand()->getType();
  else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
    Ty = Load->getType();
  else
    return nullptr;

  Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
  return getSizeOfExpr(ETy, Ty);
}

void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
                                          SmallVectorImpl<const SCEV *> &Sizes,
                                          const SCEV *ElementSize) {
  if (Terms.size() < 1 || !ElementSize)
    return;
  // Early return when Terms do not contain parameters: we do not delinearize
  // non-parametric SCEVs.
  if (!containsParameters(Terms))
    return;

  LLVM_DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  // Remove duplicates.
  array_pod_sort(Terms.begin(), Terms.end());
  Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());

  // Put larger terms first.
  llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) {
    return numberOfTerms(LHS) > numberOfTerms(RHS);
  });

  // Try to divide all terms by the element size. If term is not divisible by
  // element size, proceed with the original term.
  for (const SCEV *&Term : Terms) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
    if (!Q->isZero())
      Term = Q;
  }

  SmallVector<const SCEV *, 4> NewTerms;

  // Remove constant factors.
  for (const SCEV *T : Terms)
    if (const SCEV *NewT = removeConstantFactors(*this, T))
      NewTerms.push_back(NewT);

  LLVM_DEBUG({
    dbgs() << "Terms after sorting:\n";
    for (const SCEV *T : NewTerms)
      dbgs() << *T << "\n";
  });

  if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
    Sizes.clear();
    return;
  }

  // The last element to be pushed into Sizes is the size of an element.
  Sizes.push_back(ElementSize);

  LLVM_DEBUG({
    dbgs() << "Sizes:\n";
    for (const SCEV *S : Sizes)
      dbgs() << *S << "\n";
  });
}

void ScalarEvolution::computeAccessFunctions(
    const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
    SmallVectorImpl<const SCEV *> &Sizes) {
  // Early exit in case this SCEV is not an affine multivariate function.
  if (Sizes.empty())
    return;

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr))
    if (!AR->isAffine())
      return;

  const SCEV *Res = Expr;
  int Last = Sizes.size() - 1;
  for (int i = Last; i >= 0; i--) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);

    LLVM_DEBUG({
      dbgs() << "Res: " << *Res << "\n";
      dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
      dbgs() << "Res divided by Sizes[i]:\n";
      dbgs() << "Quotient: " << *Q << "\n";
      dbgs() << "Remainder: " << *R << "\n";
    });

    Res = Q;

    // Do not record the last subscript corresponding to the size of elements
    // in the array.
    if (i == Last) {

      // Bail out if the remainder is too complex.
      if (isa<SCEVAddRecExpr>(R)) {
        Subscripts.clear();
        Sizes.clear();
        return;
      }

      continue;
    }

    // Record the access function for the current subscript.
    Subscripts.push_back(R);
  }

  // Also push, in last position, the quotient left over by the last division:
  // after the reverse below it becomes the access function of the outermost
  // dimension.
  Subscripts.push_back(Res);

  std::reverse(Subscripts.begin(), Subscripts.end());

  LLVM_DEBUG({
    dbgs() << "Subscripts:\n";
    for (const SCEV *S : Subscripts)
      dbgs() << *S << "\n";
  });
}

/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
/// sizes of an array access. Returns the remainder of the delinearization that
/// is the offset start of the array. The SCEV->delinearize algorithm computes
/// the multiples of SCEV coefficients: that is a pattern matching of
/// subexpressions in the stride and base of a SCEV corresponding to the
/// computation of a GCD (greatest common divisor) of base and stride. When
/// SCEV->delinearize fails, it returns the SCEV unchanged.
///
/// For example: when analyzing the memory access A[i][j][k] in this loop nest
///
///   void foo(long n, long m, long o, double A[n][m][o]) {
///
///     for (long i = 0; i < n; i++)
///       for (long j = 0; j < m; j++)
///         for (long k = 0; k < o; k++)
///           A[i][j][k] = 1.0;
///   }
///
/// the delinearization input is the following AddRec SCEV:
///
///   AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
///
/// From this SCEV, we are able to say that the base offset of the access is %A
/// because it appears as an offset that does not divide any of the strides in
/// the loops:
///
///   CHECK: Base offset: %A
///
/// and then SCEV->delinearize determines the size of some of the dimensions of
/// the array, as these are the multiples by which the strides grow:
///
///   CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double)
///   bytes.
///
/// Note that the outermost dimension remains of UnknownSize because there are
/// no strides that would help identify the size of the last dimension: when
/// the array has been statically allocated, one could compute the size of that
/// dimension by dividing the overall size of the array by the size of the
/// known dimensions: %m * %o * 8.
///
/// Finally delinearize provides the access functions for the array reference
/// that corresponds to A[i][j][k] of the above C testcase:
///
///   CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
///
/// The testcases check the output of a function pass, DelinearizationPass,
/// that walks through all loads and stores of a function asking for the SCEV
/// of the memory access with respect to all enclosing loops, calling
/// SCEV->delinearize on that and printing the results.
void ScalarEvolution::delinearize(const SCEV *Expr,
                                  SmallVectorImpl<const SCEV *> &Subscripts,
                                  SmallVectorImpl<const SCEV *> &Sizes,
                                  const SCEV *ElementSize) {
  // First step: collect parametric terms.
  SmallVector<const SCEV *, 4> Terms;
  collectParametricTerms(Expr, Terms);

  if (Terms.empty())
    return;

  // Second step: find subscript sizes.
  findArrayDimensions(Terms, Sizes, ElementSize);

  if (Sizes.empty())
    return;

  // Third step: compute the access functions for each subscript.
  computeAccessFunctions(Expr, Subscripts, Sizes);

  if (Subscripts.empty())
    return;

  LLVM_DEBUG({
    dbgs() << "succeeded to delinearize " << *Expr << "\n";
    dbgs() << "ArrayDecl[UnknownSize]";
    for (const SCEV *S : Sizes)
      dbgs() << "[" << *S << "]";

    dbgs() << "\nArrayRef";
    for (const SCEV *S : Subscripts)
      dbgs() << "[" << *S << "]";
    dbgs() << "\n";
  });
}

bool ScalarEvolution::getIndexExpressionsFromGEP(
    const GetElementPtrInst *GEP, SmallVectorImpl<const SCEV *> &Subscripts,
    SmallVectorImpl<int> &Sizes) {
  assert(Subscripts.empty() && Sizes.empty() &&
         "Expected output lists to be empty on entry to this function.");
  assert(GEP && "getIndexExpressionsFromGEP called with a null GEP");
  Type *Ty = GEP->getPointerOperandType();
  bool DroppedFirstDim = false;
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    const SCEV *Expr = getSCEV(GEP->getOperand(i));
    if (i == 1) {
      if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
        Ty = PtrTy->getElementType();
      } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) {
        Ty = ArrayTy->getElementType();
      } else {
        Subscripts.clear();
        Sizes.clear();
        return false;
      }
      if (auto *Const = dyn_cast<SCEVConstant>(Expr))
        if (Const->getValue()->isZero()) {
          DroppedFirstDim = true;
          continue;
        }
      Subscripts.push_back(Expr);
      continue;
    }

    auto *ArrayTy = dyn_cast<ArrayType>(Ty);
    if (!ArrayTy) {
      Subscripts.clear();
      Sizes.clear();
      return false;
    }

    Subscripts.push_back(Expr);
    if (!(DroppedFirstDim && i == 2))
      Sizes.push_back(ArrayTy->getNumElements());

    Ty = ArrayTy->getElementType();
  }
  return !Subscripts.empty();
}
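
// Tracing the loop above on a typical nested-array GEP (illustrative only):
// for `getelementptr [4 x [5 x i32]], [4 x [5 x i32]]* %p, i64 0, i64 %i,
// i64 %j`, the leading zero index is dropped (DroppedFirstDim), %i and %j
// become the Subscripts, and only the inner bound 5 is recorded in Sizes --
// the outermost extent, like UnknownSize in delinearize, is not needed to
// compute the access functions.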

//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(getValPtr());
  // this now dangles!
}

void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  Value *Old = getValPtr();
  SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
  SmallPtrSet<User *, 8> Visited;
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle. Postpone
    // that until everything else is done.
    if (U == Old)
      continue;
    if (!Visited.insert(U).second)
      continue;
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->eraseValueFromMap(U);
    Worklist.insert(Worklist.end(), U->user_begin(), U->user_end());
  }
  // Delete the Old value.
  if (PHINode *PN = dyn_cast<PHINode>(Old))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(Old);
  // this now dangles!
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
    : CallbackVH(V), SE(se) {}

//===----------------------------------------------------------------------===//
//                   ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
                                 AssumptionCache &AC, DominatorTree &DT,
                                 LoopInfo &LI)
    : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
      CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
      LoopDispositions(64), BlockDispositions(64) {
  // To use guards for proving predicates, we need to scan every instruction in
  // relevant basic blocks, and not just terminators. Doing this is a waste of
  // time if the IR does not actually contain any calls to
  // @llvm.experimental.guard, so do a quick check and remember this beforehand.
  //
  // This pessimizes the case where a pass that preserves ScalarEvolution wants
  // to _add_ guards to the module when there weren't any before, and wants
  // ScalarEvolution to optimize based on those guards. For now we prefer to be
  // efficient in lieu of being smart in that rather obscure case.

  auto *GuardDecl = F.getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  HasGuards = GuardDecl && !GuardDecl->use_empty();
}

ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
    : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
      LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
      ValueExprMap(std::move(Arg.ValueExprMap)),
      PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
      PendingPhiRanges(std::move(Arg.PendingPhiRanges)),
      PendingMerges(std::move(Arg.PendingMerges)),
      MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)),
      BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
      PredicatedBackedgeTakenCounts(
          std::move(Arg.PredicatedBackedgeTakenCounts)),
      ConstantEvolutionLoopExitValue(
          std::move(Arg.ConstantEvolutionLoopExitValue)),
      ValuesAtScopes(std::move(Arg.ValuesAtScopes)),
      LoopDispositions(std::move(Arg.LoopDispositions)),
      LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)),
      BlockDispositions(std::move(Arg.BlockDispositions)),
      UnsignedRanges(std::move(Arg.UnsignedRanges)),
      SignedRanges(std::move(Arg.SignedRanges)),
      UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
      UniquePreds(std::move(Arg.UniquePreds)),
      SCEVAllocator(std::move(Arg.SCEVAllocator)),
      LoopUsers(std::move(Arg.LoopUsers)),
      PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)),
      FirstUnknown(Arg.FirstUnknown) {
  Arg.FirstUnknown = nullptr;
}

ScalarEvolution::~ScalarEvolution() {
  // Iterate through all the SCEVUnknown instances and call their
  // destructors, so that they release their references to their values.
  for (SCEVUnknown *U = FirstUnknown; U;) {
    SCEVUnknown *Tmp = U;
    U = U->Next;
    Tmp->~SCEVUnknown();
  }
  FirstUnknown = nullptr;

  ExprValueMap.clear();
  ValueExprMap.clear();
  HasRecMap.clear();

  // Free any extra memory created for ExitNotTakenInfo in the unlikely event
  // that a loop had multiple computable exits.
  for (auto &BTCI : BackedgeTakenCounts)
    BTCI.second.clear();
  for (auto &BTCI : PredicatedBackedgeTakenCounts)
    BTCI.second.clear();

  assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
  assert(PendingPhiRanges.empty() && "getRangeRef garbage");
  assert(PendingMerges.empty() && "isImpliedViaMerge garbage");
  assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
  assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!");
}

bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}

static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first
  for (Loop *I : *L)
    PrintLoopInfo(OS, SE, I);

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  if (ExitingBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L))
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n";
  else
    OS << "Unpredictable backedge-taken count.\n";

  if (ExitingBlocks.size() > 1)
    for (BasicBlock *ExitingBlock : ExitingBlocks) {
      OS << "  exit count for " << ExitingBlock->getName() << ": "
         << *SE->getExitCount(L, ExitingBlock) << "\n";
    }

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is "
       << *SE->getConstantMaxBackedgeTakenCount(L);
    if (SE->isBackedgeTakenCountMaxOrZero(L))
      OS << ", actual taken count either this or zero.";
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SCEVUnionPredicate Pred;
  auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred);
  if (!isa<SCEVCouldNotCompute>(PBT)) {
    OS << "Predicated backedge-taken count is " << *PBT << "\n";
    OS << " Predicates:\n";
    Pred.print(OS, 4);
  } else {
    OS << "Unpredictable predicated backedge-taken count. ";
  }
  OS << "\n";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "Loop ";
    L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ": ";
    OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n";
  }
}

static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) {
  switch (LD) {
  case ScalarEvolution::LoopVariant:
    return "Variant";
  case ScalarEvolution::LoopInvariant:
    return "Invariant";
  case ScalarEvolution::LoopComputable:
    return "Computable";
  }
  llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!");
}

void ScalarEvolution::print(raw_ostream &OS) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting. Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier. This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  if (ClassifyExpressions) {
    OS << "Classifying expressions for: ";
    F.printAsOperand(OS, /*PrintType=*/false);
    OS << "\n";
    for (Instruction &I : instructions(F))
      if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) {
        OS << I << '\n';
        OS << "  -->  ";
        const SCEV *SV = SE.getSCEV(&I);
        SV->print(OS);
        if (!isa<SCEVCouldNotCompute>(SV)) {
          OS << " U: ";
          SE.getUnsignedRange(SV).print(OS);
          OS << " S: ";
          SE.getSignedRange(SV).print(OS);
        }

        const Loop *L = LI.getLoopFor(I.getParent());

        const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
        if (AtUse != SV) {
          OS << "  -->  ";
          AtUse->print(OS);
          if (!isa<SCEVCouldNotCompute>(AtUse)) {
            OS << " U: ";
            SE.getUnsignedRange(AtUse).print(OS);
            OS << " S: ";
            SE.getSignedRange(AtUse).print(OS);
          }
        }

        if (L) {
          OS << "\t\t" "Exits: ";
          const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
          if (!SE.isLoopInvariant(ExitValue, L)) {
            OS << "<<Unknown>>";
          } else {
            OS << *ExitValue;
          }

          bool First = true;
          for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
            if (First) {
              OS << "\t\t" "LoopDispositions: { ";
              First = false;
            } else {
              OS << ", ";
            }

            Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false);
            OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter));
          }

          for (auto *InnerL : depth_first(L)) {
            if (InnerL == L)
              continue;
            if (First) {
              OS << "\t\t" "LoopDispositions: { ";
              First = false;
            } else {
              OS << ", ";
            }

            InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
            OS << ": "
               << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL));
          }

          OS << " }";
        }

        OS << "\n";
      }
  }

  OS << "Determining loop execution counts for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Loop *I : LI)
    PrintLoopInfo(OS, &SE, I);
}

ScalarEvolution::LoopDisposition
ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
  auto &Values = LoopDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == L)
      return V.getInt();
  }
  Values.emplace_back(L, LoopVariant);
  LoopDisposition D = computeLoopDisposition(S, L);
  auto &Values2 = LoopDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == L) {
      V.setInt(D);
      break;
    }
  }
  return D;
}
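
// A note on the two-pass cache update above (explanatory, not part of the
// original comments): a placeholder LoopVariant entry is inserted before
// calling computeLoopDisposition because that call can recurse back into
// getLoopDisposition for sub-expressions, which both terminates the recursion
// for self-referential queries and may grow LoopDispositions, invalidating
// the Values reference; hence the fresh Values2 lookup before the computed
// disposition is written back.  getBlockDisposition below follows the same
// pattern.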

ScalarEvolution::LoopDisposition
ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
  switch (S->getSCEVType()) {
  case scConstant:
    return LoopInvariant;
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);

    // If L is the addrec's loop, it's computable.
    if (AR->getLoop() == L)
      return LoopComputable;

    // Add recurrences are never invariant in the function-body (null loop).
    if (!L)
      return LoopVariant;

    // Everything that is not defined at loop entry is variant.
    if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))
      return LoopVariant;
    assert(!L->contains(AR->getLoop()) && "Containing loop's header does not"
           " dominate the contained loop's header?");

    // This recurrence is invariant w.r.t. L if AR's loop contains L.
    if (AR->getLoop()->contains(L))
      return LoopInvariant;

    // This recurrence is variant w.r.t. L if any of its operands
    // are variant.
    for (auto *Op : AR->operands())
      if (!isLoopInvariant(Op, L))
        return LoopVariant;

    // Otherwise it's loop-invariant.
    return LoopInvariant;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    bool HasVarying = false;
    for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) {
      LoopDisposition D = getLoopDisposition(Op, L);
      if (D == LoopVariant)
        return LoopVariant;
      if (D == LoopComputable)
        HasVarying = true;
    }
    return HasVarying ? LoopComputable : LoopInvariant;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
    if (LD == LoopVariant)
      return LoopVariant;
    LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
    if (RD == LoopVariant)
      return LoopVariant;
    return (LD == LoopInvariant && RD == LoopInvariant) ?
           LoopInvariant : LoopComputable;
  }
  case scUnknown:
    // All non-instruction values are loop invariant. All instructions are loop
    // invariant if they are not contained in the specified loop.
    // Instructions are never considered invariant in the function body
    // (null loop) because they are defined within the "loop".
    if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
      return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
    return LoopInvariant;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopInvariant;
}

bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopComputable;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  auto &Values = BlockDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == BB)
      return V.getInt();
  }
  Values.emplace_back(BB, DoesNotDominateBlock);
  BlockDisposition D = computeBlockDisposition(S, BB);
  auto &Values2 = BlockDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == BB) {
      V.setInt(D);
      break;
    }
  }
  return D;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  switch (S->getSCEVType()) {
  case scConstant:
    return ProperlyDominatesBlock;
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
  case scAddRecExpr: {
    // This uses a "dominates" query instead of "properly dominates" query
    // to test for proper dominance too, because the instruction which
    // produces the addrec's value is a PHI, and a PHI effectively properly
    // dominates its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT.dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;

    // Fall through into SCEVNAryExpr handling.
    LLVM_FALLTHROUGH;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool Proper = true;
    for (const SCEV *NAryOp : NAry->operands()) {
      BlockDisposition D = getBlockDisposition(NAryOp, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    BlockDisposition LD = getBlockDisposition(LHS, BB);
    if (LD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    BlockDisposition RD = getBlockDisposition(RHS, BB);
    if (RD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
           ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
            dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const {
  auto IsS = [&](const SCEV *X) { return S == X; };
  auto ContainsS = [&](const SCEV *X) {
    return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS);
  };
  return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken);
}

void
ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  ExprValueMap.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (Entry.first == S)
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }

  auto RemoveSCEVFromBackedgeMap =
      [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        for (auto I = Map.begin(), E = Map.end(); I != E;) {
          BackedgeTakenInfo &BEInfo = I->second;
          if (BEInfo.hasOperand(S, this)) {
            BEInfo.clear();
            Map.erase(I++);
          } else
            ++I;
        }
      };

  RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
  RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
}

void
ScalarEvolution::getUsedLoops(const SCEV *S,
                              SmallPtrSetImpl<const Loop *> &LoopsUsed) {
  struct FindUsedLoops {
    FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
        : LoopsUsed(LoopsUsed) {}
    SmallPtrSetImpl<const Loop *> &LoopsUsed;
    bool follow(const SCEV *S) {
      if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
        LoopsUsed.insert(AR->getLoop());
      return true;
    }

    bool isDone() const { return false; }
  };

  FindUsedLoops F(LoopsUsed);
  SCEVTraversal<FindUsedLoops>(F).visitAll(S);
}

void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(S, LoopsUsed);
  for (auto *L : LoopsUsed)
    LoopUsers[L].push_back(S);
}

void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    LoopStack.insert(LoopStack.end(), L->begin(), L->end());

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from uncomputable to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }
12479
12480 if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
12481 // SCEV treats "undef" as an unknown but consistent value (i.e. it does
12482 // not propagate undef aggressively). This means we can (and do) fail
12483 // verification in cases where a transform makes the trip count of a loop
12484 // go from "undef" to "undef+1" (say). The transform is fine, since in
12485 // both cases the loop iterates "undef" times, but SCEV thinks we
12486 // increased the trip count of the loop by 1 incorrectly.
12487 continue;
12488 }
12489
12490 if (SE.getTypeSizeInBits(CurBECount->getType()) >
12491 SE.getTypeSizeInBits(NewBECount->getType()))
12492 NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
12493 else if (SE.getTypeSizeInBits(CurBECount->getType()) <
12494 SE.getTypeSizeInBits(NewBECount->getType()))
12495 CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());
12496
12497 const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount);
12498
12499 // Unless VerifySCEVStrict is set, we only compare constant deltas.
12500 if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) {
12501 dbgs() << "Trip Count for " << *L << " Changed!\n";
12502 dbgs() << "Old: " << *CurBECount << "\n";
12503 dbgs() << "New: " << *NewBECount << "\n";
12504 dbgs() << "Delta: " << *Delta << "\n";
12505 std::abort();
12506 }
12507 }
12508
12509 // Collect all valid loops currently in LoopInfo.
12510 SmallPtrSet<Loop *, 32> ValidLoops;
12511 SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end());
12512 while (!Worklist.empty()) {
12513 Loop *L = Worklist.pop_back_val();
12514 if (ValidLoops.contains(L))
12515 continue;
12516 ValidLoops.insert(L);
12517 Worklist.append(L->begin(), L->end());
12518 }
12519 // Check for SCEV expressions referencing invalid/deleted loops.
12520 for (auto &KV : ValueExprMap) {
12521 auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second);
12522 if (!AR)
12523 continue;
12524 assert(ValidLoops.contains(AR->getLoop()) &&
12525 "AddRec references invalid loop");
12526 }
12527 }
12528
12529 bool ScalarEvolution::invalidate(
12530 Function &F, const PreservedAnalyses &PA,
12531 FunctionAnalysisManager::Invalidator &Inv) {
12532 // Invalidate the ScalarEvolution object whenever it isn't preserved or one
12533 // of its dependencies is invalidated.
12534 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
12535 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
12536 Inv.invalidate<AssumptionAnalysis>(F, PA) ||
12537 Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
12538 Inv.invalidate<LoopAnalysis>(F, PA);
12539 }
12540
12541 AnalysisKey ScalarEvolutionAnalysis::Key;
12542
12543 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
12544 FunctionAnalysisManager &AM) {
12545 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
12546 AM.getResult<AssumptionAnalysis>(F),
12547 AM.getResult<DominatorTreeAnalysis>(F),
12548 AM.getResult<LoopAnalysis>(F));
12549 }
12550
12551 PreservedAnalyses
12552 ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
12553 AM.getResult<ScalarEvolutionAnalysis>(F).verify();
12554 return PreservedAnalyses::all();
12555 }
12556
12557 PreservedAnalyses
12558 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
12559 // For compatibility with opt's -analyze feature under the legacy pass
12560 // manager, which was not ported to the NPM. This keeps tests using
12561 // update_analyze_test_checks.py working.
12562 OS << "Printing analysis 'Scalar Evolution Analysis' for function '"
12563 << F.getName() << "':\n";
12564 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
12565 return PreservedAnalyses::all();
12566 }
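// For reference, a typical way to invoke this printer (assuming the usual
// opt spellings):
//
//   opt -passes='print<scalar-evolution>' -disable-output input.ll   (NPM)
//   opt -analyze -scalar-evolution input.ll                        (legacy)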
12567
12568 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
12569 "Scalar Evolution Analysis", false, true)
12570 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
12571 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
12572 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
12573 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
12574 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
12575 "Scalar Evolution Analysis", false, true)
12576
12577 char ScalarEvolutionWrapperPass::ID = 0;
12578
12579 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
12580 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
12581 }
12582
12583 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
12584 SE.reset(new ScalarEvolution(
12585 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
12586 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
12587 getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
12588 getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
12589 return false;
12590 }
12591
12592 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }
12593
12594 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
12595 SE->print(OS);
12596 }
12597
12598 void ScalarEvolutionWrapperPass::verifyAnalysis() const {
12599 if (!VerifySCEV)
12600 return;
12601
12602 SE->verify();
12603 }
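// VerifySCEV is the -verify-scev command line flag, so one way to exercise
// the checks in verify() after each pass under the legacy pass manager is,
// for example:
//
//   opt -verify-scev -licm -S input.ll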
12604
12605 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
12606 AU.setPreservesAll();
12607 AU.addRequiredTransitive<AssumptionCacheTracker>();
12608 AU.addRequiredTransitive<LoopInfoWrapperPass>();
12609 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
12610 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
12611 }
12612
12613 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
12614 const SCEV *RHS) {
12615 FoldingSetNodeID ID;
12616 assert(LHS->getType() == RHS->getType() &&
12617 "Type mismatch between LHS and RHS");
12618 // Unique this node based on the arguments.
12619 ID.AddInteger(SCEVPredicate::P_Equal);
12620 ID.AddPointer(LHS);
12621 ID.AddPointer(RHS);
12622 void *IP = nullptr;
12623 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
12624 return S;
12625 SCEVEqualPredicate *Eq = new (SCEVAllocator)
12626 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS);
12627 UniquePreds.InsertNode(Eq, IP);
12628 return Eq;
12629 }
12630
12631 const SCEVPredicate *ScalarEvolution::getWrapPredicate(
12632 const SCEVAddRecExpr *AR,
12633 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
12634 FoldingSetNodeID ID;
12635 // Unique this node based on the arguments.
12636 ID.AddInteger(SCEVPredicate::P_Wrap);
12637 ID.AddPointer(AR);
12638 ID.AddInteger(AddedFlags);
12639 void *IP = nullptr;
12640 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
12641 return S;
12642 auto *OF = new (SCEVAllocator)
12643 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
12644 UniquePreds.InsertNode(OF, IP);
12645 return OF;
12646 }
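// A sketch of how a client obtains one of these predicates (hypothetical
// code; AR is assumed to be an affine add recurrence):
//
//   const SCEVPredicate *P =
//       SE.getWrapPredicate(AR, SCEVWrapPredicate::IncrementNUSW);
//   // Asking again with the same arguments returns the same pointer,
//   // because predicates are uniqued through UniquePreds above.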
12647
12648 namespace {
12649
12650 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
12651 public:
12652
12653 /// Rewrites \p S in the context of a loop L and the SCEV predication
12654 /// infrastructure.
12655 ///
12656 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
12657 /// equivalences present in \p Pred.
12658 ///
12659 /// If \p NewPreds is non-null, rewrite is free to add further predicates to
12660 /// \p NewPreds such that the result will be an AddRecExpr.
12661 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
12662 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
12663 SCEVUnionPredicate *Pred) {
12664 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
12665 return Rewriter.visit(S);
12666 }
12667
12668 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
12669 if (Pred) {
12670 auto ExprPreds = Pred->getPredicatesForExpr(Expr);
12671 for (auto *Pred : ExprPreds)
12672 if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
12673 if (IPred->getLHS() == Expr)
12674 return IPred->getRHS();
12675 }
12676 return convertToAddRecWithPreds(Expr);
12677 }
12678
12679 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
12680 const SCEV *Operand = visit(Expr->getOperand());
12681 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
12682 if (AR && AR->getLoop() == L && AR->isAffine()) {
12683 // This couldn't be folded because the operand didn't have the nuw
12684 // flag. Add the nusw flag as an assumption that we could make.
12685 const SCEV *Step = AR->getStepRecurrence(SE);
12686 Type *Ty = Expr->getType();
12687 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
12688 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
12689 SE.getSignExtendExpr(Step, Ty), L,
12690 AR->getNoWrapFlags());
12691 }
12692 return SE.getZeroExtendExpr(Operand, Expr->getType());
12693 }
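// For example (an illustration with made-up value names): visiting
//   (zext i32 {%start,+,%step}<%L> to i64)
// when the recurrence lacks the nuw flag yields
//   {(zext i32 %start to i64),+,(sext i32 %step to i64)}<%L>
// once a <nusw> wrap predicate on the original AddRec has been queued via
// addOverflowAssumption; otherwise the zext is simply rebuilt unchanged.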
12694
12695 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
12696 const SCEV *Operand = visit(Expr->getOperand());
12697 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
12698 if (AR && AR->getLoop() == L && AR->isAffine()) {
12699 // This couldn't be folded because the operand didn't have the nsw
12700 // flag. Add the nssw flag as an assumption that we could make.
12701 const SCEV *Step = AR->getStepRecurrence(SE);
12702 Type *Ty = Expr->getType();
12703 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
12704 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
12705 SE.getSignExtendExpr(Step, Ty), L,
12706 AR->getNoWrapFlags());
12707 }
12708 return SE.getSignExtendExpr(Operand, Expr->getType());
12709 }
12710
12711 private:
12712 explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
12713 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
12714 SCEVUnionPredicate *Pred)
12715 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}
12716
12717 bool addOverflowAssumption(const SCEVPredicate *P) {
12718 if (!NewPreds) {
12719 // Check if we've already made this assumption.
12720 return Pred && Pred->implies(P);
12721 }
12722 NewPreds->insert(P);
12723 return true;
12724 }
12725
12726 bool addOverflowAssumption(const SCEVAddRecExpr *AR,
12727 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
12728 auto *A = SE.getWrapPredicate(AR, AddedFlags);
12729 return addOverflowAssumption(A);
12730 }
12731
12732 // If \p Expr represents a PHINode, we try to see if it can be represented
12733 // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
12734 // to add this predicate as a runtime overflow check, we return the AddRec.
12735 // If \p Expr does not meet these conditions (is not a PHI node, or we
12736 // couldn't create an AddRec for it, or couldn't add the predicate), we just
12737 // return \p Expr.
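// An illustrative shape of a successful rewrite (hypothetical values): for a
// phi %iv, createAddRecFromPHIWithCasts might return a pair such as
//   ( {0,+,(sext i32 %step to i64)}<%L>, [<nssw> predicate on that AddRec] )
// in which case the AddRec is returned once every listed predicate has been
// queued through addOverflowAssumption.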
12738 const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
12739 if (!isa<PHINode>(Expr->getValue()))
12740 return Expr;
12741 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
12742 PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
12743 if (!PredicatedRewrite)
12744 return Expr;
12745 for (auto *P : PredicatedRewrite->second) {
12746 // Wrap predicates from outer loops are not supported.
12747 if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
12748 auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr());
12749 if (L != AR->getLoop())
12750 return Expr;
12751 }
12752 if (!addOverflowAssumption(P))
12753 return Expr;
12754 }
12755 return PredicatedRewrite->first;
12756 }
12757
12758 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
12759 SCEVUnionPredicate *Pred;
12760 const Loop *L;
12761 };
12762
12763 } // end anonymous namespace
12764
12765 const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
12766 SCEVUnionPredicate &Preds) {
12767 return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
12768 }
12769
12770 const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
12771 const SCEV *S, const Loop *L,
12772 SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
12773 SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
12774 S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
12775 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);
12776
12777 if (!AddRec)
12778 return nullptr;
12779
12780 // Since the transformation was successful, we can now transfer the SCEV
12781 // predicates.
12782 for (auto *P : TransformPreds)
12783 Preds.insert(P);
12784
12785 return AddRec;
12786 }
12787
12788 /// SCEV predicates
12789 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
12790 SCEVPredicateKind Kind)
12791 : FastID(ID), Kind(Kind) {}
12792
12793 SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
12794 const SCEV *LHS, const SCEV *RHS)
12795 : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
12796 assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
12797 assert(LHS != RHS && "LHS and RHS are the same SCEV");
12798 }
12799
12800 bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
12801 const auto *Op = dyn_cast<SCEVEqualPredicate>(N);
12802
12803 if (!Op)
12804 return false;
12805
12806 return Op->LHS == LHS && Op->RHS == RHS;
12807 }
12808
12809 bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }
12810
12811 const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }
12812
12813 void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
12814 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
12815 }
12816
12817 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
12818 const SCEVAddRecExpr *AR,
12819 IncrementWrapFlags Flags)
12820 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}
12821
12822 const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }
12823
12824 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
12825 const auto *Op = dyn_cast<SCEVWrapPredicate>(N);
12826
12827 return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
12828 }
12829
12830 bool SCEVWrapPredicate::isAlwaysTrue() const {
12831 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
12832 IncrementWrapFlags IFlags = Flags;
12833
12834 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
12835 IFlags = clearFlags(IFlags, IncrementNSSW);
12836
12837 return IFlags == IncrementAnyWrap;
12838 }
12839
12840 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
12841 OS.indent(Depth) << *getExpr() << " Added Flags: ";
12842 if (SCEVWrapPredicate::IncrementNUSW & getFlags())
12843 OS << "<nusw>";
12844 if (SCEVWrapPredicate::IncrementNSSW & getFlags())
12845 OS << "<nssw>";
12846 OS << "\n";
12847 }
12848
12849 SCEVWrapPredicate::IncrementWrapFlags
12850 SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
12851 ScalarEvolution &SE) {
12852 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
12853 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();
12854
12855 // We can safely transfer the NSW flag as NSSW.
12856 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
12857 ImpliedFlags = IncrementNSSW;
12858
12859 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
12860 // If the increment is positive, the SCEV NUW flag will also imply the
12861 // WrapPredicate NUSW flag.
12862 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
12863 if (Step->getValue()->getValue().isNonNegative())
12864 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
12865 }
12866
12867 return ImpliedFlags;
12868 }
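// A worked example of the logic above: for {0,+,1}<nuw><nsw><%L>, the nsw
// flag transfers as IncrementNSSW, and because the constant step 1 is
// non-negative the nuw flag additionally implies IncrementNUSW, so both bits
// are returned. With a negative constant step, only the NSSW bit would be
// implied.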
12869
12870 /// Union predicates don't get cached, so create a dummy FoldingSet ID for them.
12871 SCEVUnionPredicate::SCEVUnionPredicate()
12872 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}
12873
12874 bool SCEVUnionPredicate::isAlwaysTrue() const {
12875 return all_of(Preds,
12876 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
12877 }
12878
12879 ArrayRef<const SCEVPredicate *>
12880 SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
12881 auto I = SCEVToPreds.find(Expr);
12882 if (I == SCEVToPreds.end())
12883 return ArrayRef<const SCEVPredicate *>();
12884 return I->second;
12885 }
12886
12887 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
12888 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
12889 return all_of(Set->Preds,
12890 [this](const SCEVPredicate *I) { return this->implies(I); });
12891
12892 auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
12893 if (ScevPredsIt == SCEVToPreds.end())
12894 return false;
12895 auto &SCEVPreds = ScevPredsIt->second;
12896
12897 return any_of(SCEVPreds,
12898 [N](const SCEVPredicate *I) { return I->implies(N); });
12899 }
12900
12901 const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }
12902
12903 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
12904 for (auto Pred : Preds)
12905 Pred->print(OS, Depth);
12906 }
12907
12908 void SCEVUnionPredicate::add(const SCEVPredicate *N) {
12909 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
12910 for (auto Pred : Set->Preds)
12911 add(Pred);
12912 return;
12913 }
12914
12915 if (implies(N))
12916 return;
12917
12918 const SCEV *Key = N->getExpr();
12919 assert(Key && "Only SCEVUnionPredicate doesn't have an "
12920 "associated expression!");
12921
12922 SCEVToPreds[Key].push_back(N);
12923 Preds.push_back(N);
12924 }
12925
12926 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
12927 Loop &L)
12928 : SE(SE), L(L) {}
12929
12930 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
12931 const SCEV *Expr = SE.getSCEV(V);
12932 RewriteEntry &Entry = RewriteMap[Expr];
12933
12934 // If we already have an entry and the version matches, return it.
12935 if (Entry.second && Generation == Entry.first)
12936 return Entry.second;
12937
12938 // We found an entry but it's stale. Rewrite the stale entry
12939 // according to the current predicate.
12940 if (Entry.second)
12941 Expr = Entry.second;
12942
12943 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
12944 Entry = {Generation, NewSCEV};
12945
12946 return NewSCEV;
12947 }
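// A minimal sketch of the caching contract above (hypothetical client code):
//
//   PredicatedScalarEvolution PSE(SE, *L);
//   const SCEV *S1 = PSE.getSCEV(V); // computed, then cached in RewriteMap
//   const SCEV *S2 = PSE.getSCEV(V); // same generation, so S2 == S1
//   PSE.addPredicate(*P);            // bumps Generation if P was new
//   const SCEV *S3 = PSE.getSCEV(V); // stale entry rewritten under Preds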
12948
12949 const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
12950 if (!BackedgeCount) {
12951 SCEVUnionPredicate BackedgePred;
12952 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
12953 addPredicate(BackedgePred);
12954 }
12955 return BackedgeCount;
12956 }
12957
12958 void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
12959 if (Preds.implies(&Pred))
12960 return;
12961 Preds.add(&Pred);
12962 updateGeneration();
12963 }
12964
12965 const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
12966 return Preds;
12967 }
12968
12969 void PredicatedScalarEvolution::updateGeneration() {
12970 // If the generation number wrapped, recompute everything.
12971 if (++Generation == 0) {
12972 for (auto &II : RewriteMap) {
12973 const SCEV *Rewritten = II.second.second;
12974 II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
12975 }
12976 }
12977 }
12978
12979 void PredicatedScalarEvolution::setNoOverflow(
12980 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
12981 const SCEV *Expr = getSCEV(V);
12982 const auto *AR = cast<SCEVAddRecExpr>(Expr);
12983
12984 auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);
12985
12986 // Clear the statically implied flags.
12987 Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
12988 addPredicate(*SE.getWrapPredicate(AR, Flags));
12989
12990 auto II = FlagsMap.insert({V, Flags});
12991 if (!II.second)
12992 II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
12993 }
12994
12995 bool PredicatedScalarEvolution::hasNoOverflow(
12996 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
12997 const SCEV *Expr = getSCEV(V);
12998 const auto *AR = cast<SCEVAddRecExpr>(Expr);
12999
13000 Flags = SCEVWrapPredicate::clearFlags(
13001 Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));
13002
13003 auto II = FlagsMap.find(V);
13004
13005 if (II != FlagsMap.end())
13006 Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);
13007
13008 return Flags == SCEVWrapPredicate::IncrementAnyWrap;
13009 }
13010
13011 const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
13012 const SCEV *Expr = this->getSCEV(V);
13013 SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
13014 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);
13015
13016 if (!New)
13017 return nullptr;
13018
13019 for (auto *P : NewPreds)
13020 Preds.add(P);
13021
13022 updateGeneration();
13023 RewriteMap[SE.getSCEV(V)] = {Generation, New};
13024 return New;
13025 }
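// A hedged sketch of the typical client pattern (vectorizer-style code):
//
//   if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(Ptr)) {
//     // Ptr is an AddRec only *under* PSE.getUnionPredicate(); the caller
//     // must emit the accumulated runtime checks before relying on AR.
//   }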
13026
13027 PredicatedScalarEvolution::PredicatedScalarEvolution(
13028 const PredicatedScalarEvolution &Init)
13029 : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
13030 Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
13031 for (auto I : Init.FlagsMap)
13032 FlagsMap.insert(I);
13033 }
13034
13035 void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
13036 // For each block.
13037 for (auto *BB : L.getBlocks())
13038 for (auto &I : *BB) {
13039 if (!SE.isSCEVable(I.getType()))
13040 continue;
13041
13042 auto *Expr = SE.getSCEV(&I);
13043 auto II = RewriteMap.find(Expr);
13044
13045 if (II == RewriteMap.end())
13046 continue;
13047
13048 // Don't print things that are not interesting.
13049 if (II->second.second == Expr)
13050 continue;
13051
13052 OS.indent(Depth) << "[PSE]" << I << ":\n";
13053 OS.indent(Depth + 2) << *Expr << "\n";
13054 OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
13055 }
13056 }
13057
13058 // Match the mathematical pattern A - (A / B) * B, where A and B can be
13059 // arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used
13060 // for URem with constant power-of-2 second operands.
13061 // It's not always easy, as A and B can themselves be folded (imagine A is
13062 // X / 2 and B is 4; then A / B becomes X / 8).
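// Two illustrative cases (value names are made up): "%x urem 5" is built by
// getURemExpr as
//   (%x + (-5 * (%x /u 5)))
// and is recovered below by trying each multiply operand (and its negation)
// as the divisor B and checking whether getURemExpr(A, B) rebuilds Expr.
// A power-of-2 remainder such as "%x urem 8" on i32 instead appears as
//   (zext i3 (trunc i32 %x to i3) to i32)
// and is caught by the zext (trunc ...) special case at the top.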
13063 bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
13064 const SCEV *&RHS) {
13065 // Try to match 'zext (trunc A to iB) to iY', which is used
13066 // for URem with constant power-of-2 second operands. Make sure the size of
13067 // the operand A matches the size of the whole expression.
13068 if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
13069 if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
13070 LHS = Trunc->getOperand();
13071 if (LHS->getType() != Expr->getType())
13072 LHS = getZeroExtendExpr(LHS, Expr->getType());
13073 RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1)
13074 << getTypeSizeInBits(Trunc->getType()));
13075 return true;
13076 }
13077 const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
13078 if (Add == nullptr || Add->getNumOperands() != 2)
13079 return false;
13080
13081 const SCEV *A = Add->getOperand(1);
13082 const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));
13083
13084 if (Mul == nullptr)
13085 return false;
13086
13087 const auto MatchURemWithDivisor = [&](const SCEV *B) {
13088 // (SomeExpr + (-(SomeExpr / B) * B)).
13089 if (Expr == getURemExpr(A, B)) {
13090 LHS = A;
13091 RHS = B;
13092 return true;
13093 }
13094 return false;
13095 };
13096
13097 // (SomeExpr + (-1 * (SomeExpr / B) * B)).
13098 if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
13099 return MatchURemWithDivisor(Mul->getOperand(1)) ||
13100 MatchURemWithDivisor(Mul->getOperand(2));
13101
13102 // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
13103 if (Mul->getNumOperands() == 2)
13104 return MatchURemWithDivisor(Mul->getOperand(1)) ||
13105 MatchURemWithDivisor(Mul->getOperand(0)) ||
13106 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
13107 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
13108 return false;
13109 }
13110
13111 const SCEV *
13112 ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
13113 SmallVector<BasicBlock*, 16> ExitingBlocks;
13114 L->getExitingBlocks(ExitingBlocks);
13115
13116 // Form an expression for the maximum exit count possible for this loop. We
13117 // merge the max and exact information to approximate a version of
13118 // getConstantMaxBackedgeTakenCount which isn't restricted to just constants.
13119 SmallVector<const SCEV*, 4> ExitCounts;
13120 for (BasicBlock *ExitingBB : ExitingBlocks) {
13121 const SCEV *ExitCount = getExitCount(L, ExitingBB);
13122 if (isa<SCEVCouldNotCompute>(ExitCount))
13123 ExitCount = getExitCount(L, ExitingBB,
13124 ScalarEvolution::ConstantMaximum);
13125 if (!isa<SCEVCouldNotCompute>(ExitCount)) {
13126 assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
13127 "We should only have known counts for exiting blocks that "
13128 "dominate latch!");
13129 ExitCounts.push_back(ExitCount);
13130 }
13131 }
13132 if (ExitCounts.empty())
13133 return getCouldNotCompute();
13134 return getUMinFromMismatchedTypes(ExitCounts);
13135 }
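// For instance (illustrative counts): a loop with two exiting blocks whose
// individual exit counts are %n and 100 gets the symbolic maximum
// (%n umin 100), merging exact and constant-maximum per-exit information
// that getConstantMaxBackedgeTakenCount alone could not combine.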
13136
13137 /// This rewriter is similar to SCEVParameterRewriter (it replaces SCEVUnknown
13138 /// components according to the Map (Value -> SCEV)), but skips AddRecExpr because
13139 /// we cannot guarantee that the replacement is loop invariant in the loop of
13140 /// the AddRec.
13141 class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
13142 ValueToSCEVMapTy &Map;
13143
13144 public:
13145 SCEVLoopGuardRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M)
13146 : SCEVRewriteVisitor(SE), Map(M) {}
13147
13148 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }
13149
13150 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
13151 auto I = Map.find(Expr->getValue());
13152 if (I == Map.end())
13153 return Expr;
13154 return I->second;
13155 }
13156 };
13157
13158 const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
13159 auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
13160 const SCEV *RHS, ValueToSCEVMapTy &RewriteMap) {
13161 if (!isa<SCEVUnknown>(LHS)) {
13162 std::swap(LHS, RHS);
13163 Predicate = CmpInst::getSwappedPredicate(Predicate);
13164 }
13165
13166 // For now, limit to conditions that provide information about unknown
13167 // expressions.
13168 auto *LHSUnknown = dyn_cast<SCEVUnknown>(LHS);
13169 if (!LHSUnknown)
13170 return;
13171
13172 // TODO: use information from more predicates.
13173 switch (Predicate) {
13174 case CmpInst::ICMP_ULT: {
13175 if (!containsAddRecurrence(RHS)) {
13176 const SCEV *Base = LHS;
13177 auto I = RewriteMap.find(LHSUnknown->getValue());
13178 if (I != RewriteMap.end())
13179 Base = I->second;
13180
13181 RewriteMap[LHSUnknown->getValue()] =
13182 getUMinExpr(Base, getMinusSCEV(RHS, getOne(RHS->getType())));
13183 }
13184 break;
13185 }
13186 case CmpInst::ICMP_ULE: {
13187 if (!containsAddRecurrence(RHS)) {
13188 const SCEV *Base = LHS;
13189 auto I = RewriteMap.find(LHSUnknown->getValue());
13190 if (I != RewriteMap.end())
13191 Base = I->second;
13192 RewriteMap[LHSUnknown->getValue()] = getUMinExpr(Base, RHS);
13193 }
13194 break;
13195 }
13196 case CmpInst::ICMP_EQ:
13197 if (isa<SCEVConstant>(RHS))
13198 RewriteMap[LHSUnknown->getValue()] = RHS;
13199 break;
13200 case CmpInst::ICMP_NE:
13201 if (isa<SCEVConstant>(RHS) &&
13202 cast<SCEVConstant>(RHS)->getValue()->isNullValue())
13203 RewriteMap[LHSUnknown->getValue()] =
13204 getUMaxExpr(LHS, getOne(RHS->getType()));
13205 break;
13206 default:
13207 break;
13208 }
13209 };
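// For example (illustrative IR): a loop entry guard of the form
//   %c = icmp ult i32 %n, 8
//   br i1 %c, label %header, label %exit
// makes CollectCondition record RewriteMap[%n] = (%n umin 7), so expressions
// in terms of %n rewritten at the end of this function pick up that bound.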
13210 // Starting at the loop predecessor, climb up the predecessor chain as long
13211 // as we can find predecessors that have unique successors leading to the
13212 // original header.
13213 // TODO: share this logic with isLoopEntryGuardedByCond.
13214 ValueToSCEVMapTy RewriteMap;
13215 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(
13216 L->getLoopPredecessor(), L->getHeader());
13217 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
13218
13219 const BranchInst *LoopEntryPredicate =
13220 dyn_cast<BranchInst>(Pair.first->getTerminator());
13221 if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
13222 continue;
13223
13224 // TODO: use information from more complex conditions, e.g. AND expressions.
13225 auto *Cmp = dyn_cast<ICmpInst>(LoopEntryPredicate->getCondition());
13226 if (!Cmp)
13227 continue;
13228
13229 auto Predicate = Cmp->getPredicate();
13230 if (LoopEntryPredicate->getSuccessor(1) == Pair.second)
13231 Predicate = CmpInst::getInversePredicate(Predicate);
13232 CollectCondition(Predicate, getSCEV(Cmp->getOperand(0)),
13233 getSCEV(Cmp->getOperand(1)), RewriteMap);
13234 }
13235
13236 // Also collect information from assumptions dominating the loop.
13237 for (auto &AssumeVH : AC.assumptions()) {
13238 if (!AssumeVH)
13239 continue;
13240 auto *AssumeI = cast<CallInst>(AssumeVH);
13241 auto *Cmp = dyn_cast<ICmpInst>(AssumeI->getOperand(0));
13242 if (!Cmp || !DT.dominates(AssumeI, L->getHeader()))
13243 continue;
13244 CollectCondition(Cmp->getPredicate(), getSCEV(Cmp->getOperand(0)),
13245 getSCEV(Cmp->getOperand(1)), RewriteMap);
13246 }
13247
13248 if (RewriteMap.empty())
13249 return Expr;
13250 SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
13251 return Rewriter.visit(Expr);
13252 }
13253