//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
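//
// For example, the canonical induction variable of a loop (a PHI node that
// starts at zero and steps by one on each iteration) is represented directly
// as the recurrence {0,+,1}<loop>.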
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionDivision.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::ZeroOrMore,
                            cl::desc("Maximum number of iterations SCEV will "
                                     "symbolically execute a constant "
                                     "derived loop"),
                            cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification when -verify-scev is passed"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(8));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));

static cl::opt<bool>
ClassifyExpressions("scalar-evolution-classify-expressions",
    cl::Hidden, cl::init(true),
    cl::desc("When printing analysis, include information on every instruction"));

static cl::opt<bool> UseExpensiveRangeSharpening(
    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::init(false),
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
             "be costly in terms of compile time"));

//===----------------------------------------------------------------------===//
// SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scPtrToInt: {
    const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this);
    const SCEV *Op = PtrToInt->getOperand();
    OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
       << *PtrToInt->getType() << ")";
    return;
  }
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr:
      OpStr = " umin ";
      break;
    case scSMinExpr:
      OpStr = " smin ";
      break;
    default:
      llvm_unreachable("There are no other nary expression types.");
    }
    OS << "(";
    ListSeparator LS(OpStr);
    for (const SCEV *Op : NAry->operands())
      OS << LS << *Op;
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
      break;
    default:
      // Nothing to print for other nary expressions.
      break;
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
    return cast<SCEVAddRecExpr>(this)->getType();
  case scMulExpr:
    return cast<SCEVMulExpr>(this)->getType();
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVMinMaxExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                           const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) {
  Operands[0] = op;
}

SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEVCastExpr(ID, SCEVTy, op, ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which point to this
  // SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<GEPOperator>(CE)->getSourceElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to be
// more efficient.
// If the maximum analysis depth was reached, return None, since we cannot tell
// for sure whether the two expressions are equivalent.
static Optional<int>
CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
                      EquivalenceClasses<const Value *> &EqCacheValue,
                      const LoopInfo *const LI, const SCEV *LHS,
                      const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;

  if (Depth > MaxSCEVCompareDepth)
    return None;

  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance relationship between two recurrences used by
    // the same SCEV, so we can safely sort them by loop-header dominance. We
    // require such an order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LA->getOperand(i), RA->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LC->getOperand(i), RC->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                   RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    auto X =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getOperand(),
                              RC->getOperand(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
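///
/// For example, grouping the operands of an add such as (A + B + A) places
/// the two copies of A next to each other, so the add folder can combine
/// them into (2*A + B) with a single linear scan.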
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return; // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;

  // Whether LHS has provably less complexity than RHS.
  auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
    auto Complexity =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT);
    return Complexity && *Complexity < 0;
  };
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (IsLessComplex(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return IsLessComplex(LHS, RHS);
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i; // no need to rescan it.
        if (i == e-2) return; // Done!
      }
    }
  }
}

/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
// Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must ensure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
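  //
  // As a worked example, consider K = 3: here K! = 6 = 2^1 * 3, so T = 1 and
  // the odd factor K! / 2^T is 3. The code below computes the product
  // It * (It - 1) * (It - 2) at width W + 1, divides it by 2^1, truncates
  // the result back to W bits, and finally multiplies by the multiplicative
  // inverse of 3 modulo 2^W.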

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
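///
/// For example, the affine recurrence {A,+,B} evaluates to
/// A*BC(It, 0) + B*BC(It, 1) = A + B*It, and the quadratic recurrence
/// {A,+,B,+,C} evaluates to A + B*It + C*(It*(It - 1)/2).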
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  return evaluateAtIteration(makeArrayRef(op_begin(), op_end()), It, SE);
}

const SCEV *
SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
                                    const SCEV *It, ScalarEvolution &SE) {
  assert(Operands.size() > 0);
  const SCEV *Result = Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
// SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
                                                     unsigned Depth) {
  assert(Depth <= 1 &&
         "getLosslessPtrToIntExpr() should self-recurse at most once.");

  // We could be called with integer-typed operands during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self cast.
  if (!Op->getType()->isPointerTy())
    return Op;

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  // It isn't legal for optimizations to construct new ptrtoint expressions
  // for non-integral pointers.
  if (getDataLayout().isNonIntegralPointerType(Op->getType()))
    return getCouldNotCompute();

  Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());

  // We can only trivially model ptrtoint if SCEV's effective (integer) type
  // is sufficiently wide to represent all possible pointer values.
  // We could theoretically teach SCEV to truncate wider pointers, but
  // that isn't implemented for now.
  if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(Op->getType())) !=
      getDataLayout().getTypeSizeInBits(IntPtrTy))
    return getCouldNotCompute();

  // If not, is this expression something we can't reduce any further?
  if (auto *U = dyn_cast<SCEVUnknown>(Op)) {
    // Perform some basic constant folding. If the operand of the ptr2int cast
    // is a null pointer, don't create a ptr2int SCEV expression (that will be
    // left as-is), but produce a zero constant.
    // NOTE: We could handle a more general case, but lack motivational cases.
    if (isa<ConstantPointerNull>(U->getValue()))
      return getZero(IntPtrTy);

    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for "
                       "non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
  // arbitrary expression, we want to have SCEVPtrToIntExpr of an SCEVUnknown
  // only, and the expressions must otherwise be integer-typed.
  // So sink the cast down to the SCEVUnknown's.
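  // For example, a ptrtoint of the pointer-typed expression (4 + %ptr) is
  // rewritten into the integer-typed expression (4 + (ptrtoint %ptr)).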

  /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
  /// which computes a pointer-typed value, and rewrites the whole expression
  /// tree so that *all* the computations are done on integers, and the only
  /// pointer-typed operands in the expression are SCEVUnknown.
  class SCEVPtrToIntSinkingRewriter
      : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
    using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;

  public:
    SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Rewriter.visit(Scev);
    }

    const SCEV *visit(const SCEV *S) {
      Type *STy = S->getType();
      // If the expression is not pointer-typed, just keep it as-is.
      if (!STy->isPointerTy())
        return S;
      // Else, recursively sink the cast down into it.
      return Base::visit(S);
    }

    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      assert(Expr->getType()->isPointerTy() &&
             "Should only reach pointer-typed SCEVUnknown's.");
      return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1);
    }
  };

  // And actually perform the cast sinking.
  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
  assert(IntOp->getType()->isIntegerTy() &&
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");
  return IntOp;
}

const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) {
  assert(Ty->isIntegerTy() && "Target type must be an integer type!");

  const SCEV *IntOp = getLosslessPtrToIntExpr(Op);
  if (isa<SCEVCouldNotCompute>(IntOp))
    return IntOp;

  return getTruncateOrZeroExtend(IntOp, Ty);
}

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't truncate pointer!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that during the recursion and different modifications the ID
    // was inserted into the cache. So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // Return zero if truncating to known zeros.
  uint32_t MinTrailingZeros = GetMinTrailingZeros(Op);
  if (MinTrailingZeros >= getTypeSizeInBits(Ty))
    return getZero(Ty);

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
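// For example, with an 8-bit recurrence whose step is known to be exactly 1,
// the limit below is getSignedMinValue(8) - 1, which wraps around to 127,
// with predicate SLT: any value that is signed-less-than 127 before the
// increment can have the step added to it without signed overflow.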
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
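// The unsigned analogue of the example above: for an 8-bit recurrence whose
// step is known to be exactly 1, the limit below is 0 - 1, which wraps around
// to 255, with predicate ULT: any value unsigned-less-than 255 can be
// incremented by 1 without unsigned wrap.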
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling. This
// allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {Step + sext/zext(Start),+,Step}.
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
1375 template <typename ExtendOpTy>
getPreStartForExtend(const SCEVAddRecExpr * AR,Type * Ty,ScalarEvolution * SE,unsigned Depth)1376 static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
1377 ScalarEvolution *SE, unsigned Depth) {
1378 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1379 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1380
1381 const Loop *L = AR->getLoop();
1382 const SCEV *Start = AR->getStart();
1383 const SCEV *Step = AR->getStepRecurrence(*SE);
1384
1385 // Check for a simple looking step prior to loop entry.
1386 const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
1387 if (!SA)
1388 return nullptr;
1389
1390 // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
1391 // subtraction is expensive. For this purpose, perform a quick and dirty
1392 // difference, by checking for Step in the operand list.
1393 SmallVector<const SCEV *, 4> DiffOps;
1394 for (const SCEV *Op : SA->operands())
1395 if (Op != Step)
1396 DiffOps.push_back(Op);
1397
1398 if (DiffOps.size() == SA->getNumOperands())
1399 return nullptr;
1400
1401 // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
1402 // `Step`:
1403
1404 // 1. NSW/NUW flags on the step increment.
1405 auto PreStartFlags =
1406 ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
1407 const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
1408 const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
1409 SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));
1410
1411 // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
1412 // "S+X does not sign/unsign-overflow".
1413 //
1414
1415 const SCEV *BECount = SE->getBackedgeTakenCount(L);
1416 if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
1417 !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
1418 return PreStart;
1419
1420 // 2. Direct overflow check on the step operation's expression.
1421 unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
1422 Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
1423 const SCEV *OperandExtendedStart =
1424 SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
1425 (SE->*GetExtendExpr)(Step, WideTy, Depth));
1426 if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
1427 if (PreAR && AR->getNoWrapFlags(WrapType)) {
1428 // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
1429 // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
1430 // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
1431 SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
1432 }
1433 return PreStart;
1434 }
1435
1436 // 3. Loop precondition.
1437 ICmpInst::Predicate Pred;
1438 const SCEV *OverflowLimit =
1439 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1440
1441 if (OverflowLimit &&
1442 SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
1443 return PreStart;
1444
1445 return nullptr;
1446 }
1447
1448 // Get the normalized zero or sign extended expression for this AddRec's Start.
1449 template <typename ExtendOpTy>
1450 static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
1451 ScalarEvolution *SE,
1452 unsigned Depth) {
1453 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1454
1455 const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
1456 if (!PreStart)
1457 return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);
1458
1459 return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
1460 Depth),
1461 (SE->*GetExtendExpr)(PreStart, Ty, Depth));
1462 }
1463
1464 // Try to prove away overflow by looking at "nearby" add recurrences. A
1465 // motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
1466 // does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
1467 //
1468 // Formally:
1469 //
1470 // {S,+,X} == {S-T,+,X} + T
1471 // => Ext({S,+,X}) == Ext({S-T,+,X} + T)
1472 //
1473 // If ({S-T,+,X} + T) does not overflow ... (1)
1474 //
1475 // RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
1476 //
1477 // If {S-T,+,X} does not overflow ... (2)
1478 //
1479 // RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
1480 // == {Ext(S-T)+Ext(T),+,Ext(X)}
1481 //
1482 // If (S-T)+T does not overflow ... (3)
1483 //
1484 // RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
1485 // == {Ext(S),+,Ext(X)} == LHS
1486 //
1487 // Thus, if (1), (2) and (3) are true for some T, then
1488 // Ext({S,+,X}) == {Ext(S),+,Ext(X)}
1489 //
1490 // (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
1491 // does not overflow" restricted to the 0th iteration. Therefore we only need
1492 // to check for (1) and (2).
1493 //
1494 // In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
1495 // is `Delta` (defined below).
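// In the motivating example above, S = 1, X = 4 and T = Delta = 1: knowing
// that {0,+,4} is <nuw> discharges (2), and knowing that {0,+,4} is `ult`
// -1 discharges (1), letting us conclude that {1,+,4} is <nuw>.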
1496 template <typename ExtendOpTy>
1497 bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
1498 const SCEV *Step,
1499 const Loop *L) {
1500 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1501
1502 // We restrict `Start` to a constant to prevent SCEV from spending too much
1503 // time here. It is correct (but more expensive) to continue with a
1504 // non-constant `Start` and do a general SCEV subtraction to compute
1505 // `PreStart` below.
1506 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
1507 if (!StartC)
1508 return false;
1509
1510 APInt StartAI = StartC->getAPInt();
1511
1512 for (unsigned Delta : {-2, -1, 1, 2}) {
1513 const SCEV *PreStart = getConstant(StartAI - Delta);
1514
1515 FoldingSetNodeID ID;
1516 ID.AddInteger(scAddRecExpr);
1517 ID.AddPointer(PreStart);
1518 ID.AddPointer(Step);
1519 ID.AddPointer(L);
1520 void *IP = nullptr;
1521 const auto *PreAR =
1522 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1523
1524 // Give up if we don't already have the add recurrence we need because
1525 // actually constructing an add recurrence is relatively expensive.
1526 if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2)
1527 const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
1528 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
1529 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1530 DeltaS, &Pred, this);
1531 if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1)
1532 return true;
1533 }
1534 }
1535
1536 return false;
1537 }
1538
1539 // Finds an integer D for an expression (C + x + y + ...) such that the top
1540 // level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
1541 // unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
1542 // maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
1543 // the (C + x + y + ...) expression is \p WholeAddExpr.
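// For example (illustrative): in (13 + 8*x + 16*y) the non-constant part has
// at least 3 trailing zero bits, so D = 13 mod 8 = 5, leaving the residual
// (8 + 8*x + 16*y) with those trailing zeros intact.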
1544 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
1545 const SCEVConstant *ConstantTerm,
1546 const SCEVAddExpr *WholeAddExpr) {
1547 const APInt &C = ConstantTerm->getAPInt();
1548 const unsigned BitWidth = C.getBitWidth();
1549 // Find number of trailing zeros of (x + y + ...) w/o the C first:
1550 uint32_t TZ = BitWidth;
1551 for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
1552 TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
1553 if (TZ) {
1554 // Set D to be as many least significant bits of C as possible while still
1555 // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
1556 return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
1557 }
1558 return APInt(BitWidth, 0);
1559 }
1560
1561 // Finds an integer D for an affine AddRec expression {C,+,x} such that the top
1562 // level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
1563 // number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
1564 // ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
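// For example (illustrative): for {13,+,8}, TZ(8) = 3 gives D = 13 mod 8 = 5,
// splitting the expression as 5 + {8,+,8}.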
1565 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
1566 const APInt &ConstantStart,
1567 const SCEV *Step) {
1568 const unsigned BitWidth = ConstantStart.getBitWidth();
1569 const uint32_t TZ = SE.GetMinTrailingZeros(Step);
1570 if (TZ)
1571 return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
1572 : ConstantStart;
1573 return APInt(BitWidth, 0);
1574 }
1575
1576 const SCEV *
1577 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
1578 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1579 "This is not an extending conversion!");
1580 assert(isSCEVable(Ty) &&
1581 "This is not a conversion to a SCEVable type!");
1582 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
1583 Ty = getEffectiveSCEVType(Ty);
1584
1585 // Fold if the operand is constant.
1586 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1587 return getConstant(
1588 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));
1589
1590 // zext(zext(x)) --> zext(x)
1591 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1592 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);
1593
1594 // Before doing any expensive analysis, check to see if we've already
1595 // computed a SCEV for this Op and Ty.
1596 FoldingSetNodeID ID;
1597 ID.AddInteger(scZeroExtend);
1598 ID.AddPointer(Op);
1599 ID.AddPointer(Ty);
1600 void *IP = nullptr;
1601 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
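// Limit recursion depth.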
1602 if (Depth > MaxCastDepth) {
1603 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1604 Op, Ty);
1605 UniqueSCEVs.InsertNode(S, IP);
1606 addToLoopUseLists(S);
1607 return S;
1608 }
1609
1610 // zext(trunc(x)) --> zext(x) or x or trunc(x)
1611 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1612 // It's possible the bits taken off by the truncate were all zero bits. If
1613 // so, we should be able to simplify this further.
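// For example (illustrative): if X is an i32 whose unsigned range is
// [0, 200] and Op is zext(trunc(X to i8) to i64), the truncate drops only
// zero bits, so the whole expression folds to zext(X to i64).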
1614 const SCEV *X = ST->getOperand();
1615 ConstantRange CR = getUnsignedRange(X);
1616 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1617 unsigned NewBits = getTypeSizeInBits(Ty);
1618 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
1619 CR.zextOrTrunc(NewBits)))
1620 return getTruncateOrZeroExtend(X, Ty, Depth);
1621 }
1622
1623 // If the input value is a chrec scev, and we can prove that the value
1624 // did not overflow the old, smaller, value, we can zero extend all of the
1625 // operands (often constants). This allows analysis of something like
1626 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
1627 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1628 if (AR->isAffine()) {
1629 const SCEV *Start = AR->getStart();
1630 const SCEV *Step = AR->getStepRecurrence(*this);
1631 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1632 const Loop *L = AR->getLoop();
1633
1634 if (!AR->hasNoUnsignedWrap()) {
1635 auto NewFlags = proveNoWrapViaConstantRanges(AR);
1636 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1637 }
1638
1639 // If we have special knowledge that this addrec won't overflow,
1640 // we don't need to do any further analysis.
1641 if (AR->hasNoUnsignedWrap())
1642 return getAddRecExpr(
1643 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
1644 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1645
1646 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1647 // Note that this serves two purposes: It filters out loops that are
1648 // simply not analyzable, and it covers the case where this code is
1649 // being called from within backedge-taken count analysis, such that
1650 // attempting to ask for the backedge-taken count would likely result
1651 // in infinite recursion. In the latter case, the analysis code will
1652 // cope with a conservative value, and it will take care to purge
1653 // that value once it has finished.
1654 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1655 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1656 // Manually compute the final value for AR, checking for overflow.
1657
1658 // Check whether the backedge-taken count can be losslessly cast to
1659 // the addrec's type. The count is always unsigned.
1660 const SCEV *CastedMaxBECount =
1661 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
1662 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
1663 CastedMaxBECount, MaxBECount->getType(), Depth);
1664 if (MaxBECount == RecastedMaxBECount) {
1665 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1666 // Check whether Start+Step*MaxBECount has no unsigned overflow.
1667 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
1668 SCEV::FlagAnyWrap, Depth + 1);
1669 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
1670 SCEV::FlagAnyWrap,
1671 Depth + 1),
1672 WideTy, Depth + 1);
1673 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
1674 const SCEV *WideMaxBECount =
1675 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1676 const SCEV *OperandExtendedAdd =
1677 getAddExpr(WideStart,
1678 getMulExpr(WideMaxBECount,
1679 getZeroExtendExpr(Step, WideTy, Depth + 1),
1680 SCEV::FlagAnyWrap, Depth + 1),
1681 SCEV::FlagAnyWrap, Depth + 1);
1682 if (ZAdd == OperandExtendedAdd) {
1683 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1684 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1685 // Return the expression with the addrec on the outside.
1686 return getAddRecExpr(
1687 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1688 Depth + 1),
1689 getZeroExtendExpr(Step, Ty, Depth + 1), L,
1690 AR->getNoWrapFlags());
1691 }
1692 // Similar to above, only this time treat the step value as signed.
1693 // This covers loops that count down.
1694 OperandExtendedAdd =
1695 getAddExpr(WideStart,
1696 getMulExpr(WideMaxBECount,
1697 getSignExtendExpr(Step, WideTy, Depth + 1),
1698 SCEV::FlagAnyWrap, Depth + 1),
1699 SCEV::FlagAnyWrap, Depth + 1);
1700 if (ZAdd == OperandExtendedAdd) {
1701 // Cache knowledge of AR NW, which is propagated to this AddRec.
1702 // Negative step causes unsigned wrap, but it still can't self-wrap.
1703 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1704 // Return the expression with the addrec on the outside.
1705 return getAddRecExpr(
1706 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1707 Depth + 1),
1708 getSignExtendExpr(Step, Ty, Depth + 1), L,
1709 AR->getNoWrapFlags());
1710 }
1711 }
1712 }
1713
1714 // Normally, in the cases we can prove no-overflow via a
1715 // backedge guarding condition, we can also compute a backedge
1716 // taken count for the loop. The exceptions are assumptions and
1717 // guards present in the loop -- SCEV is not great at exploiting
1718 // these to compute max backedge taken counts, but can still use
1719 // these to prove lack of overflow. Use this fact to avoid
1720 // doing extra work that may not pay off.
1721 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
1722 !AC.assumptions().empty()) {
1723
1724 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1725 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1726 if (AR->hasNoUnsignedWrap()) {
1727 // Same as the nuw case above - duplicated here to avoid a compile-time
1728 // issue. It's not clear that the order of checks matters, but it's one
1729 // of two possible causes for a change which was reverted. Be
1730 // conservative for the moment.
1731 return getAddRecExpr(
1732 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1733 Depth + 1),
1734 getZeroExtendExpr(Step, Ty, Depth + 1), L,
1735 AR->getNoWrapFlags());
1736 }
1737
1738 // For a negative step, we can extend the operands iff doing so only
1739 // traverses values in the range zext([0,UINT_MAX]).
1740 if (isKnownNegative(Step)) {
1741 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
1742 getSignedRangeMin(Step));
1743 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
1744 isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
1745 // Cache knowledge of AR NW, which is propagated to this
1746 // AddRec. Negative step causes unsigned wrap, but it
1747 // still can't self-wrap.
1748 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1749 // Return the expression with the addrec on the outside.
1750 return getAddRecExpr(
1751 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1752 Depth + 1),
1753 getSignExtendExpr(Step, Ty, Depth + 1), L,
1754 AR->getNoWrapFlags());
1755 }
1756 }
1757 }
1758
1759 // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
1760 // if D + (C - D + Step * n) could be proven to not unsigned wrap
1761 // where D maximizes the number of trailing zeros of (C - D + Step * n)
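// For example (illustrative): zext({5,+,8}) becomes
// (zext(5) + zext({0,+,8}))<nuw><nsw>, since TZ(8) = 3 yields D = 5.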
1762 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
1763 const APInt &C = SC->getAPInt();
1764 const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
1765 if (D != 0) {
1766 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1767 const SCEV *SResidual =
1768 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
1769 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1770 return getAddExpr(SZExtD, SZExtR,
1771 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1772 Depth + 1);
1773 }
1774 }
1775
1776 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1777 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1778 return getAddRecExpr(
1779 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
1780 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1781 }
1782 }
1783
1784 // zext(A % B) --> zext(A) % zext(B)
1785 {
1786 const SCEV *LHS;
1787 const SCEV *RHS;
1788 if (matchURem(Op, LHS, RHS))
1789 return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
1790 getZeroExtendExpr(RHS, Ty, Depth + 1));
1791 }
1792
1793 // zext(A / B) --> zext(A) / zext(B).
1794 if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
1795 return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
1796 getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));
1797
1798 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1799 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
1800 if (SA->hasNoUnsignedWrap()) {
1801 // If the addition does not unsign overflow then we can, by definition,
1802 // commute the zero extension with the addition operation.
1803 SmallVector<const SCEV *, 4> Ops;
1804 for (const auto *Op : SA->operands())
1805 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1806 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
1807 }
1808
1809 // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
1810 // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
1811 // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1812 //
1813 // Often address arithmetic contains expressions like
1814 // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
1815 // This transformation is useful when proving that such expressions are
1816 // equal or differ by a small constant amount; see the LoadStoreVectorizer pass.
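// For example (illustrative): in (zext (5 + (4 * X))), the split below picks
// D = 1 and rewrites the expression as 1 + (zext (4 + (4 * X))).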
1817 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1818 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
1819 if (D != 0) {
1820 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1821 const SCEV *SResidual =
1822 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
1823 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1824 return getAddExpr(SZExtD, SZExtR,
1825 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1826 Depth + 1);
1827 }
1828 }
1829 }
1830
1831 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
1832 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw>
1833 if (SM->hasNoUnsignedWrap()) {
1834 // If the multiply does not unsign overflow then we can, by definition,
1835 // commute the zero extension with the multiply operation.
1836 SmallVector<const SCEV *, 4> Ops;
1837 for (const auto *Op : SM->operands())
1838 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1839 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
1840 }
1841
1842 // zext(2^K * (trunc X to iN)) to iM ->
1843 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
1844 //
1845 // Proof:
1846 //
1847 // zext(2^K * (trunc X to iN)) to iM
1848 // = zext((trunc X to iN) << K) to iM
1849 // = zext((trunc X to i{N-K}) << K)<nuw> to iM
1850 // (because shl removes the top K bits)
1851 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
1852 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
1853 //
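// For example (illustrative), with K = 2, N = 16, M = 32:
// zext(4 * (trunc X to i16)) to i32 = 4 * (zext(trunc X to i14) to i32).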
1854 if (SM->getNumOperands() == 2)
1855 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
1856 if (MulLHS->getAPInt().isPowerOf2())
1857 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
1858 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
1859 MulLHS->getAPInt().logBase2();
1860 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
1861 return getMulExpr(
1862 getZeroExtendExpr(MulLHS, Ty),
1863 getZeroExtendExpr(
1864 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
1865 SCEV::FlagNUW, Depth + 1);
1866 }
1867 }
1868
1869 // The cast wasn't folded; create an explicit cast node.
1870 // Recompute the insert position, as it may have been invalidated.
1871 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1872 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1873 Op, Ty);
1874 UniqueSCEVs.InsertNode(S, IP);
1875 addToLoopUseLists(S);
1876 return S;
1877 }
1878
1879 const SCEV *
1880 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
1881 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1882 "This is not an extending conversion!");
1883 assert(isSCEVable(Ty) &&
1884 "This is not a conversion to a SCEVable type!");
1885 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
1886 Ty = getEffectiveSCEVType(Ty);
1887
1888 // Fold if the operand is constant.
1889 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1890 return getConstant(
1891 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
1892
1893 // sext(sext(x)) --> sext(x)
1894 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
1895 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);
1896
1897 // sext(zext(x)) --> zext(x)
1898 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1899 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);
1900
1901 // Before doing any expensive analysis, check to see if we've already
1902 // computed a SCEV for this Op and Ty.
1903 FoldingSetNodeID ID;
1904 ID.AddInteger(scSignExtend);
1905 ID.AddPointer(Op);
1906 ID.AddPointer(Ty);
1907 void *IP = nullptr;
1908 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1909 // Limit recursion depth.
1910 if (Depth > MaxCastDepth) {
1911 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
1912 Op, Ty);
1913 UniqueSCEVs.InsertNode(S, IP);
1914 addToLoopUseLists(S);
1915 return S;
1916 }
1917
1918 // sext(trunc(x)) --> sext(x) or x or trunc(x)
1919 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1920 // It's possible the bits taken off by the truncate were all sign bits. If
1921 // so, we should be able to simplify this further.
1922 const SCEV *X = ST->getOperand();
1923 ConstantRange CR = getSignedRange(X);
1924 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1925 unsigned NewBits = getTypeSizeInBits(Ty);
1926 if (CR.truncate(TruncBits).signExtend(NewBits).contains(
1927 CR.sextOrTrunc(NewBits)))
1928 return getTruncateOrSignExtend(X, Ty, Depth);
1929 }
1930
1931 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1932 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
1933 if (SA->hasNoSignedWrap()) {
1934 // If the addition does not sign overflow then we can, by definition,
1935 // commute the sign extension with the addition operation.
1936 SmallVector<const SCEV *, 4> Ops;
1937 for (const auto *Op : SA->operands())
1938 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
1939 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
1940 }
1941
1942 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
1943 // if D + (C - D + x + y + ...) could be proven to not signed wrap
1944 // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1945 //
1946 // For instance, this will bring two seemingly different expressions:
1947 // 1 + sext(5 + 20 * %x + 24 * %y) and
1948 // sext(6 + 20 * %x + 24 * %y)
1949 // to the same form:
1950 // 2 + sext(4 + 20 * %x + 24 * %y)
1951 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1952 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
1953 if (D != 0) {
1954 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
1955 const SCEV *SResidual =
1956 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
1957 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
1958 return getAddExpr(SSExtD, SSExtR,
1959 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1960 Depth + 1);
1961 }
1962 }
1963 }
1964 // If the input value is a chrec scev, and we can prove that the value
1965 // did not overflow the old, smaller, value, we can sign extend all of the
1966 // operands (often constants). This allows analysis of something like
1967 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
1968 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1969 if (AR->isAffine()) {
1970 const SCEV *Start = AR->getStart();
1971 const SCEV *Step = AR->getStepRecurrence(*this);
1972 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1973 const Loop *L = AR->getLoop();
1974
1975 if (!AR->hasNoSignedWrap()) {
1976 auto NewFlags = proveNoWrapViaConstantRanges(AR);
1977 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1978 }
1979
1980 // If we have special knowledge that this addrec won't overflow,
1981 // we don't need to do any further analysis.
1982 if (AR->hasNoSignedWrap())
1983 return getAddRecExpr(
1984 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
1985 getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);
1986
1987 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1988 // Note that this serves two purposes: It filters out loops that are
1989 // simply not analyzable, and it covers the case where this code is
1990 // being called from within backedge-taken count analysis, such that
1991 // attempting to ask for the backedge-taken count would likely result
1992 // in infinite recursion. In the latter case, the analysis code will
1993 // cope with a conservative value, and it will take care to purge
1994 // that value once it has finished.
1995 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1996 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1997 // Manually compute the final value for AR, checking for
1998 // overflow.
1999
2000 // Check whether the backedge-taken count can be losslessly cast to
2001 // the addrec's type. The count is always unsigned.
2002 const SCEV *CastedMaxBECount =
2003 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
2004 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
2005 CastedMaxBECount, MaxBECount->getType(), Depth);
2006 if (MaxBECount == RecastedMaxBECount) {
2007 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
2008 // Check whether Start+Step*MaxBECount has no signed overflow.
2009 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
2010 SCEV::FlagAnyWrap, Depth + 1);
2011 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
2012 SCEV::FlagAnyWrap,
2013 Depth + 1),
2014 WideTy, Depth + 1);
2015 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
2016 const SCEV *WideMaxBECount =
2017 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
2018 const SCEV *OperandExtendedAdd =
2019 getAddExpr(WideStart,
2020 getMulExpr(WideMaxBECount,
2021 getSignExtendExpr(Step, WideTy, Depth + 1),
2022 SCEV::FlagAnyWrap, Depth + 1),
2023 SCEV::FlagAnyWrap, Depth + 1);
2024 if (SAdd == OperandExtendedAdd) {
2025 // Cache knowledge of AR NSW, which is propagated to this AddRec.
2026 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2027 // Return the expression with the addrec on the outside.
2028 return getAddRecExpr(
2029 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2030 Depth + 1),
2031 getSignExtendExpr(Step, Ty, Depth + 1), L,
2032 AR->getNoWrapFlags());
2033 }
2034 // Similar to above, only this time treat the step value as unsigned.
2035 // This covers loops that count up with an unsigned step.
2036 OperandExtendedAdd =
2037 getAddExpr(WideStart,
2038 getMulExpr(WideMaxBECount,
2039 getZeroExtendExpr(Step, WideTy, Depth + 1),
2040 SCEV::FlagAnyWrap, Depth + 1),
2041 SCEV::FlagAnyWrap, Depth + 1);
2042 if (SAdd == OperandExtendedAdd) {
2043 // If AR wraps around then
2044 //
2045 // abs(Step) * MaxBECount > unsigned-max(AR->getType())
2046 // => SAdd != OperandExtendedAdd
2047 //
2048 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
2049 // (SAdd == OperandExtendedAdd => AR is NW)
2050
2051 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
2052
2053 // Return the expression with the addrec on the outside.
2054 return getAddRecExpr(
2055 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2056 Depth + 1),
2057 getZeroExtendExpr(Step, Ty, Depth + 1), L,
2058 AR->getNoWrapFlags());
2059 }
2060 }
2061 }
2062
2063 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2064 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
2065 if (AR->hasNoSignedWrap()) {
2066 // Same as the nsw case above - duplicated here to avoid a compile-time
2067 // issue. It's not clear that the order of checks matters, but it's one
2068 // of two possible causes for a change which was reverted. Be
2069 // conservative for the moment.
2070 return getAddRecExpr(
2071 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2072 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2073 }
2074
2075 // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
2076 // if D + (C - D + Step * n) could be proven to not signed wrap
2077 // where D maximizes the number of trailing zeros of (C - D + Step * n)
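// For example (illustrative): sext({7,+,4}) can be rewritten as
// 3 + sext({4,+,4}), since TZ(4) = 2 yields D = 3.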
2078 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
2079 const APInt &C = SC->getAPInt();
2080 const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
2081 if (D != 0) {
2082 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
2083 const SCEV *SResidual =
2084 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
2085 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
2086 return getAddExpr(SSExtD, SSExtR,
2087 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
2088 Depth + 1);
2089 }
2090 }
2091
2092 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2093 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2094 return getAddRecExpr(
2095 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2096 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2097 }
2098 }
2099
2100 // If the input value is provably non-negative and we could not simplify
2101 // away the sext, build a zext instead.
2102 if (isKnownNonNegative(Op))
2103 return getZeroExtendExpr(Op, Ty, Depth + 1);
2104
2105 // The cast wasn't folded; create an explicit cast node.
2106 // Recompute the insert position, as it may have been invalidated.
2107 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2108 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
2109 Op, Ty);
2110 UniqueSCEVs.InsertNode(S, IP);
2111 addToLoopUseLists(S);
2112 return S;
2113 }
2114
2115 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
2116 /// unspecified bits out to the given type.
2117 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
2118 Type *Ty) {
2119 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
2120 "This is not an extending conversion!");
2121 assert(isSCEVable(Ty) &&
2122 "This is not a conversion to a SCEVable type!");
2123 Ty = getEffectiveSCEVType(Ty);
2124
2125 // Sign-extend negative constants.
2126 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
2127 if (SC->getAPInt().isNegative())
2128 return getSignExtendExpr(Op, Ty);
2129
2130 // Peel off a truncate cast.
2131 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
2132 const SCEV *NewOp = T->getOperand();
2133 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
2134 return getAnyExtendExpr(NewOp, Ty);
2135 return getTruncateOrNoop(NewOp, Ty);
2136 }
2137
2138 // Next try a zext cast. If the cast is folded, use it.
2139 const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
2140 if (!isa<SCEVZeroExtendExpr>(ZExt))
2141 return ZExt;
2142
2143 // Next try a sext cast. If the cast is folded, use it.
2144 const SCEV *SExt = getSignExtendExpr(Op, Ty);
2145 if (!isa<SCEVSignExtendExpr>(SExt))
2146 return SExt;
2147
2148 // Force the cast to be folded into the operands of an addrec.
2149 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
2150 SmallVector<const SCEV *, 4> Ops;
2151 for (const SCEV *Op : AR->operands())
2152 Ops.push_back(getAnyExtendExpr(Op, Ty));
2153 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
2154 }
2155
2156 // If the expression is obviously signed, use the sext cast value.
2157 if (isa<SCEVSMaxExpr>(Op))
2158 return SExt;
2159
2160 // Absent any other information, use the zext cast value.
2161 return ZExt;
2162 }
2163
2164 /// Process the given Ops list, which is a list of operands to be added under
2165 /// the given scale, and update the given map. This is a helper function for
2166 /// getAddExpr. As an example of what it does, given a sequence of operands
2167 /// that would form an add expression like this:
2168 ///
2169 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
2170 ///
2171 /// where A and B are constants, update the map with these values:
2172 ///
2173 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
2174 ///
2175 /// and add 13 + A*B*29 to AccumulatedConstant.
2176 /// This will allow getAddExpr to produce this:
2177 ///
2178 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
2179 ///
2180 /// This form often exposes folding opportunities that are hidden in
2181 /// the original operand list.
2182 ///
2183 /// Return true iff it appears that any interesting folding opportunities
2184 /// may be exposed. This helps getAddExpr short-circuit extra work in
2185 /// the common case where no interesting opportunities are present, and
2186 /// is also used as a check to avoid infinite recursion.
2187 static bool
2188 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
2189 SmallVectorImpl<const SCEV *> &NewOps,
2190 APInt &AccumulatedConstant,
2191 const SCEV *const *Ops, size_t NumOperands,
2192 const APInt &Scale,
2193 ScalarEvolution &SE) {
2194 bool Interesting = false;
2195
2196 // Iterate over the add operands. They are sorted, with constants first.
2197 unsigned i = 0;
2198 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2199 ++i;
2200 // Pull a buried constant out to the outside.
2201 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
2202 Interesting = true;
2203 AccumulatedConstant += Scale * C->getAPInt();
2204 }
2205
2206 // Next comes everything else. We're especially interested in multiplies
2207 // here, but they're in the middle, so just visit the rest with one loop.
2208 for (; i != NumOperands; ++i) {
2209 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
2210 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
2211 APInt NewScale =
2212 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
2213 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
2214 // A multiplication of a constant with another add; recurse.
2215 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
2216 Interesting |=
2217 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2218 Add->op_begin(), Add->getNumOperands(),
2219 NewScale, SE);
2220 } else {
2221 // A multiplication of a constant with some other value. Update
2222 // the map.
2223 SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands()));
2224 const SCEV *Key = SE.getMulExpr(MulOps);
2225 auto Pair = M.insert({Key, NewScale});
2226 if (Pair.second) {
2227 NewOps.push_back(Pair.first->first);
2228 } else {
2229 Pair.first->second += NewScale;
2230 // The map already had an entry for this value, which may indicate
2231 // a folding opportunity.
2232 Interesting = true;
2233 }
2234 }
2235 } else {
2236 // An ordinary operand. Update the map.
2237 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
2238 M.insert({Ops[i], Scale});
2239 if (Pair.second) {
2240 NewOps.push_back(Pair.first->first);
2241 } else {
2242 Pair.first->second += Scale;
2243 // The map already had an entry for this value, which may indicate
2244 // a folding opportunity.
2245 Interesting = true;
2246 }
2247 }
2248 }
2249
2250 return Interesting;
2251 }
2252
2253 bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
2254 const SCEV *LHS, const SCEV *RHS) {
2255 const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *,
2256 SCEV::NoWrapFlags, unsigned);
2257 switch (BinOp) {
2258 default:
2259 llvm_unreachable("Unsupported binary op");
2260 case Instruction::Add:
2261 Operation = &ScalarEvolution::getAddExpr;
2262 break;
2263 case Instruction::Sub:
2264 Operation = &ScalarEvolution::getMinusSCEV;
2265 break;
2266 case Instruction::Mul:
2267 Operation = &ScalarEvolution::getMulExpr;
2268 break;
2269 }
2270
2271 const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) =
2272 Signed ? &ScalarEvolution::getSignExtendExpr
2273 : &ScalarEvolution::getZeroExtendExpr;
2274
2275 // Check ext(LHS op RHS) == ext(LHS) op ext(RHS)
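// The two sides can only compare equal if the narrow operation did not
// wrap. E.g. (illustrative), for unsigned i8 operands 200 and 100 under Add:
// zext(200 + 100) = zext(44) = 44, while zext(200) + zext(100) = 300.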
2276 auto *NarrowTy = cast<IntegerType>(LHS->getType());
2277 auto *WideTy =
2278 IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2);
2279
2280 const SCEV *A = (this->*Extension)(
2281 (this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0);
2282 const SCEV *B = (this->*Operation)((this->*Extension)(LHS, WideTy, 0),
2283 (this->*Extension)(RHS, WideTy, 0),
2284 SCEV::FlagAnyWrap, 0);
2285 return A == B;
2286 }
2287
2288 std::pair<SCEV::NoWrapFlags, bool /*Deduced*/>
2289 ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
2290 const OverflowingBinaryOperator *OBO) {
2291 SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap;
2292
2293 if (OBO->hasNoUnsignedWrap())
2294 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2295 if (OBO->hasNoSignedWrap())
2296 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2297
2298 bool Deduced = false;
2299
2300 if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap())
2301 return {Flags, Deduced};
2302
2303 if (OBO->getOpcode() != Instruction::Add &&
2304 OBO->getOpcode() != Instruction::Sub &&
2305 OBO->getOpcode() != Instruction::Mul)
2306 return {Flags, Deduced};
2307
2308 const SCEV *LHS = getSCEV(OBO->getOperand(0));
2309 const SCEV *RHS = getSCEV(OBO->getOperand(1));
2310
2311 if (!OBO->hasNoUnsignedWrap() &&
2312 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(),
2313 /* Signed */ false, LHS, RHS)) {
2314 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2315 Deduced = true;
2316 }
2317
2318 if (!OBO->hasNoSignedWrap() &&
2319 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(),
2320 /* Signed */ true, LHS, RHS)) {
2321 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2322 Deduced = true;
2323 }
2324
2325 return {Flags, Deduced};
2326 }
2327
2328 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and
2329 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of
2330 // can't-overflow flags for the operation if possible.
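// For instance (illustrative): for (20 + %a) where %a is an i8 with unsigned
// range [0, 100], the no-wrap region check below proves the add is <nuw>.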
2331 static SCEV::NoWrapFlags
2332 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
2333 const ArrayRef<const SCEV *> Ops,
2334 SCEV::NoWrapFlags Flags) {
2335 using namespace std::placeholders;
2336
2337 using OBO = OverflowingBinaryOperator;
2338
2339 bool CanAnalyze =
2340 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
2341 (void)CanAnalyze;
2342 assert(CanAnalyze && "don't call from other places!");
2343
2344 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
2345 SCEV::NoWrapFlags SignOrUnsignWrap =
2346 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2347
2348 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
2349 auto IsKnownNonNegative = [&](const SCEV *S) {
2350 return SE->isKnownNonNegative(S);
2351 };
2352
2353 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
2354 Flags =
2355 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
2356
2357 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2358
2359 if (SignOrUnsignWrap != SignOrUnsignMask &&
2360 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
2361 isa<SCEVConstant>(Ops[0])) {
2362
2363 auto Opcode = [&] {
2364 switch (Type) {
2365 case scAddExpr:
2366 return Instruction::Add;
2367 case scMulExpr:
2368 return Instruction::Mul;
2369 default:
2370 llvm_unreachable("Unexpected SCEV op.");
2371 }
2372 }();
2373
2374 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
2375
2376 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow.
2377 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
2378 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2379 Opcode, C, OBO::NoSignedWrap);
2380 if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
2381 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2382 }
2383
2384 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow.
2385 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
2386 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2387 Opcode, C, OBO::NoUnsignedWrap);
2388 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
2389 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2390 }
2391 }
2392
2393 // <0,+,nonnegative><nw> is also nuw
2394 // TODO: Add corresponding nsw case
2395 if (Type == scAddRecExpr && ScalarEvolution::hasFlags(Flags, SCEV::FlagNW) &&
2396 !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && Ops.size() == 2 &&
2397 Ops[0]->isZero() && IsKnownNonNegative(Ops[1]))
2398 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2399
2400 // both (udiv X, Y) * Y and Y * (udiv X, Y) are always NUW
2401 if (Type == scMulExpr && !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) &&
2402 Ops.size() == 2) {
2403 if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0]))
2404 if (UDiv->getOperand(1) == Ops[1])
2405 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2406 if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1]))
2407 if (UDiv->getOperand(1) == Ops[0])
2408 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2409 }
2410
2411 return Flags;
2412 }
2413
2414 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
2415 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
2416 }
2417
2418 /// Get a canonical add expression, or something simpler if possible.
2419 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2420 SCEV::NoWrapFlags OrigFlags,
2421 unsigned Depth) {
2422 assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
2423 "only nuw or nsw allowed");
2424 assert(!Ops.empty() && "Cannot get empty add!");
2425 if (Ops.size() == 1) return Ops[0];
2426 #ifndef NDEBUG
2427 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2428 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2429 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2430 "SCEVAddExpr operand types don't match!");
2431 unsigned NumPtrs = count_if(
2432 Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); });
2433 assert(NumPtrs <= 1 && "add has at most one pointer operand");
2434 #endif
2435
2436 // Sort by complexity; this groups all similar expression types together.
2437 GroupByComplexity(Ops, &LI, DT);
2438
2439 // If there are any constants, fold them together.
2440 unsigned Idx = 0;
2441 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2442 ++Idx;
2443 assert(Idx < Ops.size());
2444 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2445 // We found two constants, fold them together!
2446 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2447 if (Ops.size() == 2) return Ops[0];
2448 Ops.erase(Ops.begin()+1); // Erase the folded element
2449 LHSC = cast<SCEVConstant>(Ops[0]);
2450 }
2451
2452 // If we are left with a constant zero being added, strip it off.
2453 if (LHSC->getValue()->isZero()) {
2454 Ops.erase(Ops.begin());
2455 --Idx;
2456 }
2457
2458 if (Ops.size() == 1) return Ops[0];
2459 }
2460
2461 // Delay expensive flag strengthening until necessary.
2462 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
2463 return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
2464 };
2465
2466 // Limit recursion calls depth.
2467 if (Depth > MaxArithDepth || hasHugeExpression(Ops))
2468 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2469
2470 if (SCEV *S = findExistingSCEVInCache(scAddExpr, Ops)) {
2471 // Don't strengthen flags if we have no new information.
2472 SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S);
2473 if (Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2474 Add->setNoWrapFlags(ComputeFlags(Ops));
2475 return S;
2476 }
2477
2478 // Okay, check to see if the same value occurs in the operand list more than
2479 // once. If so, merge them together into a multiply expression. Since we
2480 // sorted the list, these values are required to be adjacent.
2481 Type *Ty = Ops[0]->getType();
2482 bool FoundMatch = false;
2483 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2484 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
2485 // Scan ahead to count how many equal operands there are.
2486 unsigned Count = 2;
2487 while (i+Count != e && Ops[i+Count] == Ops[i])
2488 ++Count;
2489 // Merge the values into a multiply.
2490 const SCEV *Scale = getConstant(Ty, Count);
2491 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
2492 if (Ops.size() == Count)
2493 return Mul;
2494 Ops[i] = Mul;
2495 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2496 --i; e -= Count - 1;
2497 FoundMatch = true;
2498 }
2499 if (FoundMatch)
2500 return getAddExpr(Ops, OrigFlags, Depth + 1);
2501
2502 // Check for truncates. If all the operands are truncated from the same
2503 // type, see if factoring out the truncate would permit the result to be
2504 // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
2505 // if the contents of the resulting outer trunc fold to something simple.
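// For example (illustrative): if %y happens to be (100 - %x), the wide sum
// %x + %y folds to the constant 100, so trunc(%x to i8) + trunc(%y to i8)
// can be returned as a single truncated constant.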
2506 auto FindTruncSrcType = [&]() -> Type * {
2507 // We're ultimately looking to fold an addrec of truncs and muls of only
2508 // constants and truncs, so if we find any other types of SCEV
2509 // as operands of the addrec then we bail and return nullptr here.
2510 // Otherwise, we return the type of the operand of a trunc that we find.
2511 if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
2512 return T->getOperand()->getType();
2513 if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2514 const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
2515 if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
2516 return T->getOperand()->getType();
2517 }
2518 return nullptr;
2519 };
2520 if (auto *SrcType = FindTruncSrcType()) {
2521 SmallVector<const SCEV *, 8> LargeOps;
2522 bool Ok = true;
2523 // Check all the operands to see if they can be represented in the
2524 // source type of the truncate.
2525 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
2526 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
2527 if (T->getOperand()->getType() != SrcType) {
2528 Ok = false;
2529 break;
2530 }
2531 LargeOps.push_back(T->getOperand());
2532 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2533 LargeOps.push_back(getAnyExtendExpr(C, SrcType));
2534 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2535 SmallVector<const SCEV *, 8> LargeMulOps;
2536 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2537 if (const SCEVTruncateExpr *T =
2538 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2539 if (T->getOperand()->getType() != SrcType) {
2540 Ok = false;
2541 break;
2542 }
2543 LargeMulOps.push_back(T->getOperand());
2544 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2545 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
2546 } else {
2547 Ok = false;
2548 break;
2549 }
2550 }
2551 if (Ok)
2552 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
2553 } else {
2554 Ok = false;
2555 break;
2556 }
2557 }
2558 if (Ok) {
2559 // Evaluate the expression in the larger type.
2560 const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
2561 // If it folds to something simple, use it. Otherwise, don't.
2562 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2563 return getTruncateExpr(Fold, Ty);
2564 }
2565 }
2566
2567 if (Ops.size() == 2) {
2568 // Check if we have an expression of the form ((X + C1) - C2), where C1 and
2569 // C2 can be folded in a way that allows retaining wrapping flags of (X +
2570 // C1).
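// For example (illustrative): (-3 + (10 + %x)<nuw>) can keep <nuw> as
// (7 + %x)<nuw>, because adding the smaller constant 7 cannot wrap unsigned
// if adding 10 did not.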
2571 const SCEV *A = Ops[0];
2572 const SCEV *B = Ops[1];
2573 auto *AddExpr = dyn_cast<SCEVAddExpr>(B);
2574 auto *C = dyn_cast<SCEVConstant>(A);
2575 if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
2576 auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt();
2577 auto C2 = C->getAPInt();
2578 SCEV::NoWrapFlags PreservedFlags = SCEV::FlagAnyWrap;
2579
2580 APInt ConstAdd = C1 + C2;
2581 auto AddFlags = AddExpr->getNoWrapFlags();
2582 // Adding a smaller constant is NUW if the original AddExpr was NUW.
2583 if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNUW) &&
2584 ConstAdd.ule(C1)) {
2585 PreservedFlags =
2586 ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNUW);
2587 }
2588
2589 // Adding a constant with the same sign and no larger magnitude is NSW if
2590 // the original AddExpr was NSW.
2591 if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNSW) &&
2592 C1.isSignBitSet() == ConstAdd.isSignBitSet() &&
2593 ConstAdd.abs().ule(C1.abs())) {
2594 PreservedFlags =
2595 ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNSW);
2596 }
2597
2598 if (PreservedFlags != SCEV::FlagAnyWrap) {
2599 SmallVector<const SCEV *, 4> NewOps(AddExpr->operands());
2600 NewOps[0] = getConstant(ConstAdd);
2601 return getAddExpr(NewOps, PreservedFlags);
2602 }
2603 }
2604 }
2605
2606 // Skip past any other cast SCEVs.
2607 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2608 ++Idx;
2609
2610 // If there are add operands they would be next.
2611 if (Idx < Ops.size()) {
2612 bool DeletedAdd = false;
2613 // If the original flags and all inlined SCEVAddExprs are NUW, use the
2614 // common NUW flag for expression after inlining. Other flags cannot be
2615 // preserved, because they may depend on the original order of operations.
2616 SCEV::NoWrapFlags CommonFlags = maskFlags(OrigFlags, SCEV::FlagNUW);
2617 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2618 if (Ops.size() > AddOpsInlineThreshold ||
2619 Add->getNumOperands() > AddOpsInlineThreshold)
2620 break;
2621 // If we have an add, expand the add operands onto the end of the operands
2622 // list.
2623 Ops.erase(Ops.begin()+Idx);
2624 Ops.append(Add->op_begin(), Add->op_end());
2625 DeletedAdd = true;
2626 CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags());
2627 }
2628
2629 // If we deleted at least one add, we added operands to the end of the list,
2630 // and they are not necessarily sorted. Recurse to resort and resimplify
2631 // any operands we just acquired.
2632 if (DeletedAdd)
2633 return getAddExpr(Ops, CommonFlags, Depth + 1);
2634 }
2635
2636 // Skip over the add expression until we get to a multiply.
2637 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2638 ++Idx;
2639
2640 // Check to see if there are any folding opportunities present with
2641 // operands multiplied by constant values.
2642 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2643 uint64_t BitWidth = getTypeSizeInBits(Ty);
2644 DenseMap<const SCEV *, APInt> M;
2645 SmallVector<const SCEV *, 8> NewOps;
2646 APInt AccumulatedConstant(BitWidth, 0);
2647 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2648 Ops.data(), Ops.size(),
2649 APInt(BitWidth, 1), *this)) {
2650 struct APIntCompare {
2651 bool operator()(const APInt &LHS, const APInt &RHS) const {
2652 return LHS.ult(RHS);
2653 }
2654 };
2655
2656 // Some interesting folding opportunity is present, so it's worthwhile to
2657 // re-generate the operands list. Group the operands by constant scale,
2658 // to avoid multiplying by the same constant scale multiple times.
2659 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2660 for (const SCEV *NewOp : NewOps)
2661 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2662 // Re-generate the operands list.
2663 Ops.clear();
2664 if (AccumulatedConstant != 0)
2665 Ops.push_back(getConstant(AccumulatedConstant));
2666 for (auto &MulOp : MulOpLists) {
2667 if (MulOp.first == 1) {
2668 Ops.push_back(getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1));
2669 } else if (MulOp.first != 0) {
2670 Ops.push_back(getMulExpr(
2671 getConstant(MulOp.first),
2672 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
2673 SCEV::FlagAnyWrap, Depth + 1));
2674 }
2675 }
2676 if (Ops.empty())
2677 return getZero(Ty);
2678 if (Ops.size() == 1)
2679 return Ops[0];
2680 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2681 }
2682 }
2683
2684 // If we are adding something to a multiply expression, make sure the
2685 // added value is not already an operand of the multiply. If it is, merge
2686 // it into the multiply.
2687 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2688 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2689 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
2690 const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
2691 if (isa<SCEVConstant>(MulOpSCEV))
2692 continue;
2693 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2694 if (MulOpSCEV == Ops[AddOp]) {
2695 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
2696 const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
2697 if (Mul->getNumOperands() != 2) {
2698 // If the multiply has more than two operands, we must get the
2699 // Y*Z term.
2700 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2701 Mul->op_begin()+MulOp);
2702 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2703 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2704 }
2705 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
2706 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2707 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
2708 SCEV::FlagAnyWrap, Depth + 1);
2709 if (Ops.size() == 2) return OuterMul;
            if (AddOp < Idx) {
              Ops.erase(Ops.begin()+AddOp);
              Ops.erase(Ops.begin()+Idx-1);
            } else {
              Ops.erase(Ops.begin()+Idx);
              Ops.erase(Ops.begin()+AddOp-1);
            }
            Ops.push_back(OuterMul);
            return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
          }

        // Check this multiply against other multiplies being added together.
        for (unsigned OtherMulIdx = Idx+1;
             OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
             ++OtherMulIdx) {
          const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
          // If MulOp occurs in OtherMul, we can fold the two multiplies
          // together.
          for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
               OMulOp != e; ++OMulOp)
            if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
              // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
              const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
              if (Mul->getNumOperands() != 2) {
                SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                    Mul->op_begin()+MulOp);
                MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
                InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
              }
              const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
              if (OtherMul->getNumOperands() != 2) {
                SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                    OtherMul->op_begin()+OMulOp);
                MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
                InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
              }
              SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
              const SCEV *InnerMulSum =
                  getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
              const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
                                                SCEV::FlagAnyWrap, Depth + 1);
              if (Ops.size() == 2) return OuterMul;
              Ops.erase(Ops.begin()+Idx);
              Ops.erase(Ops.begin()+OtherMulIdx-1);
              Ops.push_back(OuterMul);
              return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
            }
        }
      }
    }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      // Compute nowrap flags for the addition of the loop-invariant ops and
      // the addrec. Temporarily push it as an operand for that purpose. These
      // flags are valid in the scope of the addrec only.
      LIOps.push_back(AddRec);
      SCEV::NoWrapFlags Flags = ComputeFlags(LIOps);
      LIOps.pop_back();

      // NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());

      // It is not in general safe to propagate flags valid on an add within
      // the addrec scope to one outside it.  We must prove that the inner
      // scope is guaranteed to execute if the outer one does to be able to
      // safely propagate.  We know the program is undefined if poison is
      // produced on the inner scoped addrec.  We also know that *for this use*
      // the outer scoped add can't overflow (because of the flags we just
      // computed for the inner scoped add) without the program being undefined.
      // Proving that entry to the outer scope necessitates entry to the inner
      // scope therefore proves the program undefined if the flags would be
      // violated in the outer scope.
      SCEV::NoWrapFlags AddFlags = Flags;
      if (AddFlags != SCEV::FlagAnyWrap) {
        auto *DefI = getDefiningScopeBound(LIOps);
        auto *ReachI = &*AddRecLoop->getHeader()->begin();
        if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
          AddFlags = SCEV::FlagAnyWrap;
      }
      AddRecOps[0] = getAddExpr(LIOps, AddFlags, Depth + 1);

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer add and the inner addrec are guaranteed to have no overflow.
      // Always propagate NW.
      Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec to the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being added together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      // We expect the AddRecExpr's to be sorted in reverse dominance order,
      // so that the 1st found AddRecExpr is dominated by all others.
      assert(DT.dominates(
                 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
                 AddRec->getLoop()->getHeader()) &&
             "AddRecExprs are not sorted in reverse dominance order?");
      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
        // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
        SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
             ++OtherIdx) {
          const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
          if (OtherAddRec->getLoop() == AddRecLoop) {
            for (unsigned i = 0, e = OtherAddRec->getNumOperands();
                 i != e; ++i) {
              if (i >= AddRecOps.size()) {
                AddRecOps.append(OtherAddRec->op_begin()+i,
                                 OtherAddRec->op_end());
                break;
              }
              SmallVector<const SCEV *, 2> TwoOps = {
                  AddRecOps[i], OtherAddRec->getOperand(i)};
              AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
          }
        }
        // Step size has changed, so we cannot guarantee no self-wraparound.
        Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
        return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
      }
    }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr.  Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
}

const SCEV *
ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  SCEVAddExpr *S =
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
                                       const Loop *L, SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  ID.AddPointer(L);
  void *IP = nullptr;
  SCEVAddRecExpr *S =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  setNoWrapFlags(S, Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  SCEVMulExpr *S =
      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  uint64_t k = i*j;
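  // The j > 1 guard both skips multiplications by 0 or 1, which can never
  // overflow, and avoids dividing by zero in the check below.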
  if (j > 1 && k / j != i) Overflow = true;
  return k;
}

/// Compute the result of "n choose k", the binomial coefficient. If an
/// intermediate computation overflows, Overflow will be set and the return
/// will be garbage. Overflow is not cleared on absence of overflow.
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
  // We use the multiplicative formula:
  //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
  // At iteration i, we multiply by the i-th term of the numerator and divide
  // by i. Each such division is exact, because the product of any i
  // consecutive integers is divisible by i!, and the exact divisions help
  // reduce the chance of overflow in the intermediate computations. However,
  // we can still overflow even when the final result would fit.
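  // For example, Choose(5, 2) runs two iterations: r = (1*5)/1 = 5, then
  // r = (5*4)/2 = 10, matching C(5,2) = 10 with no intermediate overflow.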

  if (n == 0 || n == k) return 1;
  if (k > n) return 0;

  if (k > n/2)
    k = n-k;

  uint64_t r = 1;
  for (uint64_t i = 1; i <= k; ++i) {
    r = umul_ov(r, n-(i-1), Overflow);
    r /= i;
  }
  return r;
}

/// Determine if any of the operands in this SCEV are a constant or if
/// any of the add or multiply expressions in this SCEV contain a constant.
static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
  struct FindConstantInAddMulChain {
    bool FoundConstant = false;

    bool follow(const SCEV *S) {
      FoundConstant |= isa<SCEVConstant>(S);
      return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
    }

    bool isDone() const {
      return FoundConstant;
    }
  };

  FindConstantInAddMulChain F;
  SCEVTraversal<FindConstantInAddMulChain> ST(F);
  ST.visitAll(StartExpr);
  return F.FoundConstant;
}

/// Get a canonical multiply expression, or something simpler if possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags OrigFlags,
                                        unsigned Depth) {
  assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty mul!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = Ops[0]->getType();
  assert(!ETy->isPointerTy());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(Ops[i]->getType() == ETy &&
           "SCEVMulExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we have a multiply of zero, it will always be zero.
    if (LHSC->getValue()->isZero())
      return LHSC;

    // If we are left with a constant one being multiplied, strip it off.
    if (LHSC->getValue()->isOne()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1)
      return Ops[0];
  }

  // Delay expensive flag strengthening until necessary.
  auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
    return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
  };

  // Limit the depth of recursive calls.
  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
    return getOrCreateMulExpr(Ops, ComputeFlags(Ops));

  if (SCEV *S = findExistingSCEVInCache(scMulExpr, Ops)) {
    // Don't strengthen flags if we have no new information.
    SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S);
    if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
      Mul->setNoWrapFlags(ComputeFlags(Ops));
    return S;
  }

  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    if (Ops.size() == 2) {
      // C1*(C2+V) -> C1*C2 + C1*V
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        // If any of Add's ops are Adds or Muls with a constant, apply this
        // transformation as well.
        //
        // TODO: There are some cases where this transformation is not
        // profitable; for example, Add = (C0 + X) * Y + Z.  Maybe the scope of
        // this transformation should be narrowed down.
        if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
                                       SCEV::FlagAnyWrap, Depth + 1),
                            getMulExpr(LHSC, Add->getOperand(1),
                                       SCEV::FlagAnyWrap, Depth + 1),
                            SCEV::FlagAnyWrap, Depth + 1);

      if (Ops[0]->isAllOnesValue()) {
        // If we have a mul by -1 of an add, try distributing the -1 among the
        // add operands.
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          SmallVector<const SCEV *, 4> NewOps;
          bool AnyFolded = false;
          for (const SCEV *AddOp : Add->operands()) {
            const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
                                         Depth + 1);
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          if (AnyFolded)
            return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
        } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
          // Negation preserves a recurrence's no self-wrap property.
          SmallVector<const SCEV *, 4> Operands;
          for (const SCEV *AddRecOp : AddRec->operands())
            Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
                                          Depth + 1));

          return getAddRecExpr(Operands, AddRec->getLoop(),
                               AddRec->getNoWrapFlags(SCEV::FlagNW));
        }
      }
    }
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      if (Ops.size() > MulOpsInlineThreshold)
        break;
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Mul->op_begin(), Mul->op_end());
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      // NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
                                    SCEV::FlagAnyWrap, Depth + 1));

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer mul and the inner addrec are guaranteed to have no overflow.
      //
      // No self-wrap cannot be guaranteed after changing the step size, but
      // will be inferred if either NUW or NSW is true.
      SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec});
      const SCEV *NewRec = getAddRecExpr(
          NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags));

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being multiplied together.  If so, we can fold them.

    // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
    // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
    //       choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
    //     ]]],+,...up to x=2n}.
    // Note that the arguments to choose() are always integers with values
    // known at compile time, never SCEV objects.
    //
    // The implementation avoids pointless extra computations when the two
    // addrec's are of different length (mathematically, it's equivalent to
    // an infinite stream of zeros on the right).
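    // For intuition: {1,+,1}<L> * {1,+,1}<L>, whose value at iteration i is
    // (1+i)^2, comes out as {1,+,3,+,2}<L>, and indeed
    // 1 + 3*C(i,1) + 2*C(i,2) = 1 + 3i + i*(i-1) = (1+i)^2.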
    bool OpsModified = false;
    for (unsigned OtherIdx = Idx+1;
         OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      const SCEVAddRecExpr *OtherAddRec =
          dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
      if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
        continue;

      // Limit max number of arguments to avoid creation of unreasonably big
      // SCEVAddRecs with very complex operands.
      if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
          MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
        continue;

      bool Overflow = false;
      Type *Ty = AddRec->getType();
      bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
      SmallVector<const SCEV*, 7> AddRecOps;
      for (int x = 0, xe = AddRec->getNumOperands() +
             OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
        SmallVector<const SCEV *, 7> SumOps;
        for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
          uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
          for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
                 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
               z < ze && !Overflow; ++z) {
            uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
            uint64_t Coeff;
            if (LargerThan64Bits)
              Coeff = umul_ov(Coeff1, Coeff2, Overflow);
            else
              Coeff = Coeff1*Coeff2;
            const SCEV *CoeffTerm = getConstant(Ty, Coeff);
            const SCEV *Term1 = AddRec->getOperand(y-z);
            const SCEV *Term2 = OtherAddRec->getOperand(z);
            SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
                                        SCEV::FlagAnyWrap, Depth + 1));
          }
        }
        if (SumOps.empty())
          SumOps.push_back(getZero(Ty));
        AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
      }
      if (!Overflow) {
        const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
                                              SCEV::FlagAnyWrap);
        if (Ops.size() == 2) return NewAddRec;
        Ops[Idx] = NewAddRec;
        Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
        OpsModified = true;
        AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
        if (!AddRec)
          break;
      }
    }
    if (OpsModified)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }
  // Okay, it looks like we really DO need a mul expr.  Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
}

/// Represents an unsigned remainder expression based on unsigned division.
const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
             getEffectiveSCEVType(RHS->getType()) &&
         "SCEVURemExpr operand types don't match!");

  // Short-circuit easy cases
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    // If constant is one, the result is trivial
    if (RHSC->getValue()->isOne())
      return getZero(LHS->getType()); // X urem 1 --> 0

    // If constant is a power of two, fold into a zext(trunc(LHS)).
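    // E.g. for an i32 %x, %x urem 8 folds to a zext back to i32 of
    // (trunc %x to i3): only the low log2(8) = 3 bits of %x survive.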
    if (RHSC->getAPInt().isPowerOf2()) {
      Type *FullTy = LHS->getType();
      Type *TruncTy =
          IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
      return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
    }
  }

  // Fall back to %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
  const SCEV *UDiv = getUDivExpr(LHS, RHS);
  const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
  return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(!LHS->getType()->isPointerTy() &&
         "SCEVUDivExpr operand can't be pointer!");
  assert(LHS->getType() == RHS->getType() &&
         "SCEVUDivExpr operand types don't match!");

  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  // 0 udiv Y == 0
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS))
    if (LHSC->getValue()->isZero())
      return LHS;

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->isOne())
      return LHS; // X udiv 1 --> x
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the operands of its
      // operands (e.g. into the addends of an add on the LHS).
      // TODO: Generalize this to non-constants by using known-bits information.
      Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getAPInt().countLeadingZeros();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getAPInt().isPowerOf2())
        ++MaxShiftAmt;
      IntegerType *ExtTy =
          IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
                dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
          // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
          const APInt &StepInt = Step->getAPInt();
          const APInt &DivInt = RHSC->getAPInt();
          if (!StepInt.urem(DivInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            SmallVector<const SCEV *, 4> Operands;
            for (const SCEV *Op : AR->operands())
              Operands.push_back(getUDivExpr(Op, RHS));
            return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
          }
          // Get a canonical UDivExpr for a recurrence:
          // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
          // We can currently only fold X%N if X is constant.
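          // E.g. with X=5, N=2, C=2 (and the no-overflow check passing),
          // Y = 5 - (5 % 2) = 4, so {5,+,2}/C is re-expressed as {4,+,2}/C
          // before re-querying the cache below.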
          const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
          if (StartC && !DivInt.urem(StepInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            const APInt &StartInt = StartC->getAPInt();
            const APInt &StartRem = StartInt.urem(StepInt);
            if (StartRem != 0) {
              const SCEV *NewLHS =
                  getAddRecExpr(getConstant(StartInt - StartRem), Step,
                                AR->getLoop(), SCEV::FlagNW);
              if (LHS != NewLHS) {
                LHS = NewLHS;

                // Reset the ID to include the new LHS, and check if it is
                // already cached.
                ID.clear();
                ID.AddInteger(scUDivExpr);
                ID.AddPointer(LHS);
                ID.AddPointer(RHS);
                IP = nullptr;
                if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
                  return S;
              }
            }
          }
        }
      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
      if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : M->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
          // Find an operand that's safely divisible.
          for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
            const SCEV *Op = M->getOperand(i);
            const SCEV *Div = getUDivExpr(Op, RHSC);
            if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
              Operands = SmallVector<const SCEV *, 4>(M->operands());
              Operands[i] = Div;
              return getMulExpr(Operands);
            }
          }
      }

      // (A/B)/C --> A/(B*C) if safe and B*C can be folded.
      if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) {
        if (auto *DivisorConstant =
                dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
          bool Overflow = false;
          APInt NewRHS =
              DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow);
          if (Overflow) {
            return getConstant(RHSC->getType(), 0, false);
          }
          return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS));
        }
      }

      // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
      if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : A->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
          Operands.clear();
          for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
            const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
            if (isa<SCEVUDivExpr>(Op) ||
                getMulExpr(Op, RHS) != A->getOperand(i))
              break;
            Operands.push_back(Op);
          }
          if (Operands.size() == A->getNumOperands())
            return getAddExpr(Operands);
        }
      }

      // Fold if both operands are constant.
      if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
        Constant *LHSCV = LHSC->getValue();
        Constant *RHSCV = RHSC->getValue();
        return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
                                                                   RHSCV)));
      }
    }
  }

  // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs
  // changes). Make sure we get a new one.
  IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
                                             LHS, RHS);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

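/// Compute the GCD of the absolute values of two SCEV constants, first
/// zero-extending the narrower APInt so both operands share a bit width.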
static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
  APInt A = C1->getAPInt().abs();
  APInt B = C2->getAPInt().abs();
  uint32_t ABW = A.getBitWidth();
  uint32_t BBW = B.getBitWidth();

  if (ABW > BBW)
    B = B.zext(ABW);
  else if (ABW < BBW)
    A = A.zext(BBW);

  return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible. There is no representation for an exact udiv in SCEV IR, but we
/// can attempt to remove factors from the LHS and RHS.  We can't do this when
/// it's not exact because the udiv may be clearing bits.
const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
                                              const SCEV *RHS) {
  // TODO: we could try to find factors in all sorts of things, but for now we
  // just deal with u/exact (multiply, constant). See SCEVDivision towards the
  // end of this file for inspiration.

  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
  if (!Mul || !Mul->hasNoUnsignedWrap())
    return getUDivExpr(LHS, RHS);

  if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
    // If the mulexpr multiplies by a constant, then that constant must be the
    // first element of the mulexpr.
    if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
      if (LHSCst == RHSCst) {
        SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands()));
        return getMulExpr(Operands);
      }

      // We can't just assume that LHSCst divides RHSCst cleanly, it could be
      // that there's a factor provided by one of the other terms. We need to
      // check.
      APInt Factor = gcd(LHSCst, RHSCst);
      if (!Factor.isIntN(1)) {
        LHSCst =
            cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
        RHSCst =
            cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
        SmallVector<const SCEV *, 2> Operands;
        Operands.push_back(LHSCst);
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        LHS = getMulExpr(Operands);
        RHS = RHSCst;
        Mul = dyn_cast<SCEVMulExpr>(LHS);
        if (!Mul)
          return getUDivExactExpr(LHS, RHS);
      }
    }
  }

  for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
    if (Mul->getOperand(i) == RHS) {
      SmallVector<const SCEV *, 2> Operands;
      Operands.append(Mul->op_begin(), Mul->op_begin() + i);
      Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
      return getMulExpr(Operands);
    }
  }

  return getUDivExpr(LHS, RHS);
}

/// Get an add recurrence expression for the specified loop.  Simplify the
/// expression as much as possible.
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
                                           const Loop *L,
                                           SCEV::NoWrapFlags Flags) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
      return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, Flags);
}

/// Get an add recurrence expression for the specified loop.  Simplify the
/// expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L, SCEV::NoWrapFlags Flags) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
    assert(!Operands[i]->getType()->isPointerTy() && "Step must be integer");
  }
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    assert(isLoopInvariant(Operands[i], L) &&
           "SCEVAddRecExpr operand is not loop-invariant!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
  }

  // It's tempting to want to call getConstantMaxBackedgeTakenCount here and
  // use that information to infer NUW and NSW flags. However, computing a
  // BE count requires calling getAddRecExpr, so we may not yet have a
  // meaningful BE count at this point (and if we don't, we'd be stuck
  // with a SCEVCouldNotCompute as the cached BE count).

  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
            ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
            : (!NestedLoop->contains(L) &&
               DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to their
      // loops.  Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = all_of(
          Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });

      if (AllInvariant) {
        // Create a recurrence for the outer loop with the same step size.
        //
        // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
        // inner recurrence has the same property.
        SCEV::NoWrapFlags OuterFlags =
            maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());

        NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
          return isLoopInvariant(Op, NestedLoop);
        });

        if (AllInvariant) {
          // Ok, both add recurrences are valid after the transformation.
          //
          // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
          // the outer recurrence has the same property.
          SCEV::NoWrapFlags InnerFlags =
              maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
        }
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  // Okay, it looks like we really DO need an addrec expr.  Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddRecExpr(Operands, L, Flags);
}

const SCEV *
ScalarEvolution::getGEPExpr(GEPOperator *GEP,
                            const SmallVectorImpl<const SCEV *> &IndexExprs) {
  const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
  // getSCEV(Base)->getType() has the same address space as Base->getType()
  // because SCEV::getType() preserves the address space.
  Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
  const bool AssumeInBoundsFlags = [&]() {
    if (!GEP->isInBounds())
      return false;

    // We'd like to propagate flags from the IR to the corresponding SCEV nodes,
    // but to do that, we have to ensure that said flag is valid in the entire
    // defined scope of the SCEV.
    auto *GEPI = dyn_cast<Instruction>(GEP);
    // TODO: non-instructions have global scope.  We might be able to prove
    // some global scope cases
    return GEPI && isSCEVExprNeverPoison(GEPI);
  }();

  SCEV::NoWrapFlags OffsetWrap =
      AssumeInBoundsFlags ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  Type *CurTy = GEP->getType();
  bool FirstIter = true;
  SmallVector<const SCEV *, 4> Offsets;
  for (const SCEV *IndexExpr : IndexExprs) {
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (StructType *STy = dyn_cast<StructType>(CurTy)) {
      // For a struct, add the member offset.
      ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
      unsigned FieldNo = Index->getZExtValue();
      const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo);
      Offsets.push_back(FieldOffset);

      // Update CurTy to the type of the field at Index.
      CurTy = STy->getTypeAtIndex(Index);
    } else {
      // Update CurTy to its element type.
      if (FirstIter) {
        assert(isa<PointerType>(CurTy) &&
               "The first index of a GEP indexes a pointer");
        CurTy = GEP->getSourceElementType();
        FirstIter = false;
      } else {
        CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0);
      }
      // For an array, add the element offset, explicitly scaled.
      const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy);
      // Getelementptr indices are signed.
      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy);

      // Multiply the index by the element size to compute the element offset.
      const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap);
      Offsets.push_back(LocalOffset);
    }
  }

  // Handle degenerate case of GEP without offsets.
  if (Offsets.empty())
    return BaseExpr;

  // Add the offsets together, assuming nsw if inbounds.
  const SCEV *Offset = getAddExpr(Offsets, OffsetWrap);
  // Add the base address and the offset. We cannot use the nsw flag, as the
  // base address is unsigned. However, if we know that the offset is
  // non-negative, we can use nuw.
  SCEV::NoWrapFlags BaseWrap = AssumeInBoundsFlags && isKnownNonNegative(Offset)
                                   ? SCEV::FlagNUW : SCEV::FlagAnyWrap;
  auto *GEPExpr = getAddExpr(BaseExpr, Offset, BaseWrap);
  assert(BaseExpr->getType() == GEPExpr->getType() &&
         "GEP should not change type mid-flight.");
  return GEPExpr;
}

SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
                                               ArrayRef<const SCEV *> Ops) {
  FoldingSetNodeID ID;
  ID.AddInteger(SCEVType);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  return UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
}

const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) {
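  // abs(Op) is canonicalized as smax(Op, -Op); IsNSW marks the negation nsw.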
  SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
  return getSMaxExpr(Op, getNegativeSCEV(Op, Flags));
}

const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
                                           SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "Operand types don't match!");
    assert(Ops[0]->getType()->isPointerTy() ==
               Ops[i]->getType()->isPointerTy() &&
           "min/max should be consistently pointerish");
  }
#endif

  bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr;
  bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr;

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // Check if we have created the same expression before.
  if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) {
    return S;
  }

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    auto FoldOp = [&](const APInt &LHS, const APInt &RHS) {
      if (Kind == scSMaxExpr)
        return APIntOps::smax(LHS, RHS);
      else if (Kind == scSMinExpr)
        return APIntOps::smin(LHS, RHS);
      else if (Kind == scUMaxExpr)
        return APIntOps::umax(LHS, RHS);
      else if (Kind == scUMinExpr)
        return APIntOps::umin(LHS, RHS);
      llvm_unreachable("Unknown SCEV min/max opcode");
    };

    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
          getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    bool IsMinV = LHSC->getValue()->isMinValue(IsSigned);
    bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned);

    if (IsMax ? IsMinV : IsMaxV) {
      // If we are left with a constant minimum(/maximum)-int, strip it off.
      Ops.erase(Ops.begin());
      --Idx;
    } else if (IsMax ? IsMaxV : IsMinV) {
      // If we have a max(/min) with a constant maximum(/minimum)-int,
      // it will always be the extremum.
      return LHSC;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first operation of the same kind
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind)
    ++Idx;

  // Check to see if one of the operands is of the same kind. If so, expand its
  // operands onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedAny = false;
    while (Ops[Idx]->getSCEVType() == Kind) {
      const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]);
      Ops.erase(Ops.begin()+Idx);
      Ops.append(SMME->op_begin(), SMME->op_end());
      DeletedAny = true;
    }

    if (DeletedAny)
      return getMinMaxExpr(Kind, Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, delete one.  Since we sorted the list, these values are required to
  // be adjacent.
  llvm::CmpInst::Predicate GEPred =
      IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
  llvm::CmpInst::Predicate LEPred =
      IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred;
  llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred;
  for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
    if (Ops[i] == Ops[i + 1] ||
        isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
      //  X op Y op Y  -->  X op Y
      //  X op Y       -->  X, if we know X, Y are ordered appropriately
      Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
      --i;
      --e;
    } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
                                               Ops[i + 1])) {
      //  X op Y  -->  Y, if we know X, Y are ordered appropriately
      Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
      --i;
      --e;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  // Okay, it looks like we really DO need an expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(Kind);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
  if (ExistingSCEV)
    return ExistingSCEV;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator)
      SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());

  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getSMaxExpr(Ops);
}

const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scSMaxExpr, Ops);
}

const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getUMaxExpr(Ops);
}

const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scUMaxExpr, Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getSMinExpr(Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scSMinExpr, Ops);
}

const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinExpr(Ops);
}

const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scUMinExpr, Ops);
}

const SCEV *
ScalarEvolution::getSizeOfScalableVectorExpr(Type *IntTy,
                                             ScalableVectorType *ScalableTy) {
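  // A scalable vector's size is not a compile-time constant, so build the
  // classic "sizeof" idiom instead: ptrtoint(gep (ScalableTy*)null, 1).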
  Constant *NullPtr = Constant::getNullValue(ScalableTy->getPointerTo());
  Constant *One = ConstantInt::get(IntTy, 1);
  Constant *GEP = ConstantExpr::getGetElementPtr(ScalableTy, NullPtr, One);
  // Note that the expression we created is the final expression; we don't
  // want to simplify it any further. Also, if we call a normal getSCEV(),
  // we'll end up in an endless recursion. So just create an SCEVUnknown.
  return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy));
}

const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  if (auto *ScalableAllocTy = dyn_cast<ScalableVectorType>(AllocTy))
    return getSizeOfScalableVectorExpr(IntTy, ScalableAllocTy);
  // We can bypass creating a target-independent constant expression and then
  // folding it back into a ConstantInt. This is just a compile-time
  // optimization.
  return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
}

const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) {
  if (auto *ScalableStoreTy = dyn_cast<ScalableVectorType>(StoreTy))
    return getSizeOfScalableVectorExpr(IntTy, ScalableStoreTy);
  // We can bypass creating a target-independent constant expression and then
  // folding it back into a ConstantInt. This is just a compile-time
  // optimization.
  return getConstant(IntTy, getDataLayout().getTypeStoreSize(StoreTy));
}

const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
                                             StructType *STy,
                                             unsigned FieldNo) {
  // We can bypass creating a target-independent constant expression and then
  // folding it back into a ConstantInt. This is just a compile-time
  // optimization.
  return getConstant(
      IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
}

const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here.  createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// Test if values of the given type are analyzable within the SCEV
/// framework. This primarily includes integer types, and it can optionally
/// include pointer types if the ScalarEvolution class has access to
/// target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntOrPtrTy();
}

/// Return the size in bits of the specified type, for which isSCEVable must
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  if (Ty->isPointerTy())
    return getDataLayout().getIndexTypeSizeInBits(Ty);
  return getDataLayout().getTypeSizeInBits(Ty);
}

/// Return a type with the same bitwidth as the given type and which represents
/// how SCEV will treat the given type, for which isSCEVable must return
/// true. For pointer types, this is the pointer index sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return getDataLayout().getIndexType(Ty);
}

Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
  return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
}

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return CouldNotCompute.get();
}

bool ScalarEvolution::checkValidity(const SCEV *S) const {
  bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
    auto *SU = dyn_cast<SCEVUnknown>(S);
    return SU && SU->getValue() == nullptr;
  });

  return !ContainsNulls;
}

bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
  HasRecMapType::iterator I = HasRecMap.find(S);
  if (I != HasRecMap.end())
    return I->second;

  bool FoundAddRec =
      SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); });
  HasRecMap.insert({S, FoundAddRec});
  return FoundAddRec;
}

/// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
/// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
/// offset I, then return {S', I}, else return {\p S, nullptr}.
static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
  const auto *Add = dyn_cast<SCEVAddExpr>(S);
  if (!Add)
    return {S, nullptr};

  if (Add->getNumOperands() != 2)
    return {S, nullptr};

  auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
  if (!ConstOp)
    return {S, nullptr};

  return {Add->getOperand(1), ConstOp->getValue()};
}

/// Return the ValueOffsetPair set for \p S. \p S can be represented
/// by the value and offset from any ValueOffsetPair in the set.
ScalarEvolution::ValueOffsetPairSetVector *
ScalarEvolution::getSCEVValues(const SCEV *S) {
  ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
  if (SI == ExprValueMap.end())
    return nullptr;
#ifndef NDEBUG
  if (VerifySCEVMap) {
    // Check there is no dangling Value in the set returned.
    for (const auto &VE : SI->second)
      assert(ValueExprMap.count(VE.first));
  }
#endif
  return &SI->second;
}

/// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
/// cannot be used separately. eraseValueFromMap should be used to remove
/// V from ValueExprMap and ExprValueMap at the same time.
void ScalarEvolution::eraseValueFromMap(Value *V) {
  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    // Remove {V, 0} from the set of ExprValueMap[S]
    if (auto *SV = getSCEVValues(S))
      SV->remove({V, nullptr});

    // Remove {V, Offset} from the set of ExprValueMap[Stripped]
    const SCEV *Stripped;
    ConstantInt *Offset;
    std::tie(Stripped, Offset) = splitAddExpr(S);
    if (Offset != nullptr) {
      if (auto *SV = getSCEVValues(Stripped))
        SV->remove({V, Offset});
    }
    ValueExprMap.erase(V);
  }
}

/// Check whether value has nuw/nsw/exact set but SCEV does not.
/// TODO: In reality it is better to check the poison recursively
/// but this is better than nothing.
static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (isa<OverflowingBinaryOperator>(I)) {
      if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
        if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
          return true;
        if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
          return true;
      }
    } else if (isa<PossiblyExactOperator>(I) && I->isExact())
      return true;
  }
  return false;
}

/// Return an existing SCEV if it exists, otherwise analyze the expression and
/// create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  const SCEV *S = getExistingSCEV(V);
  if (S == nullptr) {
    S = createSCEV(V);
    // During PHI resolution, it is possible to create two SCEVs for the same
    // V, so we need to double-check whether V->S was inserted into
    // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
    std::pair<ValueExprMapType::iterator, bool> Pair =
        ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
      ExprValueMap[S].insert({V, nullptr});

      // If S == Stripped + Offset, add Stripped -> {V, Offset} into
      // ExprValueMap.
      const SCEV *Stripped = S;
      ConstantInt *Offset = nullptr;
      std::tie(Stripped, Offset) = splitAddExpr(S);
      // If Stripped is a SCEVUnknown, don't bother to save
      // Stripped -> {V, offset}. It doesn't simplify and sometimes even
      // increases the complexity of the expansion code.
      // If V is GetElementPtrInst, don't save Stripped -> {V, offset}
      // because it may generate add/sub instead of GEP in SCEV expansion.
      if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) &&
          !isa<GetElementPtrInst>(V))
        ExprValueMap[Stripped].insert({V, Offset});
    }
  }
  return S;
}

const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    if (checkValidity(S))
      return S;
    eraseValueFromMap(V);
    forgetMemoizedResults(S);
  }
  return nullptr;
}

/// Return a SCEV corresponding to -V = -1*V
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
                                             SCEV::NoWrapFlags Flags) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(V, getMinusOne(Ty), Flags);
}

/// If Expr computes ~A, return A else return nullptr
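/// (SCEV canonicalizes ~A as (-1) + (-1)*A, which is the shape matched below.)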
MatchNotExpr(const SCEV * Expr)4149 static const SCEV *MatchNotExpr(const SCEV *Expr) {
4150 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
4151 if (!Add || Add->getNumOperands() != 2 ||
4152 !Add->getOperand(0)->isAllOnesValue())
4153 return nullptr;
4154
4155 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
4156 if (!AddRHS || AddRHS->getNumOperands() != 2 ||
4157 !AddRHS->getOperand(0)->isAllOnesValue())
4158 return nullptr;
4159
4160 return AddRHS->getOperand(1);
4161 }

/// Return a SCEV corresponding to ~V = -1-V
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  assert(!V->getType()->isPointerTy() && "Can't negate pointer");

  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
               cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y)
  if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) {
    auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) {
      SmallVector<const SCEV *, 2> MatchedOperands;
      for (const SCEV *Operand : MME->operands()) {
        const SCEV *Matched = MatchNotExpr(Operand);
        if (!Matched)
          return (const SCEV *)nullptr;
        MatchedOperands.push_back(Matched);
      }
      return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()),
                           MatchedOperands);
    };
    if (const SCEV *Replaced = MatchMinMaxNegation(MME))
      return Replaced;
  }

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMinusSCEV(getMinusOne(Ty), V);
}
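
// Worked example for the min/max fold above (editorial sketch): given
//   V = smin((-1 + (-1 * %a)), (-1 + (-1 * %b)))   i.e. smin(~%a, ~%b),
// MatchNotExpr strips each operand down to %a and %b, and negate() flips the
// expression kind, so getNotSCEV(V) folds to smax(%a, %b) instead of
// building the generic (-1 - V) form.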

const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) {
  assert(P->getType()->isPointerTy());

  if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(P)) {
    // The base of an AddRec is the first operand.
    SmallVector<const SCEV *> Ops{AddRec->operands()};
    Ops[0] = removePointerBase(Ops[0]);
    // Don't try to transfer nowrap flags for now. We could in some cases
    // (for example, if the pointer operand of the AddRec is a SCEVUnknown).
    return getAddRecExpr(Ops, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }
  if (auto *Add = dyn_cast<SCEVAddExpr>(P)) {
    // The base of an Add is the pointer operand.
    SmallVector<const SCEV *> Ops{Add->operands()};
    const SCEV **PtrOp = nullptr;
    for (const SCEV *&AddOp : Ops) {
      if (AddOp->getType()->isPointerTy()) {
        assert(!PtrOp && "Cannot have multiple pointer ops");
        PtrOp = &AddOp;
      }
    }
    *PtrOp = removePointerBase(*PtrOp);
    // Don't try to transfer nowrap flags for now. We could in some cases
    // (for example, if the pointer operand of the Add is a SCEVUnknown).
    return getAddExpr(Ops);
  }
  // Any other expression must be a pointer base.
  return getZero(P->getType());
}
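
// Illustrative sketch (editorial, hypothetical SCEVs): for an add recurrence
// {%base,+,4}<%loop> this returns {0,+,4}<%loop>, and for a pointer add
// expression (8 + %base) it returns the integer SCEV 8 -- i.e. P minus
// getPointerBase(P), expressed in the effective (index) integer type.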

const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                          SCEV::NoWrapFlags Flags,
                                          unsigned Depth) {
  // Fast path: X - X --> 0.
  if (LHS == RHS)
    return getZero(LHS->getType());

  // If we subtract two pointers with different pointer bases, bail.
  // Eventually, we're going to add an assertion to getMulExpr that we
  // can't multiply by a pointer.
  if (RHS->getType()->isPointerTy()) {
    if (!LHS->getType()->isPointerTy() ||
        getPointerBase(LHS) != getPointerBase(RHS))
      return getCouldNotCompute();
    LHS = removePointerBase(LHS);
    RHS = removePointerBase(RHS);
  }

  // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
  // makes it so that we cannot make much use of NUW.
  auto AddFlags = SCEV::FlagAnyWrap;
  const bool RHSIsNotMinSigned =
      !getSignedRangeMin(RHS).isMinSignedValue();
  if (hasFlags(Flags, SCEV::FlagNSW)) {
    // Let M be the minimum representable signed value. Then (-1)*RHS
    // signed-wraps if and only if RHS is M. That can happen even for
    // a NSW subtraction because e.g. (-1)*M signed-wraps even though
    // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
    // (-1)*RHS, we need to prove that RHS != M.
    //
    // If LHS is non-negative and we know that LHS - RHS does not
    // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
    // either by proving that RHS > M or that LHS >= 0.
    if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
      AddFlags = SCEV::FlagNSW;
    }
  }

  // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
  // RHS is NSW and LHS >= 0.
  //
  // The difficulty here is that the NSW flag may have been proven
  // relative to a loop that is to be found in a recurrence in LHS and
  // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
  // larger scope than intended.
  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}
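
// Worked example for the NSW reasoning above (editorial): in i8, let
// M = -128. The subtraction (-1) - (-128) = 127 does not signed-wrap, yet
// rewriting it as (-1) + (-1)*(-128) requires computing (-1)*(-128) = +128,
// which is not representable in i8. This is why NSW only transfers to the
// negation when RHS is provably not M.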

const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getZeroExtendExpr(V, Ty, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getSignExtendExpr(V, Ty, Depth);
}

const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}
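
// Quick reference for the conversion helpers above (editorial sketch, with a
// hypothetical i32 SCEV V and a ScalarEvolution &SE):
//   SE.getTruncateOrZeroExtend(V, i64) -> (zext i32 V to i64)
//   SE.getTruncateOrSignExtend(V, i16) -> (trunc i32 V to i16)
//   SE.getNoopOrSignExtend(V, i32)     -> V (same width, no-op)
//   SE.getTruncateOrNoop(V, i32)       -> V (asserts if it would extend)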

const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinFromMismatchedTypes(Ops);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(
    SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "At least one operand must be!");
  // Trivial case.
  if (Ops.size() == 1)
    return Ops[0];

  // Find the max type first.
  Type *MaxType = nullptr;
  for (auto *S : Ops)
    if (MaxType)
      MaxType = getWiderType(MaxType, S->getType());
    else
      MaxType = S->getType();
  assert(MaxType && "Failed to find maximum type!");

  // Extend all ops to max type.
  SmallVector<const SCEV *, 2> PromotedOps;
  for (auto *S : Ops)
    PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));

  // Generate umin.
  return getUMinExpr(PromotedOps);
}
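
// Illustrative example (editorial): for operands of mixed widths, say an
// i32 %a and an i64 %b, the ops are first unified at the widest type, so the
// result is umin((zext i32 %a to i64), %b). Zero-extension is the right
// choice here because umin/umax compare unsigned values.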

const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  // A pointer operand may evaluate to a nonpointer expression, such as null.
  if (!V->getType()->isPointerTy())
    return V;

  while (true) {
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
      V = AddRec->getStart();
    } else if (auto *Add = dyn_cast<SCEVAddExpr>(V)) {
      const SCEV *PtrOp = nullptr;
      for (const SCEV *AddOp : Add->operands()) {
        if (AddOp->getType()->isPointerTy()) {
          assert(!PtrOp && "Cannot have multiple pointer ops");
          PtrOp = AddOp;
        }
      }
      assert(PtrOp && "Must have pointer op");
      V = PtrOp;
    } else // Not something we can look further into.
      return V;
  }
}
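
// Illustrative walk (editorial, hypothetical expression): for
//   V = ((4 * %i) + {%base,+,8}<%loop>)
// the loop above first selects the pointer-typed AddRec operand of the add,
// then follows its start value, and finally stops at the SCEVUnknown %base,
// which is returned as the pointer base.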

/// Push users of the given Instruction onto the given Worklist.
static void
PushDefUseChildren(Instruction *I,
                   SmallVectorImpl<Instruction *> &Worklist) {
  // Push the def-use children onto the Worklist stack.
  for (User *U : I->users())
    Worklist.push_back(cast<Instruction>(U));
}

void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  SmallVector<Instruction *, 16> Worklist;
  PushDefUseChildren(PN, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  Visited.insert(PN);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    auto It = ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      const SCEV *Old = It->second;

      // Short-circuit the def-use traversal if the symbolic name
      // ceases to appear in expressions.
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(Old);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}

namespace {

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
/// expression if its loop is L. If the loop is some other loop, keep the
/// AddRec itself when IgnoreOtherLoops is true; otherwise the rewrite
/// cannot be done. If S contains a loop-variant SCEVUnknown, the rewrite
/// cannot be done either.
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool IgnoreOtherLoops = true) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    if (Rewriter.hasSeenLoopVariantSCEVUnknown())
      return SE.getCouldNotCompute();
    return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getStart();
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};
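
// Example (editorial sketch): SCEVInitRewriter::rewrite({%a,+,%b}<%L>, %L, SE)
// yields %a. With IgnoreOtherLoops=false, an AddRec for a different loop in
// the expression makes the result SE.getCouldNotCompute(), as does any
// SCEVUnknown that is not invariant in %L.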

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its
/// post-increment expression if its loop is L; for any other loop, use the
/// AddRec itself. If S contains a loop-variant SCEVUnknown, the rewrite
/// cannot be done.
class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
    SCEVPostIncRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.hasSeenLoopVariantSCEVUnknown()
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getPostIncExpr(SE);
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};
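
// Example (editorial sketch): SCEVPostIncRewriter::rewrite({%a,+,%b}<%L>, %L,
// SE) yields {(%a + %b),+,%b}<%L>, the value of the recurrence after one
// backedge traversal; AddRecs of other loops are left untouched.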

/// This class evaluates the compare condition by matching it against the
/// condition of the loop latch. If there is a match we assume a true value
/// for the condition while building SCEV nodes.
class SCEVBackedgeConditionFolder
    : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    bool IsPosBECond = false;
    Value *BECond = nullptr;
    if (BasicBlock *Latch = L->getLoopLatch()) {
      BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
      if (BI && BI->isConditional()) {
        assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
               "Both outgoing branches should not target same header!");
        BECond = BI->getCondition();
        IsPosBECond = BI->getSuccessor(0) == L->getHeader();
      } else {
        return S;
      }
    }
    SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    const SCEV *Result = Expr;
    bool InvariantF = SE.isLoopInvariant(Expr, L);

    if (!InvariantF) {
      Instruction *I = cast<Instruction>(Expr->getValue());
      switch (I->getOpcode()) {
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(I);
        Optional<const SCEV *> Res =
            compareWithBackedgeCondition(SI->getCondition());
        if (Res.hasValue()) {
          bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
          Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
        }
        break;
      }
      default: {
        Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
        if (Res.hasValue())
          Result = Res.getValue();
        break;
      }
      }
    }
    return Result;
  }

private:
  explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
                                       bool IsPosBECond, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
        IsPositiveBECond(IsPosBECond) {}

  Optional<const SCEV *> compareWithBackedgeCondition(Value *IC);

  const Loop *L;
  /// Loop back condition.
  Value *BackedgeCond = nullptr;
  /// Set to true if loop back is on positive branch condition.
  bool IsPositiveBECond;
};

Optional<const SCEV *>
SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {

  // If value matches the backedge condition for loop latch,
  // then return a constant evolution node based on loopback
  // branch taken.
  if (BackedgeCond == IC)
    return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
                            : SE.getZero(Type::getInt1Ty(SE.getContext()));
  return None;
}

class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVShiftRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    // Only allow AddRecExprs for this loop.
    if (!SE.isLoopInvariant(Expr, L))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    if (Expr->getLoop() == L && Expr->isAffine())
      return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
    Valid = false;
    return Expr;
  }

  bool isValid() { return Valid; }

private:
  explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool Valid = true;
};
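
// Example (editorial sketch): SCEVShiftRewriter::rewrite({1,+,1}<%L>, %L, SE)
// yields ({1,+,1} - 1) = {0,+,1}<%L>, i.e. the recurrence shifted back by one
// iteration. This is what lets createAddRecFromPHI fold
// PHI(f(0), f({1,+,1})) into f({0,+,1}).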

} // end anonymous namespace

SCEV::NoWrapFlags
ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
  if (!AR->isAffine())
    return SCEV::FlagAnyWrap;

  using OBO = OverflowingBinaryOperator;

  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;

  if (!AR->hasNoSignedWrap()) {
    ConstantRange AddRecRange = getSignedRange(AR);
    ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));

    auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoSignedWrap);
    if (NSWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
  }

  if (!AR->hasNoUnsignedWrap()) {
    ConstantRange AddRecRange = getUnsignedRange(AR);
    ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));

    auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoUnsignedWrap);
    if (NUWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
  }

  return Result;
}
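
// Worked example (editorial): take an i8 AddRec {0,+,1} whose signed range is
// known to be [0, 10). The guaranteed-no-wrap region for "add 1" under NSW is
// [-128, 127), i.e. every value whose increment stays representable. Since
// [0, 10) is contained in that region, FlagNSW can be set.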

SCEV::NoWrapFlags
ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
  SCEV::NoWrapFlags Result = AR->getNoWrapFlags();

  if (AR->hasNoSignedWrap())
    return Result;

  if (!AR->isAffine())
    return Result;

  const SCEV *Step = AR->getStepRecurrence(*this);
  const Loop *L = AR->getLoop();

  // Check whether the backedge-taken count is SCEVCouldNotCompute.
  // Note that this serves two purposes: It filters out loops that are
  // simply not analyzable, and it covers the case where this code is
  // being called from within backedge-taken count analysis, such that
  // attempting to ask for the backedge-taken count would likely result
  // in infinite recursion. In the latter case, the analysis code will
  // cope with a conservative value, and it will take care to purge
  // that value once it has finished.
  const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);

  // Normally, in the cases we can prove no-overflow via a
  // backedge guarding condition, we can also compute a backedge
  // taken count for the loop. The exceptions are assumptions and
  // guards present in the loop -- SCEV is not great at exploiting
  // these to compute max backedge taken counts, but can still use
  // these to prove lack of overflow. Use this fact to avoid
  // doing extra work that may not pay off.

  if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
      AC.assumptions().empty())
    return Result;

  // If the backedge is guarded by a comparison with the pre-inc value the
  // addrec is safe. Also, if the entry is guarded by a comparison with the
  // start value and the backedge is guarded by a comparison with the post-inc
  // value, the addrec is safe.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      getSignedOverflowLimitForStep(Step, &Pred, this);
  if (OverflowLimit &&
      (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
       isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
    Result = setFlags(Result, SCEV::FlagNSW);
  }
  return Result;
}

SCEV::NoWrapFlags
ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) {
  SCEV::NoWrapFlags Result = AR->getNoWrapFlags();

  if (AR->hasNoUnsignedWrap())
    return Result;

  if (!AR->isAffine())
    return Result;

  const SCEV *Step = AR->getStepRecurrence(*this);
  unsigned BitWidth = getTypeSizeInBits(AR->getType());
  const Loop *L = AR->getLoop();

  // Check whether the backedge-taken count is SCEVCouldNotCompute.
  // Note that this serves two purposes: It filters out loops that are
  // simply not analyzable, and it covers the case where this code is
  // being called from within backedge-taken count analysis, such that
  // attempting to ask for the backedge-taken count would likely result
  // in infinite recursion. In the latter case, the analysis code will
  // cope with a conservative value, and it will take care to purge
  // that value once it has finished.
  const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);

  // Normally, in the cases we can prove no-overflow via a
  // backedge guarding condition, we can also compute a backedge
  // taken count for the loop. The exceptions are assumptions and
  // guards present in the loop -- SCEV is not great at exploiting
  // these to compute max backedge taken counts, but can still use
  // these to prove lack of overflow. Use this fact to avoid
  // doing extra work that may not pay off.

  if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
      AC.assumptions().empty())
    return Result;

  // If the backedge is guarded by a comparison with the pre-inc value the
  // addrec is safe. Also, if the entry is guarded by a comparison with the
  // start value and the backedge is guarded by a comparison with the post-inc
  // value, the addrec is safe.
  if (isKnownPositive(Step)) {
    const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                getUnsignedRangeMax(Step));
    if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
        isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
      Result = setFlags(Result, SCEV::FlagNUW);
    }
  }

  return Result;
}

namespace {

/// Represents an abstract binary operation. This may exist as a
/// normal instruction or constant expression, or may have been
/// derived from an expression tree.
struct BinaryOp {
  unsigned Opcode;
  Value *LHS;
  Value *RHS;
  bool IsNSW = false;
  bool IsNUW = false;

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op = nullptr;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
        Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
};

} // end anonymous namespace

/// Try to map \p V into a BinaryOp, and return \c None on failure.
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction
      // step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);

  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
    if (!WO)
      break;

    Instruction::BinaryOps BinOp = WO->getBinaryOp();
    bool Signed = WO->isSigned();
    // TODO: Should add nuw/nsw flags for mul as well.
    if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
      return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());

    // Now that we know that all uses of the arithmetic-result component of
    // CI are guarded by the overflow check, we can go ahead and pretend
    // that the arithmetic is non-overflowing.
    return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
                    /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
  }

  default:
    break;
  }

  // Recognise intrinsic loop.decrement.reg, and as this has exactly the same
  // semantics as a Sub, return a binary sub expression.
  if (auto *II = dyn_cast<IntrinsicInst>(V))
    if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
      return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1));

  return None;
}
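
// Two illustrative normalizations performed above (editorial):
//   xor i32 %x, -2147483648  ->  BinaryOp(Add, %x, 0x80000000)
//     (adding the sign mask and xor'ing it are the same bit operation)
//   lshr i32 %x, 3           ->  BinaryOp(UDiv, %x, 8)
//     (a logical right shift by a constant k is a udiv by 2^k)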

/// Helper function to createAddRecFromPHIWithCasts. We have a phi
/// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
/// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
/// way. This function checks if \p Op, an operand of this SCEVAddExpr,
/// follows one of the following patterns:
/// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// If the SCEV expression of \p Op conforms with one of the expected patterns
/// we return the type of the truncation operation, and indicate whether the
/// truncated type should be treated as signed/unsigned by setting
/// \p Signed to true/false, respectively.
static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
                               bool &Signed, ScalarEvolution &SE) {
  // The case where Op == SymbolicPHI (that is, with no type conversions on
  // the way) is handled by the regular add recurrence creating logic and
  // would have already been triggered in createAddRecForPHI. Reaching it here
  // means that createAddRecFromPHI had failed for this PHI before (e.g.,
  // because one of the other operands of the SCEVAddExpr updating this PHI is
  // not invariant).
  //
  // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
  // this case predicates that allow us to prove that Op == SymbolicPHI will
  // be added.
  if (Op == SymbolicPHI)
    return nullptr;

  unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
  unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
  if (SourceBits != NewBits)
    return nullptr;

  const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
  const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
  if (!SExt && !ZExt)
    return nullptr;
  const SCEVTruncateExpr *Trunc =
      SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
           : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
  if (!Trunc)
    return nullptr;
  const SCEV *X = Trunc->getOperand();
  if (X != SymbolicPHI)
    return nullptr;
  Signed = SExt != nullptr;
  return Trunc->getType();
}

static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
  if (!PN->getType()->isIntegerTy())
    return nullptr;
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;
  return L;
}

// Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
// computation that updates the phi follows the following pattern:
//   (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
// which corresponds to a phi->trunc->sext/zext->add->phi update chain.
// If so, try to see if it can be rewritten as an AddRecExpr under some
// Predicates. If successful, return them as a pair. Also cache the results
// of the analysis.
//
// Example usage scenario:
//    Say the Rewriter is called for the following SCEV:
//         8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    where:
//         %X = phi i64 (%Start, %BEValue)
//    It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
//    and call this function with %SymbolicPHI = %X.
//
//    The analysis will find that the value coming around the backedge has
//    the following SCEV:
//         BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    Upon concluding that this matches the desired pattern, the function
//    will return the pair {NewAddRec, SmallPredsVec} where:
//         NewAddRec = {%Start,+,%Step}
//         SmallPredsVec = {P1, P2, P3} as follows:
//           P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
//           P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
//           P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
//    The returned pair means that SymbolicPHI can be rewritten into NewAddRec
//    under the predicates {P1,P2,P3}.
//    This predicated rewrite will be cached in PredicatedSCEVRewrites:
//         PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
//
// TODO's:
//
// 1) Extend the Induction descriptor to also support inductions that involve
//    casts: When needed (namely, when we are called in the context of the
//    vectorizer induction analysis), a Set of cast instructions will be
//    populated by this method, and provided back to isInductionPHI. This is
//    needed to allow the vectorizer to properly record them to be ignored by
//    the cost model and to avoid vectorizing them (otherwise these casts,
//    which are redundant under the runtime overflow checks, will be
//    vectorized, which can be costly).
//
// 2) Support additional induction/PHISCEV patterns: We also want to support
//    inductions where the sext-trunc / zext-trunc operations (partly) occur
//    after the induction update operation (the induction increment):
//
//      (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
//    which corresponds to a phi->add->trunc->sext/zext->phi update chain.
//
//      (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
//    which corresponds to a phi->trunc->add->sext/zext->phi update chain.
//
// 3) Outline common code with createAddRecFromPHI to avoid duplication.
Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
  SmallVector<const SCEVPredicate *, 3> Predicates;

  // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
  // return an AddRec expression under some predicate.

  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  assert(L && "Expecting an integer loop header phi");

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return None;

  const SCEV *BEValue = getSCEV(BEValueV);

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, possibly with casts that we can ignore under
  // an appropriate runtime guard, then we found a simple induction variable!
  const auto *Add = dyn_cast<SCEVAddExpr>(BEValue);
  if (!Add)
    return None;

  // If there is a single occurrence of the symbolic value, possibly
  // casted, replace it with a recurrence.
  unsigned FoundIndex = Add->getNumOperands();
  Type *TruncTy = nullptr;
  bool Signed;
  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
    if ((TruncTy =
             isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this)))
      if (FoundIndex == e) {
        FoundIndex = i;
        break;
      }

  if (FoundIndex == Add->getNumOperands())
    return None;

  // Create an add with everything but the specified operand.
  SmallVector<const SCEV *, 8> Ops;
  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
    if (i != FoundIndex)
      Ops.push_back(Add->getOperand(i));
  const SCEV *Accum = getAddExpr(Ops);

  // The runtime checks will not be valid if the step amount is
  // varying inside the loop.
  if (!isLoopInvariant(Accum, L))
    return None;

  // *** Part2: Create the predicates

  // Analysis was successful: we have a phi-with-cast pattern for which we
  // can return an AddRec expression under the following predicates:
  //
  // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum)
  //     fits within the truncated type (does not overflow) for i = 0 to n-1.
  // P2: An Equal predicate that guarantees that
  //     Start = (Ext ix (Trunc iy (Start) to ix) to iy)
  // P3: An Equal predicate that guarantees that
  //     Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
  //
  // As we next prove, the above predicates guarantee that:
  //     Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
  //
  //
  // More formally, we want to prove that:
  //     Expr(i+1) = Start + (i+1) * Accum
  //               = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // Given that:
  // 1) Expr(0) = Start
  // 2) Expr(1) = Start + Accum
  //            = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
  // 3) Induction hypothesis (step i):
  //    Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
  //
  // Proof:
  //  Expr(i+1) =
  //            = Start + (i+1)*Accum
  //            = (Start + i*Accum) + Accum
  //            = Expr(i) + Accum
  //            = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
  //                                                           :: from step i
  //
  //            = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
  //              + Accum + Accum
  //
  //            = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
  //              + (Ext ix (Trunc iy (Accum) to ix) to iy)
  //              + Accum                                      :: from P3
  //
  //            = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix)
  //              to iy) + Accum        :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
  //
  //            = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
  //            = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // By induction, the same applies to all iterations 1<=i<n.

  // Create a truncated addrec for which we will add a no overflow check (P1).
  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV =
      getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
                    getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);

  // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr.
  // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV
  // will be constant.
  //
  // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't
  // add P1.
  if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags =
        Signed ? SCEVWrapPredicate::IncrementNSSW
               : SCEVWrapPredicate::IncrementNUSW;
    const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
    Predicates.push_back(AddRecPred);
  }

  // Create the Equal Predicates P2,P3:

  // It is possible that the predicates P2 and/or P3 are computable at
  // compile time due to StartVal and/or Accum being constants.
  // If either one is, then we can check that now and escape if either P2
  // or P3 is false.

  // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
  // for each of StartVal and Accum
  auto getExtendedExpr = [&](const SCEV *Expr,
                             bool CreateSignExtend) -> const SCEV * {
    assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
    const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
    const SCEV *ExtendedExpr =
        CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType())
                         : getZeroExtendExpr(TruncatedExpr, Expr->getType());
    return ExtendedExpr;
  };

  // Given:
  //  ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
  //               = getExtendedExpr(Expr)
  // Determine whether the predicate P: Expr == ExtendedExpr
  // is known to be false at compile time
  auto PredIsKnownFalse = [&](const SCEV *Expr,
                              const SCEV *ExtendedExpr) -> bool {
    return Expr != ExtendedExpr &&
           isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
  };

  const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
  if (PredIsKnownFalse(StartVal, StartExtended)) {
    LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
    return None;
  }

  // The Step is always Signed (because the overflow checks are either
  // NSSW or NUSW)
  const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
  if (PredIsKnownFalse(Accum, AccumExtended)) {
    LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
    return None;
  }

  auto AppendPredicate = [&](const SCEV *Expr,
                             const SCEV *ExtendedExpr) -> void {
    if (Expr != ExtendedExpr &&
        !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
      const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
      LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
      Predicates.push_back(Pred);
    }
  };

  AppendPredicate(StartVal, StartExtended);
  AppendPredicate(Accum, AccumExtended);

  // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
  // which the casts had been folded away. The caller can rewrite SymbolicPHI
  // into NewAR if it will also add the runtime overflow checks specified in
  // Predicates.
  auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);

  std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
      std::make_pair(NewAR, Predicates);
  // Remember the result of the analysis for this SCEV at this location.
  PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
  return PredRewrite;
}

Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  if (!L)
    return None;

  // Check to see if we already analyzed this PHI.
  auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
  if (I != PredicatedSCEVRewrites.end()) {
    std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
        I->second;
    // Analysis was done before and failed to create an AddRec:
    if (Rewrite.first == SymbolicPHI)
      return None;
    // Analysis was done before and succeeded in creating an AddRec under
    // a predicate:
    assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
    assert(!(Rewrite.second).empty() && "Expected to find Predicates");
    return Rewrite;
  }

  Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
      Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);

  // Record in the cache that the analysis failed.
  if (!Rewrite) {
    SmallVector<const SCEVPredicate *, 3> Predicates;
    PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
    return None;
  }

  return Rewrite;
}

// FIXME: This utility is currently required because the Rewriter currently
// does not rewrite this expression:
// {0, +, (sext ix (trunc iy to ix) to iy)}
// into {0, +, %step},
// even when the following Equal predicate exists:
// "%step == (sext ix (trunc iy to ix) to iy)".
bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
    const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
  if (AR1 == AR2)
    return true;

  auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
    if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
        !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
      return false;
    return true;
  };

  if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
      !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
    return false;
  return true;
}

/// A helper function for createAddRecFromPHI to handle simple cases.
///
/// This function tries to find an AddRec expression for the simplest (yet most
/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
/// If it fails, createAddRecFromPHI will use a more general, but slow,
/// technique for finding the AddRec expression.
const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
                                                      Value *BEValueV,
                                                      Value *StartValueV) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  assert(L && L->getHeader() == PN->getParent());
  assert(BEValueV && StartValueV);

  auto BO = MatchBinaryOp(BEValueV, DT);
  if (!BO)
    return nullptr;

  if (BO->Opcode != Instruction::Add)
    return nullptr;

  const SCEV *Accum = nullptr;
  if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
    Accum = getSCEV(BO->RHS);
  else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
    Accum = getSCEV(BO->LHS);

  if (!Accum)
    return nullptr;

  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BO->IsNUW)
    Flags = setFlags(Flags, SCEV::FlagNUW);
  if (BO->IsNSW)
    Flags = setFlags(Flags, SCEV::FlagNSW);

  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

  ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

  // We can add Flags to the post-inc expression only if we
  // know that it is *undefined behavior* for BEValueV to
  // overflow.
  if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
    if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
      (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

  return PHISCEV;
}
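
// Illustrative IR for the simple case handled above (editorial sketch):
//   loop:
//     %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
//     %iv.next = add nuw nsw i64 %iv, 1
// Here BEValueV = %iv.next matches Add(%iv, 1) with a loop-invariant step,
// so the PHI gets the SCEV {0,+,1}<nuw><nsw><%loop> without ever creating a
// temporary symbolic name.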

const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return nullptr;

  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
         "PHI node already processed?");

  // First, try to find an AddRec expression without creating a fictitious
  // symbolic value for PN.
  if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
    return S;

  // Handle PHI node value symbolically.
  const SCEV *SymbolicName = getUnknown(PN);
  ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});

  // Using this symbolic name for the PHI, analyze the value coming around
  // the back-edge.
  const SCEV *BEValue = getSCEV(BEValueV);

  // NOTE: If BEValue is loop invariant, we know that the PHI node just
  // has a special value for the first iteration of the loop.

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, then we found a simple induction variable!
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
    // If there is a single occurrence of the symbolic value, replace it
    // with a recurrence.
    unsigned FoundIndex = Add->getNumOperands();
    for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
      if (Add->getOperand(i) == SymbolicName)
        if (FoundIndex == e) {
          FoundIndex = i;
          break;
        }

    if (FoundIndex != Add->getNumOperands()) {
      // Create an add with everything but the specified operand.
      SmallVector<const SCEV *, 8> Ops;
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (i != FoundIndex)
          Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
                                                             L, *this));
      const SCEV *Accum = getAddExpr(Ops);

      // This is not a valid addrec if the step amount is varying each
      // loop iteration, but is not itself an addrec in this loop.
      if (isLoopInvariant(Accum, L) ||
          (isa<SCEVAddRecExpr>(Accum) &&
           cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
        SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

        if (auto BO = MatchBinaryOp(BEValueV, DT)) {
          if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
            if (BO->IsNUW)
              Flags = setFlags(Flags, SCEV::FlagNUW);
            if (BO->IsNSW)
              Flags = setFlags(Flags, SCEV::FlagNSW);
          }
        } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
          // If the increment is an inbounds GEP, then we know the address
          // space cannot be wrapped around. We cannot make any guarantee
          // about signed or unsigned overflow because pointers are
          // unsigned but we may have a negative index from the base
          // pointer. We can guarantee that no unsigned wrap occurs if the
          // indices form a positive value.
          if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
            Flags = setFlags(Flags, SCEV::FlagNW);

            const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
            if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
              Flags = setFlags(Flags, SCEV::FlagNUW);
          }

          // We cannot transfer nuw and nsw flags from subtraction
          // operations -- sub nuw X, Y is not the same as add nuw X, -Y
          // for instance.
        }

        const SCEV *StartVal = getSCEV(StartValueV);
        const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

        // We can add Flags to the post-inc expression only if we
        // know that it is *undefined behavior* for BEValueV to
        // overflow.
        if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
          if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
            (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

        return PHISCEV;
      }
    }
  } else {
    // Otherwise, this could be a loop like this:
    //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
    // In this case, j = {1,+,1} and BEValue is j.
    // Because the other in-value of i (0) fits the evolution of BEValue,
    // i really is an addrec evolution.
    //
    // We can generalize this by saying that i is the shifted value of BEValue
    // by one iteration:
    //     PHI(f(0), f({1,+,1})) --> f({0,+,1})
    const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
    const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
    if (Shifted != getCouldNotCompute() &&
        Start != getCouldNotCompute()) {
      const SCEV *StartVal = getSCEV(StartValueV);
      if (Start == StartVal) {
        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
        return Shifted;
      }
    }
  }

  // Remove the temporary PHI node SCEV that has been inserted while intending
  // to create an AddRecExpr for this PHI node. We cannot keep this temporary
  // as it will prevent later (possibly simpler) SCEV expressions from being
  // added to the ValueExprMap.
  eraseValueFromMap(PN);

  return nullptr;
}

// Checks if the SCEV S is available at BB. S is considered available at BB
// if S can be materialized at BB without introducing a fault.
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
                               BasicBlock *BB) {
  struct CheckAvailable {
    bool TraversalDone = false;
    bool Available = true;

    const Loop *L = nullptr; // The loop BB is in (can be nullptr)
    BasicBlock *BB = nullptr;
    DominatorTree &DT;

    CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
        : L(L), BB(BB), DT(DT) {}

    bool setUnavailable() {
      TraversalDone = true;
      Available = false;
      return false;
    }

    bool follow(const SCEV *S) {
      switch (S->getSCEVType()) {
      case scConstant:
      case scPtrToInt:
      case scTruncate:
      case scZeroExtend:
      case scSignExtend:
      case scAddExpr:
      case scMulExpr:
      case scUMaxExpr:
      case scSMaxExpr:
      case scUMinExpr:
      case scSMinExpr:
        // These expressions are available if their operand(s) is/are.
        return true;

      case scAddRecExpr: {
        // We allow add recurrences that are on the loop BB is in, or some
        // outer loop. This guarantees availability because the value of the
        // add recurrence at BB is simply the "current" value of the induction
        // variable. We can relax this in the future; for instance an add
        // recurrence on a sibling dominating loop is also available at BB.
        const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
        if (L && (ARLoop == L || ARLoop->contains(L)))
          return true;

        return setUnavailable();
      }

      case scUnknown: {
        // For SCEVUnknown, we check for simple dominance.
        const auto *SU = cast<SCEVUnknown>(S);
        Value *V = SU->getValue();

        if (isa<Argument>(V))
          return false;

        if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
          return false;

        return setUnavailable();
      }

      case scUDivExpr:
      case scCouldNotCompute:
        // We do not try to be smart about these at all.
        return setUnavailable();
      }
      llvm_unreachable("Unknown SCEV kind!");
    }

    bool isDone() { return TraversalDone; }
  };

  CheckAvailable CA(L, BB, DT);
  SCEVTraversal<CheckAvailable> ST(CA);

  ST.visitAll(S);
  return CA.Available;
}

// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
// match.
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
                          Value *&C, Value *&LHS, Value *&RHS) {
  C = BI->getCondition();

  BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
  BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));

  if (!LeftEdge.isSingleEdge())
    return false;

  assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()");

  Use &LeftUse = Merge->getOperandUse(0);
  Use &RightUse = Merge->getOperandUse(1);

  if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
    LHS = LeftUse;
    RHS = RightUse;
    return true;
  }

  if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
    LHS = RightUse;
    RHS = LeftUse;
    return true;
  }

  return false;
}
5611
createNodeFromSelectLikePHI(PHINode * PN)5612 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
5613 auto IsReachable =
5614 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
5615 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
5616 const Loop *L = LI.getLoopFor(PN->getParent());
5617
5618 // We don't want to break LCSSA, even in a SCEV expression tree.
5619 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
5620 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L)
5621 return nullptr;
5622
5623 // Try to match
5624 //
5625 // br %cond, label %left, label %right
5626 // left:
5627 // br label %merge
5628 // right:
5629 // br label %merge
5630 // merge:
5631 // V = phi [ %x, %left ], [ %y, %right ]
5632 //
5633 // as "select %cond, %x, %y"
5634
5635 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
5636 assert(IDom && "At least the entry block should dominate PN");
5637
5638 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
5639 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;
5640
5641 if (BI && BI->isConditional() &&
5642 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
5643 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) &&
5644 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent()))
5645 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
5646 }
5647
5648 return nullptr;
5649 }
5650
createNodeForPHI(PHINode * PN)5651 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
5652 if (const SCEV *S = createAddRecFromPHI(PN))
5653 return S;
5654
5655 if (const SCEV *S = createNodeFromSelectLikePHI(PN))
5656 return S;
5657
5658 // If the PHI has a single incoming value, follow that value, unless the
5659 // PHI's incoming blocks are in a different loop, in which case doing so
5660 // risks breaking LCSSA form. Instcombine would normally zap these, but
5661 // it doesn't have DominatorTree information, so it may miss cases.
5662 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
5663 if (LI.replacementPreservesLCSSAForm(PN, V))
5664 return getSCEV(V);
5665
5666 // If it's not a loop phi, we can't handle it yet.
5667 return getUnknown(PN);
5668 }
5669
createNodeForSelectOrPHI(Instruction * I,Value * Cond,Value * TrueVal,Value * FalseVal)5670 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I,
5671 Value *Cond,
5672 Value *TrueVal,
5673 Value *FalseVal) {
5674 // Handle "constant" branch or select. This can occur for instance when a
5675 // loop pass transforms an inner loop and moves on to process the outer loop.
5676 if (auto *CI = dyn_cast<ConstantInt>(Cond))
5677 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
5678
5679 // Try to match some simple smax or umax patterns.
5680 auto *ICI = dyn_cast<ICmpInst>(Cond);
5681 if (!ICI)
5682 return getUnknown(I);
5683
5684 Value *LHS = ICI->getOperand(0);
5685 Value *RHS = ICI->getOperand(1);
5686
5687 switch (ICI->getPredicate()) {
5688 case ICmpInst::ICMP_SLT:
5689 case ICmpInst::ICMP_SLE:
5690 case ICmpInst::ICMP_ULT:
5691 case ICmpInst::ICMP_ULE:
5692 std::swap(LHS, RHS);
5693 LLVM_FALLTHROUGH;
5694 case ICmpInst::ICMP_SGT:
5695 case ICmpInst::ICMP_SGE:
5696 case ICmpInst::ICMP_UGT:
5697 case ICmpInst::ICMP_UGE:
5698 // a > b ? a+x : b+x -> max(a, b)+x
5699 // a > b ? b+x : a+x -> min(a, b)+x
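    // For instance, in "a > b ? a+1 : b+1" (signed) we get LA = a+1, LS = a,
    // RA = b+1, RS = b, so LDiff and RDiff below both fold to 1 and the
    // whole select becomes smax(a, b)+1.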
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      bool Signed = ICI->isSigned();
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LS = getSCEV(LHS);
      const SCEV *RS = getSCEV(RHS);
      if (LA->getType()->isPointerTy()) {
        // FIXME: Handle cases where LS/RS are pointers not equal to LA/RA.
        // Need to make sure we can't produce weird expressions involving
        // negated pointers.
        if (LA == LS && RA == RS)
          return Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS);
        if (LA == RS && RA == LS)
          return Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS);
      }
      auto CoerceOperand = [&](const SCEV *Op) -> const SCEV * {
        if (Op->getType()->isPointerTy()) {
          Op = getLosslessPtrToIntExpr(Op);
          if (isa<SCEVCouldNotCompute>(Op))
            return Op;
        }
        if (Signed)
          Op = getNoopOrSignExtend(Op, I->getType());
        else
          Op = getNoopOrZeroExtend(Op, I->getType());
        return Op;
      };
      LS = CoerceOperand(LS);
      RS = CoerceOperand(RS);
      if (isa<SCEVCouldNotCompute>(LS) || isa<SCEVCouldNotCompute>(RS))
        break;
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS),
                          LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS),
                          LDiff);
    }
    break;
  case ICmpInst::ICMP_NE:
    // n != 0 ? n+x : 1+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, One);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  case ICmpInst::ICMP_EQ:
    // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, One);
      const SCEV *RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  default:
    break;
  }

  return getUnknown(I);
}

/// Expand GEP instructions into add and multiply operations. This allows them
/// to be analyzed by regular SCEV code.
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
  // Don't attempt to analyze GEPs over unsized objects.
  if (!GEP->getSourceElementType()->isSized())
    return getUnknown(GEP);

  SmallVector<const SCEV *, 4> IndexExprs;
  for (Value *Index : GEP->indices())
    IndexExprs.push_back(getSCEV(Index));
  return getGEPExpr(GEP, IndexExprs);
}

uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return C->getAPInt().countTrailingZeros();

  if (const SCEVPtrToIntExpr *I = dyn_cast<SCEVPtrToIntExpr>(S))
    return GetMinTrailingZeros(I->getOperand());

  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
    return std::min(GetMinTrailingZeros(T->getOperand()),
                    (uint32_t)getTypeSizeInBits(T->getType()));

  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }

  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }

  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    // The result is the min of all operands results.
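    // E.g. (a * 8) + (b * 4) has at least two trailing zero bits, the min of
    // the operands' three and two.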
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // The result is the sum of all operands results.
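    // E.g. (a * 4) * (b * 2) has at least three trailing zero bits, the sum
    // of the operands' two and one, saturated at the bitwidth.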
    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
    uint32_t BitWidth = getTypeSizeInBits(M->getType());
    for (unsigned i = 1, e = M->getNumOperands();
         SumOpRes != BitWidth && i != e; ++i)
      SumOpRes =
          std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
    return SumOpRes;
  }

  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    KnownBits Known =
        computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
    return Known.countMinTrailingZeros();
  }

  // SCEVUDivExpr
  return 0;
}

uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  auto I = MinTrailingZerosCache.find(S);
  if (I != MinTrailingZerosCache.end())
    return I->second;

  uint32_t Result = GetMinTrailingZerosImpl(S);
  auto InsertPair = MinTrailingZerosCache.insert({S, Result});
  assert(InsertPair.second && "Should insert a new key");
  return InsertPair.first->second;
}

/// Helper method to assign a range to V from metadata present in the IR.
static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
      return getConstantRangeFromMetadata(*MD);

  return None;
}

void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec,
                                     SCEV::NoWrapFlags Flags) {
  if (AddRec->getNoWrapFlags(Flags) != Flags) {
    AddRec->setNoWrapFlags(Flags);
    UnsignedRanges.erase(AddRec);
    SignedRanges.erase(AddRec);
  }
}

ConstantRange ScalarEvolution::
getRangeForUnknownRecurrence(const SCEVUnknown *U) {
  const DataLayout &DL = getDataLayout();

  unsigned BitWidth = getTypeSizeInBits(U->getType());
  const ConstantRange FullSet(BitWidth, /*isFullSet=*/true);

  // Match a simple recurrence of the form: <start, ShiftOp, Step>, and then
  // use information about the trip count to improve our available range. Note
  // that the trip count independent cases are already handled by known bits.
  // WARNING: The definition of recurrence used here is subtly different from
  // the one used by AddRec (and thus most of this file). Step is allowed to
  // be arbitrarily loop-varying here, whereas AddRec allows only loop-invariant
  // steps and other addrecs in the same loop (for non-affine addrecs). The
  // code below intentionally handles the case where the step is not loop
  // invariant.
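  // For example (hypothetical IR names for illustration), in
  //   %p = phi i32 [ %start, %preheader ], [ %shifted, %loop ]
  //   %shifted = lshr i32 %p, %amt
  // %amt may itself vary from iteration to iteration; we can still learn
  // something from the maximum trip count and the known bits of %amt.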
  auto *P = dyn_cast<PHINode>(U->getValue());
  if (!P)
    return FullSet;

  // Make sure that no Phi input comes from an unreachable block. Otherwise,
  // even values that are not available in those blocks may appear to come
  // from them, which leads to a false-positive recurrence test.
  for (auto *Pred : predecessors(P->getParent()))
    if (!DT.isReachableFromEntry(Pred))
      return FullSet;

  BinaryOperator *BO;
  Value *Start, *Step;
  if (!matchSimpleRecurrence(P, BO, Start, Step))
    return FullSet;

  // If we found a recurrence in reachable code, we must be in a loop. Note
  // that BO might be in some subloop of L, and that's completely okay.
  auto *L = LI.getLoopFor(P->getParent());
  assert(L && L->getHeader() == P->getParent());
  if (!L->contains(BO->getParent()))
    // NOTE: This bailout should be an assert instead. However, asserting
    // the condition here exposes a case where LoopFusion is querying SCEV
    // with malformed loop information in the midst of the transform.
    // There doesn't appear to be an obvious fix, so for the moment bail
    // out until the caller issue can be fixed. PR49566 tracks the bug.
    return FullSet;

  // TODO: Extend to other opcodes such as mul and div.
  switch (BO->getOpcode()) {
  default:
    return FullSet;
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
    break;
  }

  if (BO->getOperand(0) != P)
    // TODO: Handle the power function forms some day.
    return FullSet;

  unsigned TC = getSmallConstantMaxTripCount(L);
  if (!TC || TC >= BitWidth)
    return FullSet;

  auto KnownStart = computeKnownBits(Start, DL, 0, &AC, nullptr, &DT);
  auto KnownStep = computeKnownBits(Step, DL, 0, &AC, nullptr, &DT);
  assert(KnownStart.getBitWidth() == BitWidth &&
         KnownStep.getBitWidth() == BitWidth);

  // Compute total shift amount, being careful of overflow and bitwidths.
  auto MaxShiftAmt = KnownStep.getMaxValue();
  APInt TCAP(BitWidth, TC - 1);
  bool Overflow = false;
  auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
  if (Overflow)
    return FullSet;

  switch (BO->getOpcode()) {
  default:
    llvm_unreachable("filtered out above");
  case Instruction::AShr: {
    // For each ashr, three cases:
    //   shift = 0  => unchanged value
    //   saturation => 0 or -1
    //   other      => a value closer to zero (of the same sign)
    // Thus, the end value is closer to zero than the start.
    auto KnownEnd = KnownBits::ashr(KnownStart,
                                    KnownBits::makeConstant(TotalShift));
    if (KnownStart.isNonNegative())
      // Analogous to lshr (simply not yet canonicalized)
      return ConstantRange::getNonEmpty(KnownEnd.getMinValue(),
                                        KnownStart.getMaxValue() + 1);
    if (KnownStart.isNegative())
      // End >=u Start && End <=s Start
      return ConstantRange::getNonEmpty(KnownStart.getMinValue(),
                                        KnownEnd.getMaxValue() + 1);
    break;
  }
  case Instruction::LShr: {
    // For each lshr, three cases:
    //   shift = 0  => unchanged value
    //   saturation => 0
    //   other      => a smaller positive number
    // Thus, the low end of the unsigned range is the last value produced.
    auto KnownEnd = KnownBits::lshr(KnownStart,
                                    KnownBits::makeConstant(TotalShift));
    return ConstantRange::getNonEmpty(KnownEnd.getMinValue(),
                                      KnownStart.getMaxValue() + 1);
  }
  case Instruction::Shl: {
    // Iff no bits are shifted out, the value increases on every shift.
    auto KnownEnd = KnownBits::shl(KnownStart,
                                   KnownBits::makeConstant(TotalShift));
    if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
      return ConstantRange(KnownStart.getMinValue(),
                           KnownEnd.getMaxValue() + 1);
    break;
  }
  }
  return FullSet;
}

/// Determine the range for a particular SCEV. If SignHint is
/// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
/// with a "cleaner" unsigned (resp. signed) representation.
const ConstantRange &
ScalarEvolution::getRangeRef(const SCEV *S,
                             ScalarEvolution::RangeSignHint SignHint) {
  DenseMap<const SCEV *, ConstantRange> &Cache =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                       : SignedRanges;
  ConstantRange::PreferredRangeType RangeType =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED
          ? ConstantRange::Unsigned : ConstantRange::Signed;

  // See if we've computed this range already.
  DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
  if (I != Cache.end())
    return I->second;

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setRange(C, SignHint, ConstantRange(C->getAPInt()));

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
  using OBO = OverflowingBinaryOperator;

  // If the value has known zeros, the maximum value will have those known
  // zeros as well.
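  // E.g. with TZ = 2 every value is a multiple of 4, so in i8 the unsigned
  // maximum drops from 255 to 252 and the signed maximum from 127 to 124.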
  uint32_t TZ = GetMinTrailingZeros(S);
  if (TZ != 0) {
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
      ConservativeResult =
          ConstantRange(APInt::getMinValue(BitWidth),
                        APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
    else
      ConservativeResult = ConstantRange(
          APInt::getSignedMinValue(BitWidth),
          APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
  }

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getRangeRef(Add->getOperand(0), SignHint);
    unsigned WrapType = OBO::AnyWrap;
    if (Add->hasNoSignedWrap())
      WrapType |= OBO::NoSignedWrap;
    if (Add->hasNoUnsignedWrap())
      WrapType |= OBO::NoUnsignedWrap;
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint),
                          WrapType, RangeType);
    return setRange(Add, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint);
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint));
    return setRange(Mul, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getRangeRef(SMax->getOperand(i), SignHint));
    return setRange(SMax, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getRangeRef(UMax->getOperand(i), SignHint));
    return setRange(UMax, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) {
    ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint);
    for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i)
      X = X.smin(getRangeRef(SMin->getOperand(i), SignHint));
    return setRange(SMin, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) {
    ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint);
    for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i)
      X = X.umin(getRangeRef(UMin->getOperand(i), SignHint));
    return setRange(UMin, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint);
    ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint);
    return setRange(UDiv, SignHint,
                    ConservativeResult.intersectWith(X.udiv(Y), RangeType));
  }

  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
    ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint);
    return setRange(ZExt, SignHint,
                    ConservativeResult.intersectWith(X.zeroExtend(BitWidth),
                                                     RangeType));
  }

  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
    ConstantRange X = getRangeRef(SExt->getOperand(), SignHint);
    return setRange(SExt, SignHint,
                    ConservativeResult.intersectWith(X.signExtend(BitWidth),
                                                     RangeType));
  }

  if (const SCEVPtrToIntExpr *PtrToInt = dyn_cast<SCEVPtrToIntExpr>(S)) {
    ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint);
    return setRange(PtrToInt, SignHint, X);
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint);
    return setRange(Trunc, SignHint,
                    ConservativeResult.intersectWith(X.truncate(BitWidth),
                                                     RangeType));
  }

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
    if (AddRec->hasNoUnsignedWrap()) {
      APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart());
      if (!UnsignedMinValue.isZero())
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType);
    }

    // If there's no signed wrap, and all the operands other than the initial
    // value have the same sign or are zero, the value will never be:
    //   1. smaller than the initial value if the operands are non-negative,
    //   2. bigger than the initial value if the operands are non-positive.
    // In both cases, the value cannot cross the signed min/max boundary.
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i)))
          AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i)))
          AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()),
                                       APInt::getSignedMinValue(BitWidth)),
            RangeType);
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(
                APInt::getSignedMinValue(BitWidth),
                getSignedRangeMax(AddRec->getStart()) + 1),
            RangeType);
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const SCEV *MaxBECount =
          getConstantMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        auto RangeFromAffine = getRangeForAffineAR(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromAffine, RangeType);

        auto RangeFromFactoring = getRangeViaFactoring(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
      }

      // Now try symbolic BE count and more powerful methods.
      if (UseExpensiveRangeSharpening) {
        const SCEV *SymbolicMaxBECount =
            getSymbolicMaxBackedgeTakenCount(AddRec->getLoop());
        if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) &&
            getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
            AddRec->hasNoSelfWrap()) {
          auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
              AddRec, SymbolicMaxBECount, BitWidth, SignHint);
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
        }
      }
    }

    return setRange(AddRec, SignHint, std::move(ConservativeResult));
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {

    // Check if the IR explicitly contains !range metadata.
    Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
    if (MDRange.hasValue())
      ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
                                                            RangeType);

    // Use facts about recurrences in the underlying IR. Note that add
    // recurrences are AddRecExprs and thus don't hit this path. This
    // primarily handles shift recurrences.
    auto CR = getRangeForUnknownRecurrence(U);
    ConservativeResult = ConservativeResult.intersectWith(CR);

    // See if ValueTracking can give us a useful range.
    const DataLayout &DL = getDataLayout();
    KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
    if (Known.getBitWidth() != BitWidth)
      Known = Known.zextOrTrunc(BitWidth);

    // ValueTracking may be able to compute a tighter result for the number of
    // sign bits than for the value of those sign bits.
    unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
    if (U->getType()->isPointerTy()) {
      // If the pointer size is larger than the index size type, this can cause
      // NS to be larger than BitWidth. So compensate for this.
      unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
      int ptrIdxDiff = ptrSize - BitWidth;
      if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
        NS -= ptrIdxDiff;
    }

    if (NS > 1) {
      // If we know any of the sign bits, we know all of the sign bits.
      if (!Known.Zero.getHiBits(NS).isZero())
        Known.Zero.setHighBits(NS);
      if (!Known.One.getHiBits(NS).isZero())
        Known.One.setHighBits(NS);
    }

    if (Known.getMinValue() != Known.getMaxValue() + 1)
      ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
          RangeType);
    if (NS > 1)
      ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                        APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
          RangeType);

    // The range of a Phi is a subset of the union of the ranges of its inputs.
    if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
      // Make sure that we do not run over cycled Phis.
      if (PendingPhiRanges.insert(Phi).second) {
        ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
        for (auto &Op : Phi->operands()) {
          auto OpRange = getRangeRef(getSCEV(Op), SignHint);
          RangeFromOps = RangeFromOps.unionWith(OpRange);
          // No point in continuing if we already have a full set.
          if (RangeFromOps.isFullSet())
            break;
        }
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromOps, RangeType);
        bool Erased = PendingPhiRanges.erase(Phi);
        assert(Erased && "Failed to erase Phi properly?");
        (void) Erased;
      }
    }

    return setRange(U, SignHint, std::move(ConservativeResult));
  }

  return setRange(S, SignHint, std::move(ConservativeResult));
}

// Given a StartRange, Step and MaxBECount for an expression, compute a range
// of values that the expression can take. Initially, the expression has a
// value from StartRange and then is changed by Step up to MaxBECount times.
// The Signed argument defines whether we treat Step as signed or unsigned.
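// For example, with StartRange = [0, 4), Step = 2 (unsigned) and
// MaxBECount = 10, Offset below becomes 2 * 10 = 20, the moved upper
// boundary is 3 + 20 = 23, and the result is [0, 24).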
static ConstantRange getRangeForAffineARHelper(APInt Step,
                                               const ConstantRange &StartRange,
                                               const APInt &MaxBECount,
                                               unsigned BitWidth, bool Signed) {
  // If either Step or MaxBECount is 0, then the expression won't change, and
  // we just need to return the initial range.
  if (Step == 0 || MaxBECount == 0)
    return StartRange;

  // If we don't know anything about the initial value (i.e. StartRange is
  // FullRange), then we don't know anything about the final range either.
  // Return FullRange.
  if (StartRange.isFullSet())
    return ConstantRange::getFull(BitWidth);

  // If Step is signed and negative, then we use its absolute value, but we
  // also note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
    // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // These equations hold due to the well-defined wrap-around behavior of
    // APInt.
    Step = Step.abs();

  // Check if Offset is more than the full span of BitWidth. If it is, the
  // expression is guaranteed to overflow.
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange::getFull(BitWidth);

  // Offset is by how much the expression can change. The checks above
  // guarantee no overflow here.
  APInt Offset = Step * MaxBECount;

  // The minimum value of the final range will match the minimal value of
  // StartRange if the expression is increasing and will be decreased by
  // Offset otherwise. The maximum value of the final range will match the
  // maximal value of StartRange if the expression is decreasing and will be
  // increased by Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the
  // initial range (due to wrap-around). This means that the expression can
  // take any value in this bitwidth, and we have to return the full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange::getFull(BitWidth);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // No overflow detected, return the [NewLower, NewUpper) range.
  return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
}

ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const SCEV *MaxBECount,
                                                   unsigned BitWidth) {
  assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
         getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
         "Precondition!");

  MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
  APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);

  // First, consider step signed.
  ConstantRange StartSRange = getSignedRange(Start);
  ConstantRange StepSRange = getSignedRange(Step);

  // If Step can be both positive and negative, we need to find ranges for the
  // maximum absolute step values in both directions and union them.
  ConstantRange SR =
      getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
                                MaxBECountValue, BitWidth, /* Signed = */ true);
  SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
                                              StartSRange, MaxBECountValue,
                                              BitWidth, /* Signed = */ true));

  // Next, consider step unsigned.
  ConstantRange UR = getRangeForAffineARHelper(
      getUnsignedRangeMax(Step), getUnsignedRange(Start),
      MaxBECountValue, BitWidth, /* Signed = */ false);

  // Finally, intersect signed and unsigned ranges.
  return SR.intersectWith(UR, ConstantRange::Smallest);
}

ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
    const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
    ScalarEvolution::RangeSignHint SignHint) {
  assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!\n");
  assert(AddRec->hasNoSelfWrap() &&
         "This only works for non-self-wrapping AddRecs!");
  const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
  const SCEV *Step = AddRec->getStepRecurrence(*this);
  // Only deal with constant step to save compile time.
  if (!isa<SCEVConstant>(Step))
    return ConstantRange::getFull(BitWidth);
  // Let's make sure that we can prove that we do not self-wrap during
  // MaxBECount iterations. We need this because MaxBECount is a maximum
  // iteration count estimate, and we might infer nw from some exit for which
  // we do not know the max exit count (or any other side reasoning).
  // TODO: Turn into assert at some point.
  if (getTypeSizeInBits(MaxBECount->getType()) >
      getTypeSizeInBits(AddRec->getType()))
    return ConstantRange::getFull(BitWidth);
  MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
  const SCEV *RangeWidth = getMinusOne(AddRec->getType());
  const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
  const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
  if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
                                         MaxItersWithoutWrap))
    return ConstantRange::getFull(BitWidth);

  ICmpInst::Predicate LEPred =
      IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  ICmpInst::Predicate GEPred =
      IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
  const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);

  // We know that there is no self-wrap. Let's take Start and End values and
  // look at all intermediate values V1, V2, ..., Vn that IndVar takes during
  // the iteration. They either lie inside the range [Min(Start, End),
  // Max(Start, End)] or outside it:
  //
  // Case 1: RangeMin ... Start V1 ... VN End ... RangeMax;
  // Case 2: RangeMin Vk ... V1 Start ... End Vn ... Vk + 1 RangeMax;
  //
  // The no-self-wrap flag guarantees that the intermediate values cannot be
  // BOTH outside and inside the range [Min(Start, End), Max(Start, End)].
  // Using that knowledge, let's try to prove that we are dealing with Case 1.
  // It is so if Start <= End and the step is positive, or Start >= End and
  // the step is negative.
  const SCEV *Start = AddRec->getStart();
  ConstantRange StartRange = getRangeRef(Start, SignHint);
  ConstantRange EndRange = getRangeRef(End, SignHint);
  ConstantRange RangeBetween = StartRange.unionWith(EndRange);
  // If they already cover the full iteration space, we will know nothing
  // useful even if we prove what we want to prove.
  if (RangeBetween.isFullSet())
    return RangeBetween;
  // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax).
  bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet()
                               : RangeBetween.isWrappedSet();
  if (IsWrappedSet)
    return ConstantRange::getFull(BitWidth);

  if (isKnownPositive(Step) &&
      isKnownPredicateViaConstantRanges(LEPred, Start, End))
    return RangeBetween;
  if (isKnownNegative(Step) &&
      isKnownPredicateViaConstantRanges(GEPred, Start, End))
    return RangeBetween;
  return ConstantRange::getFull(BitWidth);
}

ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
                                                    const SCEV *Step,
                                                    const SCEV *MaxBECount,
                                                    unsigned BitWidth) {
  // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
  //                          == RangeOf({A,+,P}) union RangeOf({B,+,Q})
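  // E.g. {c ? 0 : 1,+,c ? 2 : 3} is c ? {0,+,2} : {1,+,3}, so its range is
  // the union of the ranges of {0,+,2} and {1,+,3}.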

  struct SelectPattern {
    Value *Condition = nullptr;
    APInt TrueValue;
    APInt FalseValue;

    explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
                           const SCEV *S) {
      Optional<unsigned> CastOp;
      APInt Offset(BitWidth, 0);

      assert(SE.getTypeSizeInBits(S->getType()) == BitWidth &&
             "Should be!");

      // Peel off a constant offset:
      if (auto *SA = dyn_cast<SCEVAddExpr>(S)) {
        // In the future we could consider being smarter here and handle
        // {Start+Step,+,Step} too.
        if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0)))
          return;

        Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt();
        S = SA->getOperand(1);
      }

      // Peel off a cast operation
      if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) {
        CastOp = SCast->getSCEVType();
        S = SCast->getOperand();
      }

      using namespace llvm::PatternMatch;

      auto *SU = dyn_cast<SCEVUnknown>(S);
      const APInt *TrueVal, *FalseVal;
      if (!SU ||
          !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal),
                                          m_APInt(FalseVal)))) {
        Condition = nullptr;
        return;
      }

      TrueValue = *TrueVal;
      FalseValue = *FalseVal;

      // Re-apply the cast we peeled off earlier
      if (CastOp.hasValue())
        switch (*CastOp) {
        default:
          llvm_unreachable("Unknown SCEV cast type!");

        case scTruncate:
          TrueValue = TrueValue.trunc(BitWidth);
          FalseValue = FalseValue.trunc(BitWidth);
          break;
        case scZeroExtend:
          TrueValue = TrueValue.zext(BitWidth);
          FalseValue = FalseValue.zext(BitWidth);
          break;
        case scSignExtend:
          TrueValue = TrueValue.sext(BitWidth);
          FalseValue = FalseValue.sext(BitWidth);
          break;
        }

      // Re-apply the constant offset we peeled off earlier
      TrueValue += Offset;
      FalseValue += Offset;
    }

    bool isRecognized() { return Condition != nullptr; }
  };

  SelectPattern StartPattern(*this, BitWidth, Start);
  if (!StartPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  SelectPattern StepPattern(*this, BitWidth, Step);
  if (!StepPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  if (StartPattern.Condition != StepPattern.Condition) {
    // We don't handle this case today; but we could, by considering four
    // possibilities below instead of two. I'm not sure if there are cases
    // where that will help over what getRange already does, though.
    return ConstantRange::getFull(BitWidth);
  }

  // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to
  // construct arbitrary general SCEV expressions here. This function is called
  // from deep in the call stack, and calling getSCEV (on a sext instruction,
  // say) can end up caching a suboptimal value.

  // FIXME: without the explicit `this` receiver below, MSVC errors out with
  // C2352 and C2512 (otherwise it isn't needed).

  const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue);
  const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue);
  const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue);
  const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue);

  ConstantRange TrueRange =
      this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth);
  ConstantRange FalseRange =
      this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth);

  return TrueRange.unionWith(FalseRange);
}

SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
  if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap;
  const BinaryOperator *BinOp = cast<BinaryOperator>(V);

  // Return early if there are no flags to propagate to the SCEV.
  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BinOp->hasNoUnsignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
  if (BinOp->hasNoSignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
  if (Flags == SCEV::FlagAnyWrap)
    return SCEV::FlagAnyWrap;

  return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
}

const Instruction *
ScalarEvolution::getNonTrivialDefiningScopeBound(const SCEV *S) {
  if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(S))
    return &*AddRec->getLoop()->getHeader()->begin();
  if (auto *U = dyn_cast<SCEVUnknown>(S))
    if (auto *I = dyn_cast<Instruction>(U->getValue()))
      return I;
  return nullptr;
}

const Instruction *
ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops) {
  // Do a bounded search of the def relation of the requested SCEVs.
  SmallSet<const SCEV *, 16> Visited;
  SmallVector<const SCEV *> Worklist;
  auto pushOp = [&](const SCEV *S) {
    if (!Visited.insert(S).second)
      return;
    // Threshold of 30 here is arbitrary.
    if (Visited.size() > 30)
      return;
    Worklist.push_back(S);
  };

  for (auto *S : Ops)
    pushOp(S);

  const Instruction *Bound = nullptr;
  while (!Worklist.empty()) {
    auto *S = Worklist.pop_back_val();
    if (auto *DefI = getNonTrivialDefiningScopeBound(S)) {
      if (!Bound || DT.dominates(Bound, DefI))
        Bound = DefI;
    } else if (auto *S2 = dyn_cast<SCEVCastExpr>(S))
      for (auto *Op : S2->operands())
        pushOp(Op);
    else if (auto *S2 = dyn_cast<SCEVNAryExpr>(S))
      for (auto *Op : S2->operands())
        pushOp(Op);
    else if (auto *S2 = dyn_cast<SCEVUDivExpr>(S))
      for (auto *Op : S2->operands())
        pushOp(Op);
  }
  return Bound ? Bound : &*F.getEntryBlock().begin();
}

bool ScalarEvolution::isGuaranteedToTransferExecutionTo(const Instruction *A,
                                                        const Instruction *B) {
  if (A->getParent() == B->getParent() &&
      isGuaranteedToTransferExecutionToSuccessor(A->getIterator(),
                                                 B->getIterator()))
    return true;

  auto *BLoop = LI.getLoopFor(B->getParent());
  if (BLoop && BLoop->getHeader() == B->getParent() &&
      BLoop->getLoopPreheader() == A->getParent() &&
      isGuaranteedToTransferExecutionToSuccessor(A->getIterator(),
                                                 A->getParent()->end()) &&
      isGuaranteedToTransferExecutionToSuccessor(B->getParent()->begin(),
                                                 B->getIterator()))
    return true;
  return false;
}

bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
  // Only proceed if we can prove that I does not yield poison.
  if (!programUndefinedIfPoison(I))
    return false;

  // At this point we know that if I is executed, then it does not wrap
  // according to at least one of NSW or NUW. If I is not executed, then we do
  // not know if the calculation that I represents would wrap. Multiple
  // instructions can map to the same SCEV. If we apply NSW or NUW from I to
  // the SCEV, we must guarantee no wrapping for that SCEV also when it is
  // derived from other instructions that map to the same SCEV. We cannot make
  // that guarantee for cases where I is not executed. So we need to find an
  // upper bound on the defining scope for the SCEV, and prove that I is
  // executed every time we enter that scope. When the bounding scope is a
  // loop (the common case), this is equivalent to proving I executes on every
  // iteration of that loop.
  SmallVector<const SCEV *> SCEVOps;
  for (const Use &Op : I->operands()) {
    // I could be an extractvalue from a call to an overflow intrinsic.
    // TODO: We can do better here in some cases.
    if (isSCEVable(Op->getType()))
      SCEVOps.push_back(getSCEV(Op));
  }
  auto *DefI = getDefiningScopeBound(SCEVOps);
  return isGuaranteedToTransferExecutionTo(DefI, I);
}

bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
  // If we know that \c I can never be poison period, then that's enough.
  if (isSCEVExprNeverPoison(I))
    return true;

  // For an add recurrence specifically, we assume that infinite loops without
  // side effects are undefined behavior, and then reason as follows:
  //
  // If the add recurrence is poison in any iteration, it is poison on all
  // future iterations (since incrementing poison yields poison). If the result
  // of the add recurrence is fed into the loop latch condition and the loop
  // does not contain any throws or exiting blocks other than the latch, we now
  // have the ability to "choose" whether the backedge is taken or not (by
  // choosing a sufficiently evil value for the poison feeding into the branch)
  // for every iteration including and after the one in which \p I first became
  // poison. There are two possibilities (let's call the iteration in which \p
  // I first became poison iteration K):
  //
  // 1. In the set of iterations including and after K, the loop body executes
  //    no side effects. In this case executing the backedge an infinite number
  //    of times will yield undefined behavior.
  //
  // 2. In the set of iterations including and after K, the loop body executes
  //    at least one side effect. In this case, that specific instance of side
  //    effect is control dependent on poison, which also yields undefined
  //    behavior.

  auto *ExitingBB = L->getExitingBlock();
  auto *LatchBB = L->getLoopLatch();
  if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
    return false;

  SmallPtrSet<const Instruction *, 16> Pushed;
  SmallVector<const Instruction *, 8> PoisonStack;

  // We start by assuming \c I, the post-inc add recurrence, is poison. Only
  // things that are known to be poison under that assumption go on the
  // PoisonStack.
  Pushed.insert(I);
  PoisonStack.push_back(I);

  bool LatchControlDependentOnPoison = false;
  while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
    const Instruction *Poison = PoisonStack.pop_back_val();

    for (auto *PoisonUser : Poison->users()) {
      if (propagatesPoison(cast<Operator>(PoisonUser))) {
        if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
          PoisonStack.push_back(cast<Instruction>(PoisonUser));
      } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
        assert(BI->isConditional() && "Only possibility!");
        if (BI->getParent() == LatchBB) {
          LatchControlDependentOnPoison = true;
          break;
        }
      }
    }
  }

  return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
}

ScalarEvolution::LoopProperties
ScalarEvolution::getLoopProperties(const Loop *L) {
  using LoopProperties = ScalarEvolution::LoopProperties;

  auto Itr = LoopPropertiesCache.find(L);
  if (Itr == LoopPropertiesCache.end()) {
    auto HasSideEffects = [](Instruction *I) {
      if (auto *SI = dyn_cast<StoreInst>(I))
        return !SI->isSimple();

      return I->mayThrow() || I->mayWriteToMemory();
    };

    LoopProperties LP = {/* HasNoAbnormalExits */ true,
                         /*HasNoSideEffects*/ true};

    for (auto *BB : L->getBlocks())
      for (auto &I : *BB) {
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          LP.HasNoAbnormalExits = false;
        if (HasSideEffects(&I))
          LP.HasNoSideEffects = false;
        if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
          break; // We're already as pessimistic as we can get.
      }

    auto InsertPair = LoopPropertiesCache.insert({L, LP});
    assert(InsertPair.second && "We just checked!");
    Itr = InsertPair.first;
  }

  return Itr->second;
}

bool ScalarEvolution::loopIsFiniteByAssumption(const Loop *L) {
  // A mustprogress loop without side effects must be finite.
  // TODO: The check used here is very conservative. It's only *specific*
  // side effects which are well defined in infinite loops.
  return isMustProgress(L) && loopHasNoSideEffects(L);
}

const SCEV *ScalarEvolution::createSCEV(Value *V) {
  if (!isSCEVable(V->getType()))
    return getUnknown(V);

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // Don't attempt to analyze instructions in blocks that aren't
    // reachable. Such instructions don't matter, and they aren't required
    // to obey basic rules for definitions dominating uses which this
    // analysis depends on.
    if (!DT.isReachableFromEntry(I->getParent()))
      return getUnknown(UndefValue::get(V->getType()));
  } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
    return getConstant(CI);
  else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee());
  else if (!isa<ConstantExpr>(V))
    return getUnknown(V);

  Operator *U = cast<Operator>(V);
  if (auto BO = MatchBinaryOp(U, DT)) {
    switch (BO->Opcode) {
    case Instruction::Add: {
      // The simple thing to do would be to just call getSCEV on both operands
      // and call getAddExpr with the result. However if we're looking at a
      // bunch of things all added together, this can be quite inefficient,
      // because it leads to N-1 getAddExpr calls for N ultimate operands.
      // Instead, gather up all the operands and make a single getAddExpr call.
      // LLVM IR canonical form means we need only traverse the left operands.
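      // E.g. for (((a + b) + c) + d) we issue one getAddExpr(a, b, c, d)
      // call instead of three nested two-operand ones.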
      SmallVector<const SCEV *, 4> AddOps;
      do {
        if (BO->Op) {
          if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
            AddOps.push_back(OpSCEV);
            break;
          }

          // If a NUW or NSW flag can be applied to the SCEV for this
          // addition, then compute the SCEV for this addition by itself
          // with a separate call to getAddExpr. We need to do that
          // instead of pushing the operands of the addition onto AddOps,
          // since the flags are only known to apply to this particular
          // addition - they may not apply to other additions that can be
          // formed with operands from AddOps.
          const SCEV *RHS = getSCEV(BO->RHS);
          SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
          if (Flags != SCEV::FlagAnyWrap) {
            const SCEV *LHS = getSCEV(BO->LHS);
            if (BO->Opcode == Instruction::Sub)
              AddOps.push_back(getMinusSCEV(LHS, RHS, Flags));
            else
              AddOps.push_back(getAddExpr(LHS, RHS, Flags));
            break;
          }
        }

        if (BO->Opcode == Instruction::Sub)
          AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS)));
        else
          AddOps.push_back(getSCEV(BO->RHS));

        auto NewBO = MatchBinaryOp(BO->LHS, DT);
        if (!NewBO || (NewBO->Opcode != Instruction::Add &&
                       NewBO->Opcode != Instruction::Sub)) {
          AddOps.push_back(getSCEV(BO->LHS));
          break;
        }
        BO = NewBO;
      } while (true);

      return getAddExpr(AddOps);
    }

    case Instruction::Mul: {
      SmallVector<const SCEV *, 4> MulOps;
      do {
        if (BO->Op) {
          if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
            MulOps.push_back(OpSCEV);
            break;
          }

          SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
          if (Flags != SCEV::FlagAnyWrap) {
            MulOps.push_back(
                getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags));
            break;
          }
        }

        MulOps.push_back(getSCEV(BO->RHS));
        auto NewBO = MatchBinaryOp(BO->LHS, DT);
        if (!NewBO || NewBO->Opcode != Instruction::Mul) {
          MulOps.push_back(getSCEV(BO->LHS));
          break;
        }
        BO = NewBO;
      } while (true);

      return getMulExpr(MulOps);
    }
    case Instruction::UDiv:
      return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
    case Instruction::URem:
      return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
    case Instruction::Sub: {
      SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
      if (BO->Op)
        Flags = getNoWrapFlagsFromUB(BO->Op);
      return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags);
    }
    case Instruction::And:
      // For an expression like x&255 that merely masks off the high bits,
      // use zext(trunc(x)) as the SCEV expression.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        if (CI->isZero())
          return getSCEV(BO->RHS);
        if (CI->isMinusOne())
          return getSCEV(BO->LHS);
        const APInt &A = CI->getValue();

        // Instcombine's ShrinkDemandedConstant may strip bits out of
        // constants, obscuring what would otherwise be a low-bits mask.
        // Use computeKnownBits to compute what ShrinkDemandedConstant
        // knew about to reconstruct a low-bits mask value.
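        // E.g. for A = 0x78 (0b01111000) in i8 we get LZ = 1 and TZ = 3, the
        // effective mask below is 0b01111000, and "x & 0x78" is rebuilt as
        // ((x /u 8) truncated to i4, zero-extended back) * 8.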
6907 unsigned LZ = A.countLeadingZeros();
6908 unsigned TZ = A.countTrailingZeros();
6909 unsigned BitWidth = A.getBitWidth();
6910 KnownBits Known(BitWidth);
6911 computeKnownBits(BO->LHS, Known, getDataLayout(),
6912 0, &AC, nullptr, &DT);
6913
6914 APInt EffectiveMask =
6915 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
6916 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) {
6917 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ));
6918 const SCEV *LHS = getSCEV(BO->LHS);
6919 const SCEV *ShiftedLHS = nullptr;
6920 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) {
6921 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) {
6922 // For an expression like (x * 8) & 8, simplify the multiply.
6923 unsigned MulZeros = OpC->getAPInt().countTrailingZeros();
6924 unsigned GCD = std::min(MulZeros, TZ);
6925 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD);
6926 SmallVector<const SCEV*, 4> MulOps;
6927 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD)));
6928 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end());
6929 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags());
6930 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt));
6931 }
6932 }
6933 if (!ShiftedLHS)
6934 ShiftedLHS = getUDivExpr(LHS, MulCount);
6935 return getMulExpr(
6936 getZeroExtendExpr(
6937 getTruncateExpr(ShiftedLHS,
6938 IntegerType::get(getContext(), BitWidth - LZ - TZ)),
6939 BO->LHS->getType()),
6940 MulCount);
6941 }
6942 }
6943 break;
6944
6945 case Instruction::Or:
6946 // If the RHS of the Or is a constant, we may have something like:
6947 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
6948 // optimizations will transparently handle this case.
6949 //
6950 // In order for this transformation to be safe, the LHS must be of the
6951 // form X*(2^n) and the Or constant must be less than 2^n.
6952 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
6953 const SCEV *LHS = getSCEV(BO->LHS);
6954 const APInt &CIVal = CI->getValue();
6955 if (GetMinTrailingZeros(LHS) >=
6956 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
6957 // Build a plain add SCEV.
6958 return getAddExpr(LHS, getSCEV(CI),
6959 (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW));
6960 }
6961 }
6962 break;
6963
6964 case Instruction::Xor:
6965 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
6966 // If the RHS of xor is -1, then this is a not operation.
6967 if (CI->isMinusOne())
6968 return getNotSCEV(getSCEV(BO->LHS));
6969
6970 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
6971 // This is a variant of the check for xor with -1, and it handles
6972 // the case where instcombine has trimmed non-demanded bits out
6973 // of an xor with -1.
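// For example (an assumed i32/i8 sketch): with
//   %a = and i32 %x, 255
//   %r = xor i32 %a, 255
// getSCEV(%a) is (zext i8 (trunc i32 %x to i8) to i32), and since 255 is a
// low-bits mask for i8, %r is modeled below as the zext of the complemented
// 8-bit value, i.e. (zext i8 (not (trunc i32 %x to i8)) to i32).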
6974 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS))
6975 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1)))
6976 if (LBO->getOpcode() == Instruction::And &&
6977 LCI->getValue() == CI->getValue())
6978 if (const SCEVZeroExtendExpr *Z =
6979 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) {
6980 Type *UTy = BO->LHS->getType();
6981 const SCEV *Z0 = Z->getOperand();
6982 Type *Z0Ty = Z0->getType();
6983 unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
6984
6985 // If C is a low-bits mask, the zero extend is serving to
6986 // mask off the high bits. Complement the operand and
6987 // re-apply the zext.
6988 if (CI->getValue().isMask(Z0TySize))
6989 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
6990
6991 // If C is a single bit, it may be in the sign-bit position
6992 // before the zero-extend. In this case, represent the xor
6993 // using an add, which is equivalent, and re-apply the zext.
6994 APInt Trunc = CI->getValue().trunc(Z0TySize);
6995 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
6996 Trunc.isSignMask())
6997 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
6998 UTy);
6999 }
7000 }
7001 break;
7002
7003 case Instruction::Shl:
7004 // Turn shift left of a constant amount into a multiply.
7005 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
7006 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();
7007
7008 // If the shift count is not less than the bitwidth, the result of
7009 // the shift is undefined. Don't try to analyze it, because the
7010 // resolution chosen here may differ from the resolution chosen in
7011 // other parts of the compiler.
7012 if (SA->getValue().uge(BitWidth))
7013 break;
7014
7015 // We can safely preserve the nuw flag in all cases. It's also safe to
7016 // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation
7017 // requires special handling. It can be preserved as long as we're not
7018 // left shifting by bitwidth - 1.
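// An illustrative i8 sketch of why bitwidth - 1 is special: shl nsw i8 -1, 7
// is a well-defined -128, but the equivalent multiplier 1 << 7 is -128
// (INT8_MIN), and mul nsw i8 -1, -128 would overflow to +128, i.e. poison.
// Hence nsw alone cannot be transferred to the multiply for that shift amount.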
7019 auto Flags = SCEV::FlagAnyWrap;
7020 if (BO->Op) {
7021 auto MulFlags = getNoWrapFlagsFromUB(BO->Op);
7022 if ((MulFlags & SCEV::FlagNSW) &&
7023 ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1)))
7024 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW);
7025 if (MulFlags & SCEV::FlagNUW)
7026 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW);
7027 }
7028
7029 Constant *X = ConstantInt::get(
7030 getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
7031 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
7032 }
7033 break;
7034
7035 case Instruction::AShr: {
7036 // AShr X, C, where C is a constant.
7037 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
7038 if (!CI)
7039 break;
7040
7041 Type *OuterTy = BO->LHS->getType();
7042 uint64_t BitWidth = getTypeSizeInBits(OuterTy);
7043 // If the shift count is not less than the bitwidth, the result of
7044 // the shift is undefined. Don't try to analyze it, because the
7045 // resolution chosen here may differ from the resolution chosen in
7046 // other parts of the compiler.
7047 if (CI->getValue().uge(BitWidth))
7048 break;
7049
7050 if (CI->isZero())
7051 return getSCEV(BO->LHS); // shift by zero --> noop
7052
7053 uint64_t AShrAmt = CI->getZExtValue();
7054 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);
7055
7056 Operator *L = dyn_cast<Operator>(BO->LHS);
7057 if (L && L->getOpcode() == Instruction::Shl) {
7058 // X = Shl A, n
7059 // Y = AShr X, m
7060 // Both n and m are constant.
7061
7062 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
7063 if (L->getOperand(1) == BO->RHS)
7064 // For a two-shift sext-inreg, i.e. n = m,
7065 // use sext(trunc(x)) as the SCEV expression.
7066 return getSignExtendExpr(
7067 getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);
7068
7069 ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
7070 if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
7071 uint64_t ShlAmt = ShlAmtCI->getZExtValue();
7072 if (ShlAmt > AShrAmt) {
7073 // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
7074 // expression. We already checked that ShlAmt < BitWidth, so
7075 // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
7076 // ShlAmt - AShrAmt < BitWidth - AShrAmt.
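// An instantiation of the formula (widths assumed): for i32 with n = 3 and
// m = 1, TruncTy is i31 and the multiplier is 1 << 2 == 4, so
// ((x shl 3) ashr 1) is modeled as
//   (sext i31 (4 * (trunc i32 x to i31)) to i32)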
7077 APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
7078 ShlAmt - AShrAmt);
7079 return getSignExtendExpr(
7080 getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
7081 getConstant(Mul)), OuterTy);
7082 }
7083 }
7084 }
7085 break;
7086 }
7087 }
7088 }
7089
7090 switch (U->getOpcode()) {
7091 case Instruction::Trunc:
7092 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
7093
7094 case Instruction::ZExt:
7095 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
7096
7097 case Instruction::SExt:
7098 if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
7099 // The NSW flag of a subtract does not always survive the conversion to
7100 // A + (-1)*B. By pushing sign extension onto its operands we are much
7101 // more likely to preserve NSW and allow later AddRec optimisations.
7102 //
7103 // NOTE: This is effectively duplicating this logic from getSignExtend:
7104 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
7105 // but by that point the NSW information has potentially been lost.
7106 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
7107 Type *Ty = U->getType();
7108 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
7109 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
7110 return getMinusSCEV(V1, V2, SCEV::FlagNSW);
7111 }
7112 }
7113 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
7114
7115 case Instruction::BitCast:
7116 // BitCasts are no-op casts so we just eliminate the cast.
7117 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
7118 return getSCEV(U->getOperand(0));
7119 break;
7120
7121 case Instruction::PtrToInt: {
7122 // Pointer to integer cast is straightforward, so we do model it.
7123 const SCEV *Op = getSCEV(U->getOperand(0));
7124 Type *DstIntTy = U->getType();
7125 // But only if effective SCEV (integer) type is wide enough to represent
7126 // all possible pointer values.
7127 const SCEV *IntOp = getPtrToIntExpr(Op, DstIntTy);
7128 if (isa<SCEVCouldNotCompute>(IntOp))
7129 return getUnknown(V);
7130 return IntOp;
7131 }
7132 case Instruction::IntToPtr:
7133 // Just don't deal with inttoptr casts.
7134 return getUnknown(V);
7135
7136 case Instruction::SDiv:
7137 // If both operands are non-negative, this is just a udiv.
7138 if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
7139 isKnownNonNegative(getSCEV(U->getOperand(1))))
7140 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
7141 break;
7142
7143 case Instruction::SRem:
7144 // If both operands are non-negative, this is just a urem.
7145 if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
7146 isKnownNonNegative(getSCEV(U->getOperand(1))))
7147 return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
7148 break;
7149
7150 case Instruction::GetElementPtr:
7151 return createNodeForGEP(cast<GEPOperator>(U));
7152
7153 case Instruction::PHI:
7154 return createNodeForPHI(cast<PHINode>(U));
7155
7156 case Instruction::Select:
7157 // U can also be a select constant expr, which we let fall through. Since
7158 // createNodeForSelect only works for a condition that is an `ICmpInst`, and
7159 // constant expressions cannot have instructions as operands, we'd have
7160 // returned getUnknown for a select constant expression anyway.
7161 if (isa<Instruction>(U))
7162 return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
7163 U->getOperand(1), U->getOperand(2));
7164 break;
7165
7166 case Instruction::Call:
7167 case Instruction::Invoke:
7168 if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
7169 return getSCEV(RV);
7170
7171 if (auto *II = dyn_cast<IntrinsicInst>(U)) {
7172 switch (II->getIntrinsicID()) {
7173 case Intrinsic::abs:
7174 return getAbsExpr(
7175 getSCEV(II->getArgOperand(0)),
7176 /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne());
7177 case Intrinsic::umax:
7178 return getUMaxExpr(getSCEV(II->getArgOperand(0)),
7179 getSCEV(II->getArgOperand(1)));
7180 case Intrinsic::umin:
7181 return getUMinExpr(getSCEV(II->getArgOperand(0)),
7182 getSCEV(II->getArgOperand(1)));
7183 case Intrinsic::smax:
7184 return getSMaxExpr(getSCEV(II->getArgOperand(0)),
7185 getSCEV(II->getArgOperand(1)));
7186 case Intrinsic::smin:
7187 return getSMinExpr(getSCEV(II->getArgOperand(0)),
7188 getSCEV(II->getArgOperand(1)));
7189 case Intrinsic::usub_sat: {
7190 const SCEV *X = getSCEV(II->getArgOperand(0));
7191 const SCEV *Y = getSCEV(II->getArgOperand(1));
7192 const SCEV *ClampedY = getUMinExpr(X, Y);
7193 return getMinusSCEV(X, ClampedY, SCEV::FlagNUW);
7194 }
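// The usub_sat expansion above is exact: if Y u<= X then umin(X, Y) == Y
// and the result is X - Y; otherwise umin(X, Y) == X and the result is 0.
// Either way the subtraction cannot wrap, which justifies FlagNUW.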
7195 case Intrinsic::uadd_sat: {
7196 const SCEV *X = getSCEV(II->getArgOperand(0));
7197 const SCEV *Y = getSCEV(II->getArgOperand(1));
7198 const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y));
7199 return getAddExpr(ClampedX, Y, SCEV::FlagNUW);
7200 }
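// Likewise for the uadd_sat expansion above: umin(X, ~Y) u<= ~Y, so
// ClampedX + Y u<= ~Y + Y == -1 (all ones) and the add cannot wrap; when
// X u> ~Y the sum is exactly the saturated all-ones result.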
7201 case Intrinsic::start_loop_iterations:
7202 // A start_loop_iterations is just equivalent to the first operand for
7203 // SCEV purposes.
7204 return getSCEV(II->getArgOperand(0));
7205 default:
7206 break;
7207 }
7208 }
7209 break;
7210 }
7211
7212 return getUnknown(V);
7213 }
7214
7215 //===----------------------------------------------------------------------===//
7216 // Iteration Count Computation Code
7217 //
7218
7219 const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount,
7220 bool Extend) {
7221 if (isa<SCEVCouldNotCompute>(ExitCount))
7222 return getCouldNotCompute();
7223
7224 auto *ExitCountType = ExitCount->getType();
7225 assert(ExitCountType->isIntegerTy());
7226
7227 if (!Extend)
7228 return getAddExpr(ExitCount, getOne(ExitCountType));
7229
7230 auto *WiderType = Type::getIntNTy(ExitCountType->getContext(),
7231 1 + ExitCountType->getScalarSizeInBits());
7232 return getAddExpr(getNoopOrZeroExtend(ExitCount, WiderType),
7233 getOne(WiderType));
7234 }
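// A sketch of the widening above (values assumed): for an i8 exit count of
// 255 (the backedge runs 255 times), the trip count 256 does not fit in i8;
// with Extend set, the result is (1 + (zext i8 255 to i9)) == i9 256.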
7235
7236 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
7237 if (!ExitCount)
7238 return 0;
7239
7240 ConstantInt *ExitConst = ExitCount->getValue();
7241
7242 // Guard against huge trip counts.
7243 if (ExitConst->getValue().getActiveBits() > 32)
7244 return 0;
7245
7246 // In case of integer overflow (e.g. an exit count of UINT32_MAX, whose
7246 // trip count wraps to 0 in 32 bits), this returns 0, which is correct.
7247 return ((unsigned)ExitConst->getZExtValue()) + 1;
7248 }
7249
7250 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
7251 auto *ExitCount = dyn_cast<SCEVConstant>(getBackedgeTakenCount(L, Exact));
7252 return getConstantTripCount(ExitCount);
7253 }
7254
7255 unsigned
7256 ScalarEvolution::getSmallConstantTripCount(const Loop *L,
7257 const BasicBlock *ExitingBlock) {
7258 assert(ExitingBlock && "Must pass a non-null exiting block!");
7259 assert(L->isLoopExiting(ExitingBlock) &&
7260 "Exiting block must actually branch out of the loop!");
7261 const SCEVConstant *ExitCount =
7262 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
7263 return getConstantTripCount(ExitCount);
7264 }
7265
7266 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
7267 const auto *MaxExitCount =
7268 dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
7269 return getConstantTripCount(MaxExitCount);
7270 }
7271
7272 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
7273 SmallVector<BasicBlock *, 8> ExitingBlocks;
7274 L->getExitingBlocks(ExitingBlocks);
7275
7276 Optional<unsigned> Res = None;
7277 for (auto *ExitingBB : ExitingBlocks) {
7278 unsigned Multiple = getSmallConstantTripMultiple(L, ExitingBB);
7279 if (!Res)
7280 Res = Multiple;
7281 Res = (unsigned)GreatestCommonDivisor64(*Res, Multiple);
7282 }
7283 return Res.getValueOr(1);
7284 }
7285
7286 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
7287 const SCEV *ExitCount) {
7288 if (ExitCount == getCouldNotCompute())
7289 return 1;
7290
7291 // Get the trip count
7292 const SCEV *TCExpr = getTripCountFromExitCount(ExitCount);
7293
7294 const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
7295 if (!TC)
7296 // Attempt to factor more general cases. Returns the greatest power of
7297 // two divisor. If overflow happens, the trip count expression is still
7298 // divisible by the greatest power of 2 divisor returned.
7299 return 1U << std::min((uint32_t)31,
7300 GetMinTrailingZeros(applyLoopGuards(TCExpr, L)));
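  // For example (an assumed expression): if TCExpr is (4 * %n), the factoring
  // above reports at least two trailing zero bits and returns 4; even if
  // 4 * %n wraps, the wrapped value is still divisible by 4.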
7301
7302 ConstantInt *Result = TC->getValue();
7303
7304 // Guard against huge trip counts (this requires checking
7305 // for zero to handle the case where the trip count == -1 and the
7306 // addition wraps).
7307 if (!Result || Result->getValue().getActiveBits() > 32 ||
7308 Result->getValue().getActiveBits() == 0)
7309 return 1;
7310
7311 return (unsigned)Result->getZExtValue();
7312 }
7313
7314 /// Returns the largest constant divisor of the trip count of this loop as a
7315 /// normal unsigned value, if possible. This means that the actual trip count is
7316 /// always a multiple of the returned value (don't forget the trip count could
7317 /// very well be zero as well!).
7318 ///
7319 /// Returns 1 if the trip count is unknown or not guaranteed to be a
7320 /// multiple of a constant (which is also the case if the trip count is simply
7321 /// constant; use getSmallConstantTripCount for that case). It will also
7322 /// return 1 if the trip count is very large (>= 2^32).
7323 ///
7324 /// As explained in the comments for getSmallConstantTripCount, this assumes
7325 /// that control exits the loop via ExitingBlock.
7326 unsigned
7327 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
7328 const BasicBlock *ExitingBlock) {
7329 assert(ExitingBlock && "Must pass a non-null exiting block!");
7330 assert(L->isLoopExiting(ExitingBlock) &&
7331 "Exiting block must actually branch out of the loop!");
7332 const SCEV *ExitCount = getExitCount(L, ExitingBlock);
7333 return getSmallConstantTripMultiple(L, ExitCount);
7334 }
7335
7336 const SCEV *ScalarEvolution::getExitCount(const Loop *L,
7337 const BasicBlock *ExitingBlock,
7338 ExitCountKind Kind) {
7339 switch (Kind) {
7340 case Exact:
7341 case SymbolicMaximum:
7342 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
7343 case ConstantMaximum:
7344 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this);
7345 };
7346 llvm_unreachable("Invalid ExitCountKind!");
7347 }
7348
7349 const SCEV *
7350 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
7351 SCEVUnionPredicate &Preds) {
7352 return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
7353 }
7354
7355 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
7356 ExitCountKind Kind) {
7357 switch (Kind) {
7358 case Exact:
7359 return getBackedgeTakenInfo(L).getExact(L, this);
7360 case ConstantMaximum:
7361 return getBackedgeTakenInfo(L).getConstantMax(this);
7362 case SymbolicMaximum:
7363 return getBackedgeTakenInfo(L).getSymbolicMax(L, this);
7364 };
7365 llvm_unreachable("Invalid ExitCountKind!");
7366 }
7367
7368 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
7369 return getBackedgeTakenInfo(L).isConstantMaxOrZero(this);
7370 }
7371
7372 /// Push PHI nodes in the header of the given loop onto the given Worklist.
7373 static void
7374 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
7375 BasicBlock *Header = L->getHeader();
7376
7377 // Push all Loop-header PHIs onto the Worklist stack.
7378 for (PHINode &PN : Header->phis())
7379 Worklist.push_back(&PN);
7380 }
7381
7382 const ScalarEvolution::BackedgeTakenInfo &
7383 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
7384 auto &BTI = getBackedgeTakenInfo(L);
7385 if (BTI.hasFullInfo())
7386 return BTI;
7387
7388 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
7389
7390 if (!Pair.second)
7391 return Pair.first->second;
7392
7393 BackedgeTakenInfo Result =
7394 computeBackedgeTakenCount(L, /*AllowPredicates=*/true);
7395
7396 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
7397 }
7398
7399 ScalarEvolution::BackedgeTakenInfo &
7400 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
7401 // Initially insert an invalid entry for this loop. If the insertion
7402 // succeeds, proceed to actually compute a backedge-taken count and
7403 // update the value. The temporary CouldNotCompute value tells SCEV
7404 // code elsewhere that it shouldn't attempt to request a new
7405 // backedge-taken count, which could result in infinite recursion.
7406 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
7407 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
7408 if (!Pair.second)
7409 return Pair.first->second;
7410
7411 // computeBackedgeTakenCount may allocate memory for its result. Inserting it
7412 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
7413 // must be cleared in this scope.
7414 BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
7415
7416 // In a product build, there is no use of statistics.
7417 (void)NumTripCountsComputed;
7418 (void)NumTripCountsNotComputed;
7419 #if LLVM_ENABLE_STATS || !defined(NDEBUG)
7420 const SCEV *BEExact = Result.getExact(L, this);
7421 if (BEExact != getCouldNotCompute()) {
7422 assert(isLoopInvariant(BEExact, L) &&
7423 isLoopInvariant(Result.getConstantMax(this), L) &&
7424 "Computed backedge-taken count isn't loop invariant for loop!");
7425 ++NumTripCountsComputed;
7426 } else if (Result.getConstantMax(this) == getCouldNotCompute() &&
7427 isa<PHINode>(L->getHeader()->begin())) {
7428 // Only count loops that have phi nodes as not being computable.
7429 ++NumTripCountsNotComputed;
7430 }
7431 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG)
7432
7433 // Now that we know more about the trip count for this loop, forget any
7434 // existing SCEV values for PHI nodes in this loop since they are only
7435 // conservative estimates made without the benefit of trip count
7436 // information. This is similar to the code in forgetLoop, except that
7437 // it handles SCEVUnknown PHI nodes specially.
7438 if (Result.hasAnyInfo()) {
7439 SmallVector<Instruction *, 16> Worklist;
7440 PushLoopPHIs(L, Worklist);
7441
7442 SmallPtrSet<Instruction *, 8> Discovered;
7443 while (!Worklist.empty()) {
7444 Instruction *I = Worklist.pop_back_val();
7445
7446 ValueExprMapType::iterator It =
7447 ValueExprMap.find_as(static_cast<Value *>(I));
7448 if (It != ValueExprMap.end()) {
7449 const SCEV *Old = It->second;
7450
7451 // SCEVUnknown for a PHI either means that it has an unrecognized
7452 // structure, or it's a PHI that's in the process of being computed
7453 // by createNodeForPHI. In the former case, additional loop trip
7454 // count information isn't going to change anything. In the latter
7455 // case, createNodeForPHI will perform the necessary updates on its
7456 // own when it gets to that point.
7457 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
7458 eraseValueFromMap(It->first);
7459 forgetMemoizedResults(Old);
7460 }
7461 if (PHINode *PN = dyn_cast<PHINode>(I))
7462 ConstantEvolutionLoopExitValue.erase(PN);
7463 }
7464
7465 // Since we don't need to invalidate anything for correctness and we're
7466 // only invalidating to make SCEV's results more precise, we get to stop
7467 // early to avoid invalidating too much. This is especially important in
7468 // cases like:
7469 //
7470 // %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
7471 // loop0:
7472 // %pn0 = phi
7473 // ...
7474 // loop1:
7475 // %pn1 = phi
7476 // ...
7477 //
7478 // where both loop0 and loop1's backedge taken count uses the SCEV
7479 // expression for %v. If we don't have the early stop below then in cases
7480 // like the above, getBackedgeTakenInfo(loop1) will clear out the trip
7481 // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip
7482 // count for loop1, effectively nullifying SCEV's trip count cache.
7483 for (auto *U : I->users())
7484 if (auto *I = dyn_cast<Instruction>(U)) {
7485 auto *LoopForUser = LI.getLoopFor(I->getParent());
7486 if (LoopForUser && L->contains(LoopForUser) &&
7487 Discovered.insert(I).second)
7488 Worklist.push_back(I);
7489 }
7490 }
7491 }
7492
7493 // Re-lookup the insert position, since the call to
7494 // computeBackedgeTakenCount above could result in a
7495 // recursive call to getBackedgeTakenInfo (on a different
7496 // loop), which would invalidate the iterator computed
7497 // earlier.
7498 return BackedgeTakenCounts.find(L)->second = std::move(Result);
7499 }
7500
7501 void ScalarEvolution::forgetAllLoops() {
7502 // This method is intended to forget all info about loops. It should
7503 // invalidate caches as if the following happened:
7504 // - The trip counts of all loops have changed arbitrarily
7505 // - Every llvm::Value has been updated in place to produce a different
7506 // result.
7507 BackedgeTakenCounts.clear();
7508 PredicatedBackedgeTakenCounts.clear();
7509 LoopPropertiesCache.clear();
7510 ConstantEvolutionLoopExitValue.clear();
7511 ValueExprMap.clear();
7512 ValuesAtScopes.clear();
7513 LoopDispositions.clear();
7514 BlockDispositions.clear();
7515 UnsignedRanges.clear();
7516 SignedRanges.clear();
7517 ExprValueMap.clear();
7518 HasRecMap.clear();
7519 MinTrailingZerosCache.clear();
7520 PredicatedSCEVRewrites.clear();
7521 }
7522
7523 void ScalarEvolution::forgetLoop(const Loop *L) {
7524 SmallVector<const Loop *, 16> LoopWorklist(1, L);
7525 SmallVector<Instruction *, 32> Worklist;
7526 SmallPtrSet<Instruction *, 16> Visited;
7527
7528 // Iterate over all the loops and sub-loops to drop SCEV information.
7529 while (!LoopWorklist.empty()) {
7530 auto *CurrL = LoopWorklist.pop_back_val();
7531
7532 // Drop any stored trip count value.
7533 BackedgeTakenCounts.erase(CurrL);
7534 PredicatedBackedgeTakenCounts.erase(CurrL);
7535
7536 // Drop information about predicated SCEV rewrites for this loop.
7537 for (auto I = PredicatedSCEVRewrites.begin();
7538 I != PredicatedSCEVRewrites.end();) {
7539 std::pair<const SCEV *, const Loop *> Entry = I->first;
7540 if (Entry.second == CurrL)
7541 PredicatedSCEVRewrites.erase(I++);
7542 else
7543 ++I;
7544 }
7545
7546 auto LoopUsersItr = LoopUsers.find(CurrL);
7547 if (LoopUsersItr != LoopUsers.end()) {
7548 for (auto *S : LoopUsersItr->second)
7549 forgetMemoizedResults(S);
7550 LoopUsers.erase(LoopUsersItr);
7551 }
7552
7553 // Drop information about expressions based on loop-header PHIs.
7554 PushLoopPHIs(CurrL, Worklist);
7555
7556 while (!Worklist.empty()) {
7557 Instruction *I = Worklist.pop_back_val();
7558 if (!Visited.insert(I).second)
7559 continue;
7560
7561 ValueExprMapType::iterator It =
7562 ValueExprMap.find_as(static_cast<Value *>(I));
7563 if (It != ValueExprMap.end()) {
7564 eraseValueFromMap(It->first);
7565 forgetMemoizedResults(It->second);
7566 if (PHINode *PN = dyn_cast<PHINode>(I))
7567 ConstantEvolutionLoopExitValue.erase(PN);
7568 }
7569
7570 PushDefUseChildren(I, Worklist);
7571 }
7572
7573 LoopPropertiesCache.erase(CurrL);
7574 // Forget all contained loops too, to avoid dangling entries in the
7575 // ValuesAtScopes map.
7576 LoopWorklist.append(CurrL->begin(), CurrL->end());
7577 }
7578 }
7579
7580 void ScalarEvolution::forgetTopmostLoop(const Loop *L) {
7581 while (Loop *Parent = L->getParentLoop())
7582 L = Parent;
7583 forgetLoop(L);
7584 }
7585
7586 void ScalarEvolution::forgetValue(Value *V) {
7587 Instruction *I = dyn_cast<Instruction>(V);
7588 if (!I) return;
7589
7590 // Drop information about expressions based on loop-header PHIs.
7591 SmallVector<Instruction *, 16> Worklist;
7592 Worklist.push_back(I);
7593
7594 SmallPtrSet<Instruction *, 8> Visited;
7595 while (!Worklist.empty()) {
7596 I = Worklist.pop_back_val();
7597 if (!Visited.insert(I).second)
7598 continue;
7599
7600 ValueExprMapType::iterator It =
7601 ValueExprMap.find_as(static_cast<Value *>(I));
7602 if (It != ValueExprMap.end()) {
7603 eraseValueFromMap(It->first);
7604 forgetMemoizedResults(It->second);
7605 if (PHINode *PN = dyn_cast<PHINode>(I))
7606 ConstantEvolutionLoopExitValue.erase(PN);
7607 }
7608
7609 PushDefUseChildren(I, Worklist);
7610 }
7611 }
7612
7613 void ScalarEvolution::forgetLoopDispositions(const Loop *L) {
7614 LoopDispositions.clear();
7615 }
7616
7617 /// Get the exact loop backedge taken count considering all loop exits. A
7618 /// computable result can only be returned for loops with all exiting blocks
7619 /// dominating the latch. howFarToZero assumes that the limit of each loop test
7620 /// is never skipped. This is a valid assumption as long as the loop exits via
7621 /// that test. For precise results, it is the caller's responsibility to specify
7622 /// the relevant loop exiting block using getExact(ExitingBlock, SE).
7623 const SCEV *
7624 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE,
7625 SCEVUnionPredicate *Preds) const {
7626 // If any exits were not computable, the loop is not computable.
7627 if (!isComplete() || ExitNotTaken.empty())
7628 return SE->getCouldNotCompute();
7629
7630 const BasicBlock *Latch = L->getLoopLatch();
7631 // All exiting blocks we have collected must dominate the only backedge.
7632 if (!Latch)
7633 return SE->getCouldNotCompute();
7634
7635 // All exiting blocks we have gathered dominate the loop's latch, so the exact
7636 // trip count is simply the minimum of all these calculated exit counts.
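// For example (assumed counts): with two latch-dominating exits whose
// not-taken counts are %n and 10, the exact count is umin(%n, 10).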
7637 SmallVector<const SCEV *, 2> Ops;
7638 for (auto &ENT : ExitNotTaken) {
7639 const SCEV *BECount = ENT.ExactNotTaken;
7640 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
7641 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
7642 "We should only have known counts for exiting blocks that dominate "
7643 "latch!");
7644
7645 Ops.push_back(BECount);
7646
7647 if (Preds && !ENT.hasAlwaysTruePredicate())
7648 Preds->add(ENT.Predicate.get());
7649
7650 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
7651 "Predicate should be always true!");
7652 }
7653
7654 return SE->getUMinFromMismatchedTypes(Ops);
7655 }
7656
7657 /// Get the exact not taken count for this loop exit.
7658 const SCEV *
7659 ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock,
7660 ScalarEvolution *SE) const {
7661 for (auto &ENT : ExitNotTaken)
7662 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
7663 return ENT.ExactNotTaken;
7664
7665 return SE->getCouldNotCompute();
7666 }
7667
7668 const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
7669 const BasicBlock *ExitingBlock, ScalarEvolution *SE) const {
7670 for (auto &ENT : ExitNotTaken)
7671 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
7672 return ENT.MaxNotTaken;
7673
7674 return SE->getCouldNotCompute();
7675 }
7676
7677 /// getConstantMax - Get the constant max backedge taken count for the loop.
7678 const SCEV *
7679 ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const {
7680 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
7681 return !ENT.hasAlwaysTruePredicate();
7682 };
7683
7684 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getConstantMax())
7685 return SE->getCouldNotCompute();
7686
7687 assert((isa<SCEVCouldNotCompute>(getConstantMax()) ||
7688 isa<SCEVConstant>(getConstantMax())) &&
7689 "No point in having a non-constant max backedge taken count!");
7690 return getConstantMax();
7691 }
7692
7693 const SCEV *
7694 ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L,
7695 ScalarEvolution *SE) {
7696 if (!SymbolicMax)
7697 SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L);
7698 return SymbolicMax;
7699 }
7700
7701 bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
7702 ScalarEvolution *SE) const {
7703 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
7704 return !ENT.hasAlwaysTruePredicate();
7705 };
7706 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue);
7707 }
7708
7709 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S) const {
7710 return Operands.contains(S);
7711 }
7712
7713 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
7714 : ExitLimit(E, E, false, None) {
7715 }
7716
7717 ScalarEvolution::ExitLimit::ExitLimit(
7718 const SCEV *E, const SCEV *M, bool MaxOrZero,
7719 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList)
7720 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) {
7721 // If we prove the max count is zero, so is the symbolic bound. This happens
7722 // in practice due to differences in a) how context-sensitive we've chosen
7723 // to be and b) how we reason about bounds implied by UB.
7724 if (MaxNotTaken->isZero())
7725 ExactNotTaken = MaxNotTaken;
7726
7727 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) ||
7728 !isa<SCEVCouldNotCompute>(MaxNotTaken)) &&
7729 "Exact is not allowed to be less precise than Max");
7730 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
7731 isa<SCEVConstant>(MaxNotTaken)) &&
7732 "No point in having a non-constant max backedge taken count!");
7733 for (auto *PredSet : PredSetList)
7734 for (auto *P : *PredSet)
7735 addPredicate(P);
7736 assert((isa<SCEVCouldNotCompute>(E) || !E->getType()->isPointerTy()) &&
7737 "Backedge count should be int");
7738 assert((isa<SCEVCouldNotCompute>(M) || !M->getType()->isPointerTy()) &&
7739 "Max backedge count should be int");
7740 }
7741
7742 ScalarEvolution::ExitLimit::ExitLimit(
7743 const SCEV *E, const SCEV *M, bool MaxOrZero,
7744 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet)
7745 : ExitLimit(E, M, MaxOrZero, {&PredSet}) {
7746 }
7747
7748 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M,
7749 bool MaxOrZero)
7750 : ExitLimit(E, M, MaxOrZero, None) {
7751 }
7752
7753 class SCEVRecordOperands {
7754 SmallPtrSetImpl<const SCEV *> &Operands;
7755
7756 public:
7757 SCEVRecordOperands(SmallPtrSetImpl<const SCEV *> &Operands)
7758 : Operands(Operands) {}
7759 bool follow(const SCEV *S) {
7760 Operands.insert(S);
7761 return true;
7762 }
7763 bool isDone() { return false; }
7764 };
7765
7766 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
7767 /// computable exit into a persistent ExitNotTakenInfo array.
7768 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
7769 ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts,
7770 bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero)
7771 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
7772 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
7773
7774 ExitNotTaken.reserve(ExitCounts.size());
7775 std::transform(
7776 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
7777 [&](const EdgeExitInfo &EEI) {
7778 BasicBlock *ExitBB = EEI.first;
7779 const ExitLimit &EL = EEI.second;
7780 if (EL.Predicates.empty())
7781 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
7782 nullptr);
7783
7784 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
7785 for (auto *Pred : EL.Predicates)
7786 Predicate->add(Pred);
7787
7788 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
7789 std::move(Predicate));
7790 });
7791 assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
7792 isa<SCEVConstant>(ConstantMax)) &&
7793 "No point in having a non-constant max backedge taken count!");
7794
7795 SCEVRecordOperands RecordOperands(Operands);
7796 SCEVTraversal<SCEVRecordOperands> ST(RecordOperands);
7797 if (!isa<SCEVCouldNotCompute>(ConstantMax))
7798 ST.visitAll(ConstantMax);
7799 for (auto &ENT : ExitNotTaken)
7800 if (!isa<SCEVCouldNotCompute>(ENT.ExactNotTaken))
7801 ST.visitAll(ENT.ExactNotTaken);
7802 }
7803
7804 /// Compute the number of times the backedge of the specified loop will execute.
7805 ScalarEvolution::BackedgeTakenInfo
7806 ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
7807 bool AllowPredicates) {
7808 SmallVector<BasicBlock *, 8> ExitingBlocks;
7809 L->getExitingBlocks(ExitingBlocks);
7810
7811 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
7812
7813 SmallVector<EdgeExitInfo, 4> ExitCounts;
7814 bool CouldComputeBECount = true;
7815 BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
7816 const SCEV *MustExitMaxBECount = nullptr;
7817 const SCEV *MayExitMaxBECount = nullptr;
7818 bool MustExitMaxOrZero = false;
7819
7820 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
7821 // and compute maxBECount.
7822 // Do a union of all the predicates here.
7823 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
7824 BasicBlock *ExitBB = ExitingBlocks[i];
7825
7826 // We canonicalize untaken exits to br (constant), and ignore them so that
7827 // proving an exit untaken doesn't negatively impact our ability to reason
7828 // about the loop as a whole.
7829 if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
7830 if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
7831 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
7832 if ((ExitIfTrue && CI->isZero()) || (!ExitIfTrue && CI->isOne()))
7833 continue;
7834 }
7835
7836 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);
7837
7838 assert((AllowPredicates || EL.Predicates.empty()) &&
7839 "Predicated exit limit when predicates are not allowed!");
7840
7841 // 1. For each exit that can be computed, add an entry to ExitCounts.
7842 // CouldComputeBECount is true only if all exits can be computed.
7843 if (EL.ExactNotTaken == getCouldNotCompute())
7844 // We couldn't compute an exact value for this exit, so
7845 // we won't be able to compute an exact value for the loop.
7846 CouldComputeBECount = false;
7847 else
7848 ExitCounts.emplace_back(ExitBB, EL);
7849
7850 // 2. Derive the loop's MaxBECount from each exit's max number of
7851 // non-exiting iterations. Partition the loop exits into two kinds:
7852 // LoopMustExits and LoopMayExits.
7853 //
7854 // If the exit dominates the loop latch, it is a LoopMustExit otherwise it
7855 // is a LoopMayExit. If any computable LoopMustExit is found, then
7856 // MaxBECount is the minimum EL.MaxNotTaken of computable
7857 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
7858 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
7859 // computable EL.MaxNotTaken.
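// For example (assumed counts): given one exit that dominates the latch
// with MaxNotTaken 10 and a second, non-dominating exit with MaxNotTaken
// 20, MaxBECount is 10, since the must-exit bounds every path through the
// loop.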
7860 if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
7861 DT.dominates(ExitBB, Latch)) {
7862 if (!MustExitMaxBECount) {
7863 MustExitMaxBECount = EL.MaxNotTaken;
7864 MustExitMaxOrZero = EL.MaxOrZero;
7865 } else {
7866 MustExitMaxBECount =
7867 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
7868 }
7869 } else if (MayExitMaxBECount != getCouldNotCompute()) {
7870 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
7871 MayExitMaxBECount = EL.MaxNotTaken;
7872 else {
7873 MayExitMaxBECount =
7874 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
7875 }
7876 }
7877 }
7878 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
7879 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
7880 // The loop backedge will be taken the maximum or zero times if there's
7881 // a single exit that must be taken the maximum or zero times.
7882 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
7883 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
7884 MaxBECount, MaxOrZero);
7885 }
7886
7887 ScalarEvolution::ExitLimit
7888 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
7889 bool AllowPredicates) {
7890 assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
7891 // If our exiting block does not dominate the latch, then its connection with
7892 // the loop's exit limit may be far from trivial.
7893 const BasicBlock *Latch = L->getLoopLatch();
7894 if (!Latch || !DT.dominates(ExitingBlock, Latch))
7895 return getCouldNotCompute();
7896
7897 bool IsOnlyExit = (L->getExitingBlock() != nullptr);
7898 Instruction *Term = ExitingBlock->getTerminator();
7899 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
7900 assert(BI->isConditional() && "If unconditional, it can't be in loop!");
7901 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
7902 assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
7903 "It should have one successor in loop and one exit block!");
7904 // Proceed to the next level to examine the exit condition expression.
7905 return computeExitLimitFromCond(
7906 L, BI->getCondition(), ExitIfTrue,
7907 /*ControlsExit=*/IsOnlyExit, AllowPredicates);
7908 }
7909
7910 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
7911 // For switch, make sure that there is a single exit from the loop.
7912 BasicBlock *Exit = nullptr;
7913 for (auto *SBB : successors(ExitingBlock))
7914 if (!L->contains(SBB)) {
7915 if (Exit) // Multiple exit successors.
7916 return getCouldNotCompute();
7917 Exit = SBB;
7918 }
7919 assert(Exit && "Exiting block must have at least one exit");
7920 return computeExitLimitFromSingleExitSwitch(L, SI, Exit,
7921 /*ControlsExit=*/IsOnlyExit);
7922 }
7923
7924 return getCouldNotCompute();
7925 }
7926
7927 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond(
7928 const Loop *L, Value *ExitCond, bool ExitIfTrue,
7929 bool ControlsExit, bool AllowPredicates) {
7930 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
7931 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
7932 ControlsExit, AllowPredicates);
7933 }
7934
7935 Optional<ScalarEvolution::ExitLimit>
7936 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond,
7937 bool ExitIfTrue, bool ControlsExit,
7938 bool AllowPredicates) {
7939 (void)this->L;
7940 (void)this->ExitIfTrue;
7941 (void)this->AllowPredicates;
7942
7943 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
7944 this->AllowPredicates == AllowPredicates &&
7945 "Variance in assumed invariant key components!");
7946 auto Itr = TripCountMap.find({ExitCond, ControlsExit});
7947 if (Itr == TripCountMap.end())
7948 return None;
7949 return Itr->second;
7950 }
7951
7952 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond,
7953 bool ExitIfTrue,
7954 bool ControlsExit,
7955 bool AllowPredicates,
7956 const ExitLimit &EL) {
7957 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
7958 this->AllowPredicates == AllowPredicates &&
7959 "Variance in assumed invariant key components!");
7960
7961 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL});
7962 assert(InsertResult.second && "Expected successful insertion!");
7963 (void)InsertResult;
7964 (void)ExitIfTrue;
7965 }
7966
7967 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
7968 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
7969 bool ControlsExit, bool AllowPredicates) {
7970
7971 if (auto MaybeEL =
7972 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
7973 return *MaybeEL;
7974
7975 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue,
7976 ControlsExit, AllowPredicates);
7977 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL);
7978 return EL;
7979 }
7980
7981 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
7982 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
7983 bool ControlsExit, bool AllowPredicates) {
7984 // Handle BinOp conditions (And, Or).
7985 if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
7986 Cache, L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
7987 return *LimitFromBinOp;
7988
7989 // With an icmp, it may be feasible to compute an exact backedge-taken count.
7990 // Proceed to the next level to examine the icmp.
7991 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
7992 ExitLimit EL =
7993 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit);
7994 if (EL.hasFullInfo() || !AllowPredicates)
7995 return EL;
7996
7997 // Try again, but use SCEV predicates this time.
7998 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit,
7999 /*AllowPredicates=*/true);
8000 }
8001
8002 // Check for a constant condition. These are normally stripped out by
8003 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
8004 // preserve the CFG and is temporarily leaving constant conditions
8005 // in place.
8006 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
8007 if (ExitIfTrue == !CI->getZExtValue())
8008 // The backedge is always taken.
8009 return getCouldNotCompute();
8010 else
8011 // The backedge is never taken.
8012 return getZero(CI->getType());
8013 }
8014
8015 // If it's not an integer or pointer comparison then compute it the hard way.
8016 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
8017 }
8018
8019 Optional<ScalarEvolution::ExitLimit>
8020 ScalarEvolution::computeExitLimitFromCondFromBinOp(
8021 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
8022 bool ControlsExit, bool AllowPredicates) {
8023 // Check if the controlling expression for this loop is an And or Or.
8024 Value *Op0, *Op1;
8025 bool IsAnd = false;
8026 if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1))))
8027 IsAnd = true;
8028 else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
8029 IsAnd = false;
8030 else
8031 return None;
8032
8033 // EitherMayExit is true in these two cases:
8034 // br (and Op0 Op1), loop, exit
8035 // br (or Op0 Op1), exit, loop
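// For example (an assumed branch): for
//   br i1 (and i1 %c0, %c1), label %loop, label %exit
// the loop exits as soon as either %c0 or %c1 is false, so ExitIfTrue is
// false, EitherMayExit is true, and the two exit counts are combined with
// umin below.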
8036 bool EitherMayExit = IsAnd ^ ExitIfTrue;
8037 ExitLimit EL0 = computeExitLimitFromCondCached(Cache, L, Op0, ExitIfTrue,
8038 ControlsExit && !EitherMayExit,
8039 AllowPredicates);
8040 ExitLimit EL1 = computeExitLimitFromCondCached(Cache, L, Op1, ExitIfTrue,
8041 ControlsExit && !EitherMayExit,
8042 AllowPredicates);
8043
8044 // Be robust against unsimplified IR of the form "op i1 X, NeutralElement".
8045 const Constant *NeutralElement = ConstantInt::get(ExitCond->getType(), IsAnd);
8046 if (isa<ConstantInt>(Op1))
8047 return Op1 == NeutralElement ? EL0 : EL1;
8048 if (isa<ConstantInt>(Op0))
8049 return Op0 == NeutralElement ? EL1 : EL0;
8050
8051 const SCEV *BECount = getCouldNotCompute();
8052 const SCEV *MaxBECount = getCouldNotCompute();
8053 if (EitherMayExit) {
8054 // Neither condition may trigger the exit for the loop to continue
8055 // executing, so choose the less conservative (smaller) count.
8056 // If ExitCond is a short-circuit form (select), using
8057 // umin(EL0.ExactNotTaken, EL1.ExactNotTaken) is unsafe in general.
8058 // To see the detailed examples, please see
8059 // test/Analysis/ScalarEvolution/exit-count-select.ll
8060 bool PoisonSafe = isa<BinaryOperator>(ExitCond);
8061 if (!PoisonSafe)
8062 // Even if ExitCond is select, we can safely derive BECount using both
8063 // EL0 and EL1 in these cases:
8064 // (1) EL0.ExactNotTaken is non-zero
8065 // (2) EL1.ExactNotTaken is non-poison
8066 // (3) EL0.ExactNotTaken is zero (BECount should be simply zero and
8067 // it cannot be umin(0, ..))
8068 // The PoisonSafe assignment below is simplified and the assertion after
8069 // BECount calculation fully guarantees the condition (3).
8070 PoisonSafe = isa<SCEVConstant>(EL0.ExactNotTaken) ||
8071 isa<SCEVConstant>(EL1.ExactNotTaken);
8072 if (EL0.ExactNotTaken != getCouldNotCompute() &&
8073 EL1.ExactNotTaken != getCouldNotCompute() && PoisonSafe) {
8074 BECount =
8075 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
8076
8077 // If EL0.ExactNotTaken was zero and ExitCond was a short-circuit form,
8078 // it should have been simplified to zero (see the condition (3) above)
8079 assert(!isa<BinaryOperator>(ExitCond) || !EL0.ExactNotTaken->isZero() ||
8080 BECount->isZero());
8081 }
8082 if (EL0.MaxNotTaken == getCouldNotCompute())
8083 MaxBECount = EL1.MaxNotTaken;
8084 else if (EL1.MaxNotTaken == getCouldNotCompute())
8085 MaxBECount = EL0.MaxNotTaken;
8086 else
8087 MaxBECount = getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
8088 } else {
8089 // Both conditions must trigger the exit at the same time for the loop to exit.
8090 // For now, be conservative.
8091 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
8092 BECount = EL0.ExactNotTaken;
8093 }
8094
8095 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
8096 // to be more aggressive when computing BECount than when computing
8097 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and
8098 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken
8099 // to not.
8100 if (isa<SCEVCouldNotCompute>(MaxBECount) &&
8101 !isa<SCEVCouldNotCompute>(BECount))
8102 MaxBECount = getConstant(getUnsignedRangeMax(BECount));
8103
8104 return ExitLimit(BECount, MaxBECount, false,
8105 { &EL0.Predicates, &EL1.Predicates });
8106 }
8107
8108 ScalarEvolution::ExitLimit
8109 ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
8110 ICmpInst *ExitCond,
8111 bool ExitIfTrue,
8112 bool ControlsExit,
8113 bool AllowPredicates) {
8114 // If the condition was exit on true, convert the condition to exit on false
8115 ICmpInst::Predicate Pred;
8116 if (!ExitIfTrue)
8117 Pred = ExitCond->getPredicate();
8118 else
8119 Pred = ExitCond->getInversePredicate();
8120 const ICmpInst::Predicate OriginalPred = Pred;
8121
8122 // Handle common loops like: for (X = "string"; *X; ++X)
8123 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
8124 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
8125 ExitLimit ItCnt =
8126 computeLoadConstantCompareExitLimit(LI, RHS, L, Pred);
8127 if (ItCnt.hasAnyInfo())
8128 return ItCnt;
8129 }
8130
8131 const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
8132 const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
8133
8134 // Try to evaluate any dependencies out of the loop.
8135 LHS = getSCEVAtScope(LHS, L);
8136 RHS = getSCEVAtScope(RHS, L);
8137
8138 // At this point, we would like to compute how many iterations of the
8139 // loop the predicate will return true for these inputs.
8140 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
8141 // If there is a loop-invariant, force it into the RHS.
8142 std::swap(LHS, RHS);
8143 Pred = ICmpInst::getSwappedPredicate(Pred);
8144 }
8145
8146 // Simplify the operands before analyzing them.
8147 (void)SimplifyICmpOperands(Pred, LHS, RHS);
8148
8149 // If we have a comparison of a chrec against a constant, try to use value
8150 // ranges to answer this query.
8151 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
8152 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
8153 if (AddRec->getLoop() == L) {
8154 // Form the constant range.
8155 ConstantRange CompRange =
8156 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt());
8157
8158 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
8159 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
8160 }
8161
8162 switch (Pred) {
8163 case ICmpInst::ICMP_NE: { // while (X != Y)
8164 // Convert to: while (X-Y != 0)
8165 if (LHS->getType()->isPointerTy()) {
8166 LHS = getLosslessPtrToIntExpr(LHS);
8167 if (isa<SCEVCouldNotCompute>(LHS))
8168 return LHS;
8169 }
8170 if (RHS->getType()->isPointerTy()) {
8171 RHS = getLosslessPtrToIntExpr(RHS);
8172 if (isa<SCEVCouldNotCompute>(RHS))
8173 return RHS;
8174 }
8175 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit,
8176 AllowPredicates);
8177 if (EL.hasAnyInfo()) return EL;
8178 break;
8179 }
8180 case ICmpInst::ICMP_EQ: { // while (X == Y)
8181 // Convert to: while (X-Y == 0)
8182 if (LHS->getType()->isPointerTy()) {
8183 LHS = getLosslessPtrToIntExpr(LHS);
8184 if (isa<SCEVCouldNotCompute>(LHS))
8185 return LHS;
8186 }
8187 if (RHS->getType()->isPointerTy()) {
8188 RHS = getLosslessPtrToIntExpr(RHS);
8189 if (isa<SCEVCouldNotCompute>(RHS))
8190 return RHS;
8191 }
8192 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
8193 if (EL.hasAnyInfo()) return EL;
8194 break;
8195 }
8196 case ICmpInst::ICMP_SLT:
8197 case ICmpInst::ICMP_ULT: { // while (X < Y)
8198 bool IsSigned = Pred == ICmpInst::ICMP_SLT;
8199 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit,
8200 AllowPredicates);
8201 if (EL.hasAnyInfo()) return EL;
8202 break;
8203 }
8204 case ICmpInst::ICMP_SGT:
8205 case ICmpInst::ICMP_UGT: { // while (X > Y)
8206 bool IsSigned = Pred == ICmpInst::ICMP_SGT;
8207 ExitLimit EL =
8208 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit,
8209 AllowPredicates);
8210 if (EL.hasAnyInfo()) return EL;
8211 break;
8212 }
8213 default:
8214 break;
8215 }
8216
8217 auto *ExhaustiveCount =
8218 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
8219
8220 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
8221 return ExhaustiveCount;
8222
8223 return computeShiftCompareExitLimit(ExitCond->getOperand(0),
8224 ExitCond->getOperand(1), L, OriginalPred);
8225 }
8226
8227 ScalarEvolution::ExitLimit
8228 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
8229 SwitchInst *Switch,
8230 BasicBlock *ExitingBlock,
8231 bool ControlsExit) {
8232 assert(!L->contains(ExitingBlock) && "Not an exiting block!");
8233
8234 // Give up if the exit is the default dest of a switch.
8235 if (Switch->getDefaultDest() == ExitingBlock)
8236 return getCouldNotCompute();
8237
8238 assert(L->contains(Switch->getDefaultDest()) &&
8239 "Default case must not exit the loop!");
8240 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
8241 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));
8242
8243 // while (X != Y) --> while (X-Y != 0)
8244 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
8245 if (EL.hasAnyInfo())
8246 return EL;
8247
8248 return getCouldNotCompute();
8249 }
8250
8251 static ConstantInt *
8252 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
8253 ScalarEvolution &SE) {
8254 const SCEV *InVal = SE.getConstant(C);
8255 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
8256 assert(isa<SCEVConstant>(Val) &&
8257 "Evaluation of SCEV at constant didn't fold correctly?");
8258 return cast<SCEVConstant>(Val)->getValue();
8259 }
8260
8261 /// Given an exit condition of 'icmp op load X, cst', try to see if we can
8262 /// compute the backedge execution count.
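/// For example (a sketch with assumed IR): a loop like
///   for (i = 0; Table[i] != 0; ++i)
/// where Table is a constant global array and i is an affine AddRec is
/// handled by folding each iteration's load through the GEP indices until
/// the comparison is known to fail, yielding a constant exit count.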
8263 ScalarEvolution::ExitLimit
8264 ScalarEvolution::computeLoadConstantCompareExitLimit(
8265 LoadInst *LI,
8266 Constant *RHS,
8267 const Loop *L,
8268 ICmpInst::Predicate predicate) {
8269 if (LI->isVolatile()) return getCouldNotCompute();
8270
8271 // Check to see if the loaded pointer is a getelementptr of a global.
8272 // TODO: Use SCEV instead of manually grubbing with GEPs.
8273 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
8274 if (!GEP) return getCouldNotCompute();
8275
8276 // Make sure that it is really a constant global we are gepping, with an
8277 // initializer, and make sure the first IDX is really 0.
8278 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
8279 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
8280 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
8281 !cast<Constant>(GEP->getOperand(1))->isNullValue())
8282 return getCouldNotCompute();
8283
8284 // Okay, we allow one non-constant index into the GEP instruction.
8285 Value *VarIdx = nullptr;
8286 std::vector<Constant*> Indexes;
8287 unsigned VarIdxNum = 0;
8288 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
8289 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
8290 Indexes.push_back(CI);
8291 } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
8292 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
8293 VarIdx = GEP->getOperand(i);
8294 VarIdxNum = i-2;
8295 Indexes.push_back(nullptr);
8296 }
8297
8298 // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
8299 if (!VarIdx)
8300 return getCouldNotCompute();
8301
8302 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
8303 // Check to see if X is a loop variant variable value now.
8304 const SCEV *Idx = getSCEV(VarIdx);
8305 Idx = getSCEVAtScope(Idx, L);
8306
8307 // We can only recognize very limited forms of loop index expressions, in
8308 // particular, only affine AddRec's like {C1,+,C2}<L>.
8309 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
8310 if (!IdxExpr || IdxExpr->getLoop() != L || !IdxExpr->isAffine() ||
8311 isLoopInvariant(IdxExpr, L) ||
8312 !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
8313 !isa<SCEVConstant>(IdxExpr->getOperand(1)))
8314 return getCouldNotCompute();
8315
8316 unsigned MaxSteps = MaxBruteForceIterations;
8317 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
8318 ConstantInt *ItCst = ConstantInt::get(
8319 cast<IntegerType>(IdxExpr->getType()), IterationNum);
8320 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
8321
8322 // Form the GEP offset.
8323 Indexes[VarIdxNum] = Val;
8324
8325 Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
8326 Indexes);
8327 if (!Result) break; // Cannot compute!
8328
8329 // Evaluate the condition for this iteration.
8330 Result = ConstantExpr::getICmp(predicate, Result, RHS);
8331 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
8332 if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
8333 ++NumArrayLenItCounts;
8334 return getConstant(ItCst); // Found terminating iteration!
8335 }
8336 }
8337 return getCouldNotCompute();
8338 }
8339
8340 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
8341 Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
8342 ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
8343 if (!RHS)
8344 return getCouldNotCompute();
8345
8346 const BasicBlock *Latch = L->getLoopLatch();
8347 if (!Latch)
8348 return getCouldNotCompute();
8349
8350 const BasicBlock *Predecessor = L->getLoopPredecessor();
8351 if (!Predecessor)
8352 return getCouldNotCompute();
8353
8354 // Return true if V is of the form "LHS `shift_op` <positive constant>".
8355 // Return LHS in OutLHS and shift_op in OutOpCode.
8356 auto MatchPositiveShift =
8357 [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {
8358
8359 using namespace PatternMatch;
8360
8361 ConstantInt *ShiftAmt;
8362 if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
8363 OutOpCode = Instruction::LShr;
8364 else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
8365 OutOpCode = Instruction::AShr;
8366 else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
8367 OutOpCode = Instruction::Shl;
8368 else
8369 return false;
8370
8371 return ShiftAmt->getValue().isStrictlyPositive();
8372 };
8373
8374 // Recognize a "shift recurrence" either of the form %iv or of the form %iv.shifted in
8375 //
8376 // loop:
8377 // %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
8378 // %iv.shifted = lshr i32 %iv, <positive constant>
8379 //
8380 // Return true on a successful match. Return the corresponding PHI node (%iv
8381 // above) in PNOut and the opcode of the shift operation in OpCodeOut.
8382 auto MatchShiftRecurrence =
8383 [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
8384 Optional<Instruction::BinaryOps> PostShiftOpCode;
8385
8386 {
8387 Instruction::BinaryOps OpC;
8388 Value *V;
8389
8390 // If we encounter a shift instruction, "peel off" the shift operation,
8391 // and remember that we did so. Later when we inspect %iv's backedge
8392 // value, we will make sure that the backedge value uses the same
8393 // operation.
8394 //
8395 // Note: the peeled shift operation does not have to be the same
8396 // instruction as the one feeding into the PHI's backedge value. We only
8397 // really care about it being the same *kind* of shift instruction --
8398 // that's all that is required for our later inferences to hold.
8399 if (MatchPositiveShift(LHS, V, OpC)) {
8400 PostShiftOpCode = OpC;
8401 LHS = V;
8402 }
8403 }
8404
8405 PNOut = dyn_cast<PHINode>(LHS);
8406 if (!PNOut || PNOut->getParent() != L->getHeader())
8407 return false;
8408
8409 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
8410 Value *OpLHS;
8411
8412 return
8413 // The backedge value for the PHI node must be a shift by a positive
8414 // amount
8415 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
8416
8417 // of the PHI node itself
8418 OpLHS == PNOut &&
8419
8420 // and the kind of shift should match the kind of shift we peeled
8421 // off, if any.
8422 (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
8423 };
8424
8425 PHINode *PN;
8426 Instruction::BinaryOps OpCode;
8427 if (!MatchShiftRecurrence(LHS, PN, OpCode))
8428 return getCouldNotCompute();
8429
8430 const DataLayout &DL = getDataLayout();
8431
8432 // The key rationale for this optimization is that for some kinds of shift
8433 // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
8434 // within a finite number of iterations. If the condition guarding the
8435 // backedge (in the sense that the backedge is taken if the condition is true)
8436 // is false for the value the shift recurrence stabilizes to, then we know
8437 // that the backedge is taken only a finite number of times.
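// For example (a sketch with concrete values): an i8 recurrence
// %x = ashr i8 %x, 1 starting at -8 produces -8, -4, -2, -1, -1, ... and
// stabilizes to -1, while the same recurrence starting at 5 produces
// 5, 2, 1, 0, 0, ... and stabilizes to 0.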
8438
8439 ConstantInt *StableValue = nullptr;
8440 switch (OpCode) {
8441 default:
8442 llvm_unreachable("Impossible case!");
8443
8444 case Instruction::AShr: {
8445 // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
8446 // bitwidth(K) iterations.
8447 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
8448 KnownBits Known = computeKnownBits(FirstValue, DL, 0, &AC,
8449 Predecessor->getTerminator(), &DT);
8450 auto *Ty = cast<IntegerType>(RHS->getType());
8451 if (Known.isNonNegative())
8452 StableValue = ConstantInt::get(Ty, 0);
8453 else if (Known.isNegative())
8454 StableValue = ConstantInt::get(Ty, -1, true);
8455 else
8456 return getCouldNotCompute();
8457
8458 break;
8459 }
8460 case Instruction::LShr:
8461 case Instruction::Shl:
8462 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
8463 // stabilize to 0 in at most bitwidth(K) iterations.
8464 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
8465 break;
8466 }
8467
8468 auto *Result =
8469 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
8470 assert(Result->getType()->isIntegerTy(1) &&
8471 "Otherwise cannot be an operand to a branch instruction");
8472
8473 if (Result->isZeroValue()) {
8474 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
8475 const SCEV *UpperBound =
8476 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
8477 return ExitLimit(getCouldNotCompute(), UpperBound, false);
8478 }
8479
8480 return getCouldNotCompute();
8481 }
8482
8483 /// Return true if we can constant fold an instruction of the specified type,
8484 /// assuming that all operands were constants.
8485 static bool CanConstantFold(const Instruction *I) {
8486 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
8487 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
8488 isa<LoadInst>(I) || isa<ExtractValueInst>(I))
8489 return true;
8490
8491 if (const CallInst *CI = dyn_cast<CallInst>(I))
8492 if (const Function *F = CI->getCalledFunction())
8493 return canConstantFoldCallTo(CI, F);
8494 return false;
8495 }
8496
8497 /// Determine whether this instruction can constant evolve within this loop
8498 /// assuming its operands can all constant evolve.
8499 static bool canConstantEvolve(Instruction *I, const Loop *L) {
8500 // An instruction outside of the loop can't be derived from a loop PHI.
8501 if (!L->contains(I)) return false;
8502
8503 if (isa<PHINode>(I)) {
8504 // We don't currently keep track of the control flow needed to evaluate
8505 // PHIs, so we cannot handle PHIs inside of loops.
8506 return L->getHeader() == I->getParent();
8507 }
8508
8509 // If we won't be able to constant fold this expression even if the operands
8510 // are constants, bail early.
8511 return CanConstantFold(I);
8512 }
8513
8514 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
8515 /// recursing through each instruction operand until reaching a loop header phi.
8516 static PHINode *
8517 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
8518 DenseMap<Instruction *, PHINode *> &PHIMap,
8519 unsigned Depth) {
8520 if (Depth > MaxConstantEvolvingDepth)
8521 return nullptr;
8522
8523 // Otherwise, we can evaluate this instruction if all of its operands are
8524 // constant or derived from a PHI node themselves.
8525 PHINode *PHI = nullptr;
8526 for (Value *Op : UseInst->operands()) {
8527 if (isa<Constant>(Op)) continue;
8528
8529 Instruction *OpInst = dyn_cast<Instruction>(Op);
8530 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;
8531
8532 PHINode *P = dyn_cast<PHINode>(OpInst);
8533 if (!P)
8534 // If this operand is already visited, reuse the prior result.
8535 // We may have P != PHI if this is the deepest point at which the
8536 // inconsistent paths meet.
8537 P = PHIMap.lookup(OpInst);
8538 if (!P) {
8539 // Recurse and memoize the results, whether a phi is found or not.
8540 // This recursive call invalidates pointers into PHIMap.
8541 P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
8542 PHIMap[OpInst] = P;
8543 }
8544 if (!P)
8545 return nullptr; // Not evolving from PHI
8546 if (PHI && PHI != P)
8547 return nullptr; // Evolving from multiple different PHIs.
8548 PHI = P;
8549 }
8550 // This is an expression evolving from a constant PHI!
8551 return PHI;
8552 }
8553
8554 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
8555 /// in the loop that V is derived from. We allow arbitrary operations along the
8556 /// way, but the operands of an operation must either be constants or a value
8557 /// derived from a constant PHI. If this expression does not fit with these
8558 /// constraints, return null.
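/// For illustration, in the hypothetical loop
///
///   loop:
///     %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
///     %iv.next = add i32 %iv, 3
///
/// the value %iv.next is derived only from the header PHI %iv and constants,
/// so getConstantEvolvingPHI(%iv.next, L) returns %iv.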
8559 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
8560 Instruction *I = dyn_cast<Instruction>(V);
8561 if (!I || !canConstantEvolve(I, L)) return nullptr;
8562
8563 if (PHINode *PN = dyn_cast<PHINode>(I))
8564 return PN;
8565
8566 // Record non-constant instructions contained by the loop.
8567 DenseMap<Instruction *, PHINode *> PHIMap;
8568 return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
8569 }
8570
8571 /// EvaluateExpression - Given an expression that passes the
8572 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
8573 /// in the loop has the value PHIVal. If we can't fold this expression for some
8574 /// reason, return null.
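/// For illustration (hypothetical values): with Vals mapping %iv to i32 7,
/// evaluating %t = add i32 %iv, 1 and then %c = icmp eq i32 %t, 8 folds,
/// one operand at a time through the recursion below, to i1 true.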
8575 static Constant *EvaluateExpression(Value *V, const Loop *L,
8576 DenseMap<Instruction *, Constant *> &Vals,
8577 const DataLayout &DL,
8578 const TargetLibraryInfo *TLI) {
8579 // Convenient constant check, but redundant for recursive calls.
8580 if (Constant *C = dyn_cast<Constant>(V)) return C;
8581 Instruction *I = dyn_cast<Instruction>(V);
8582 if (!I) return nullptr;
8583
8584 if (Constant *C = Vals.lookup(I)) return C;
8585
8586 // An instruction inside the loop depends on a value outside the loop that we
8587 // weren't given a mapping for, or a value such as a call inside the loop.
8588 if (!canConstantEvolve(I, L)) return nullptr;
8589
8590 // An unmapped PHI can be due to a branch or another loop inside this loop,
8591 // or due to this not being the initial iteration through a loop where we
8592 // couldn't compute the evolution of this particular PHI last time.
8593 if (isa<PHINode>(I)) return nullptr;
8594
8595 std::vector<Constant*> Operands(I->getNumOperands());
8596
8597 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
8598 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
8599 if (!Operand) {
8600 Operands[i] = dyn_cast<Constant>(I->getOperand(i));
8601 if (!Operands[i]) return nullptr;
8602 continue;
8603 }
8604 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
8605 Vals[Operand] = C;
8606 if (!C) return nullptr;
8607 Operands[i] = C;
8608 }
8609
8610 if (CmpInst *CI = dyn_cast<CmpInst>(I))
8611 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
8612 Operands[1], DL, TLI);
8613 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
8614 if (!LI->isVolatile())
8615 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
8616 }
8617 return ConstantFoldInstOperands(I, Operands, DL, TLI);
8618 }
8619
8620
8621 // If every incoming value to PN except the one for BB is a specific Constant,
8622 // return that, else return nullptr.
8623 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) {
8624 Constant *IncomingVal = nullptr;
8625
8626 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
8627 if (PN->getIncomingBlock(i) == BB)
8628 continue;
8629
8630 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
8631 if (!CurrentVal)
8632 return nullptr;
8633
8634 if (IncomingVal != CurrentVal) {
8635 if (IncomingVal)
8636 return nullptr;
8637 IncomingVal = CurrentVal;
8638 }
8639 }
8640
8641 return IncomingVal;
8642 }
8643
8644 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
8645 /// in the header of its containing loop, we know the loop executes a
8646 /// constant number of times, and the PHI node is just a recurrence
8647 /// involving constants, fold it.
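/// For example (a sketch): for a header PHI
///   %p = phi i32 [ 1, %preheader ], [ %p.next, %latch ]
/// with %p.next = mul i32 %p, 2 and a backedge-taken count of 5, the
/// symbolic execution below produces 1, 2, 4, 8, 16, 32 and returns i32 32.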
8648 Constant *
8649 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
8650 const APInt &BEs,
8651 const Loop *L) {
8652 auto I = ConstantEvolutionLoopExitValue.find(PN);
8653 if (I != ConstantEvolutionLoopExitValue.end())
8654 return I->second;
8655
8656 if (BEs.ugt(MaxBruteForceIterations))
8657 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it.
8658
8659 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
8660
8661 DenseMap<Instruction *, Constant *> CurrentIterVals;
8662 BasicBlock *Header = L->getHeader();
8663 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
8664
8665 BasicBlock *Latch = L->getLoopLatch();
8666 if (!Latch)
8667 return nullptr;
8668
8669 for (PHINode &PHI : Header->phis()) {
8670 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
8671 CurrentIterVals[&PHI] = StartCST;
8672 }
8673 if (!CurrentIterVals.count(PN))
8674 return RetVal = nullptr;
8675
8676 Value *BEValue = PN->getIncomingValueForBlock(Latch);
8677
8678 // Execute the loop symbolically to determine the exit value.
8679 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) &&
8680 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
8681
8682 unsigned NumIterations = BEs.getZExtValue(); // must be in range
8683 unsigned IterationNum = 0;
8684 const DataLayout &DL = getDataLayout();
8685 for (; ; ++IterationNum) {
8686 if (IterationNum == NumIterations)
8687 return RetVal = CurrentIterVals[PN]; // Got exit value!
8688
8689 // Compute the value of the PHIs for the next iteration.
8690 // EvaluateExpression adds non-phi values to the CurrentIterVals map.
8691 DenseMap<Instruction *, Constant *> NextIterVals;
8692 Constant *NextPHI =
8693 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8694 if (!NextPHI)
8695 return nullptr; // Couldn't evaluate!
8696 NextIterVals[PN] = NextPHI;
8697
8698 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
8699
8700 // Also evaluate the other PHI nodes. However, we don't get to stop if we
8701 // cease to be able to evaluate one of them or if they stop evolving,
8702 // because that doesn't necessarily prevent us from computing PN.
8703 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
8704 for (const auto &I : CurrentIterVals) {
8705 PHINode *PHI = dyn_cast<PHINode>(I.first);
8706 if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
8707 PHIsToCompute.emplace_back(PHI, I.second);
8708 }
8709 // We use two distinct loops because EvaluateExpression may invalidate any
8710 // iterators into CurrentIterVals.
8711 for (const auto &I : PHIsToCompute) {
8712 PHINode *PHI = I.first;
8713 Constant *&NextPHI = NextIterVals[PHI];
8714 if (!NextPHI) { // Not already computed.
8715 Value *BEValue = PHI->getIncomingValueForBlock(Latch);
8716 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8717 }
8718 if (NextPHI != I.second)
8719 StoppedEvolving = false;
8720 }
8721
8722 // If all entries in CurrentIterVals == NextIterVals then we can stop
8723 // iterating, the loop can't continue to change.
8724 if (StoppedEvolving)
8725 return RetVal = CurrentIterVals[PN];
8726
8727 CurrentIterVals.swap(NextIterVals);
8728 }
8729 }
8730
8731 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
8732 Value *Cond,
8733 bool ExitWhen) {
8734 PHINode *PN = getConstantEvolvingPHI(Cond, L);
8735 if (!PN) return getCouldNotCompute();
8736
8737 // If the loop is canonicalized, the PHI will have exactly two entries.
8738 // That's the only form we support here.
8739 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
8740
8741 DenseMap<Instruction *, Constant *> CurrentIterVals;
8742 BasicBlock *Header = L->getHeader();
8743 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
8744
8745 BasicBlock *Latch = L->getLoopLatch();
8746 assert(Latch && "Should follow from NumIncomingValues == 2!");
8747
8748 for (PHINode &PHI : Header->phis()) {
8749 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
8750 CurrentIterVals[&PHI] = StartCST;
8751 }
8752 if (!CurrentIterVals.count(PN))
8753 return getCouldNotCompute();
8754
8755 // Okay, we found a PHI node that defines the trip count of this loop. Execute
8756 // the loop symbolically to determine when the condition gets a value of
8757 // "ExitWhen".
8758 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
8759 const DataLayout &DL = getDataLayout();
8760 for (unsigned IterationNum = 0; IterationNum != MaxIterations; ++IterationNum) {
8761 auto *CondVal = dyn_cast_or_null<ConstantInt>(
8762 EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));
8763
8764 // Couldn't symbolically evaluate.
8765 if (!CondVal) return getCouldNotCompute();
8766
8767 if (CondVal->getValue() == uint64_t(ExitWhen)) {
8768 ++NumBruteForceTripCountsComputed;
8769 return getConstant(Type::getInt32Ty(getContext()), IterationNum);
8770 }
8771
8772 // Update all the PHI nodes for the next iteration.
8773 DenseMap<Instruction *, Constant *> NextIterVals;
8774
8775 // Create a list of which PHIs we need to compute. We want to do this before
8776 // calling EvaluateExpression on them because that may invalidate iterators
8777 // into CurrentIterVals.
8778 SmallVector<PHINode *, 8> PHIsToCompute;
8779 for (const auto &I : CurrentIterVals) {
8780 PHINode *PHI = dyn_cast<PHINode>(I.first);
8781 if (!PHI || PHI->getParent() != Header) continue;
8782 PHIsToCompute.push_back(PHI);
8783 }
8784 for (PHINode *PHI : PHIsToCompute) {
8785 Constant *&NextPHI = NextIterVals[PHI];
8786 if (NextPHI) continue; // Already computed!
8787
8788 Value *BEValue = PHI->getIncomingValueForBlock(Latch);
8789 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8790 }
8791 CurrentIterVals.swap(NextIterVals);
8792 }
8793
8794 // Too many iterations were needed to evaluate.
8795 return getCouldNotCompute();
8796 }
8797
8798 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
8799 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
8800 ValuesAtScopes[V];
8801 // Check to see if we've folded this expression at this loop before.
8802 for (auto &LS : Values)
8803 if (LS.first == L)
8804 return LS.second ? LS.second : V;
8805
8806 Values.emplace_back(L, nullptr);
8807
8808 // Otherwise compute it.
8809 const SCEV *C = computeSCEVAtScope(V, L);
8810 for (auto &LS : reverse(ValuesAtScopes[V]))
8811 if (LS.first == L) {
8812 LS.second = C;
8813 break;
8814 }
8815 return C;
8816 }
8817
8818 /// This builds up a Constant using the ConstantExpr interface. That way, we
8819 /// will return Constants for objects which aren't represented by a
8820 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
8821 /// Returns NULL if the SCEV isn't representable as a Constant.
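/// For example, the SCEV (8 + @g), where @g is a global's address, is rebuilt
/// below as a getelementptr over @g bitcast to i8*, with 8 as the byte offset;
/// purely integer adds and muls use the corresponding ConstantExpr instead.
/// (Illustrative shapes only; see the individual cases below.)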
8822 static Constant *BuildConstantFromSCEV(const SCEV *V) {
8823 switch (V->getSCEVType()) {
8824 case scCouldNotCompute:
8825 case scAddRecExpr:
8826 return nullptr;
8827 case scConstant:
8828 return cast<SCEVConstant>(V)->getValue();
8829 case scUnknown:
8830 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
8831 case scSignExtend: {
8832 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
8833 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
8834 return ConstantExpr::getSExt(CastOp, SS->getType());
8835 return nullptr;
8836 }
8837 case scZeroExtend: {
8838 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
8839 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
8840 return ConstantExpr::getZExt(CastOp, SZ->getType());
8841 return nullptr;
8842 }
8843 case scPtrToInt: {
8844 const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V);
8845 if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand()))
8846 return ConstantExpr::getPtrToInt(CastOp, P2I->getType());
8847
8848 return nullptr;
8849 }
8850 case scTruncate: {
8851 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
8852 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
8853 return ConstantExpr::getTrunc(CastOp, ST->getType());
8854 return nullptr;
8855 }
8856 case scAddExpr: {
8857 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
8858 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
8859 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
8860 unsigned AS = PTy->getAddressSpace();
8861 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
8862 C = ConstantExpr::getBitCast(C, DestPtrTy);
8863 }
8864 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
8865 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
8866 if (!C2)
8867 return nullptr;
8868
8869 // First pointer!
8870 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
8871 unsigned AS = C2->getType()->getPointerAddressSpace();
8872 std::swap(C, C2);
8873 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
8874 // The offsets have been converted to bytes. We can add bytes to an
8875 // i8* by GEP with the byte count in the first index.
8876 C = ConstantExpr::getBitCast(C, DestPtrTy);
8877 }
8878
8879 // Don't bother trying to sum two pointers. We probably can't
8880 // statically compute a load that results from it anyway.
8881 if (C2->getType()->isPointerTy())
8882 return nullptr;
8883
8884 if (C->getType()->isPointerTy()) {
8885 C = ConstantExpr::getGetElementPtr(Type::getInt8Ty(C->getContext()),
8886 C, C2);
8887 } else {
8888 C = ConstantExpr::getAdd(C, C2);
8889 }
8890 }
8891 return C;
8892 }
8893 return nullptr;
8894 }
8895 case scMulExpr: {
8896 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
8897 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
8898 // Don't bother with pointers at all.
8899 if (C->getType()->isPointerTy())
8900 return nullptr;
8901 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
8902 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
8903 if (!C2 || C2->getType()->isPointerTy())
8904 return nullptr;
8905 C = ConstantExpr::getMul(C, C2);
8906 }
8907 return C;
8908 }
8909 return nullptr;
8910 }
8911 case scUDivExpr: {
8912 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
8913 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
8914 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
8915 if (LHS->getType() == RHS->getType())
8916 return ConstantExpr::getUDiv(LHS, RHS);
8917 return nullptr;
8918 }
8919 case scSMaxExpr:
8920 case scUMaxExpr:
8921 case scSMinExpr:
8922 case scUMinExpr:
8923 return nullptr; // TODO: smax, umax, smin, umin.
8924 }
8925 llvm_unreachable("Unknown SCEV kind!");
8926 }
8927
8928 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
8929 if (isa<SCEVConstant>(V)) return V;
8930
8931 // If this instruction is evolved from a constant-evolving PHI, compute the
8932 // exit value from the loop without using SCEVs.
8933 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
8934 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
8935 if (PHINode *PN = dyn_cast<PHINode>(I)) {
8936 const Loop *CurrLoop = this->LI[I->getParent()];
8937 // Looking for loop exit value.
8938 if (CurrLoop && CurrLoop->getParentLoop() == L &&
8939 PN->getParent() == CurrLoop->getHeader()) {
8940 // Okay, there is no closed form solution for the PHI node. Check
8941 // to see if the loop that contains it has a known backedge-taken
8942 // count. If so, we may be able to force computation of the exit
8943 // value.
8944 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
8945 // This trivial case can show up in some degenerate cases where
8946 // the incoming IR has not yet been fully simplified.
8947 if (BackedgeTakenCount->isZero()) {
8948 Value *InitValue = nullptr;
8949 bool MultipleInitValues = false;
8950 for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
8951 if (!CurrLoop->contains(PN->getIncomingBlock(i))) {
8952 if (!InitValue)
8953 InitValue = PN->getIncomingValue(i);
8954 else if (InitValue != PN->getIncomingValue(i)) {
8955 MultipleInitValues = true;
8956 break;
8957 }
8958 }
8959 }
8960 if (!MultipleInitValues && InitValue)
8961 return getSCEV(InitValue);
8962 }
8963 // Do we have a loop invariant value flowing around the backedge
8964 // for a loop which must execute the backedge?
8965 if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
8966 isKnownPositive(BackedgeTakenCount) &&
8967 PN->getNumIncomingValues() == 2) {
8968
8969 unsigned InLoopPred =
8970 CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1;
8971 Value *BackedgeVal = PN->getIncomingValue(InLoopPred);
8972 if (CurrLoop->isLoopInvariant(BackedgeVal))
8973 return getSCEV(BackedgeVal);
8974 }
8975 if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
8976 // Okay, we know how many times the containing loop executes. If
8977 // this is a constant evolving PHI node, get the final value at
8978 // the specified iteration number.
8979 Constant *RV = getConstantEvolutionLoopExitValue(
8980 PN, BTCC->getAPInt(), CurrLoop);
8981 if (RV) return getSCEV(RV);
8982 }
8983 }
8984
8985 // If there is a single-input Phi, evaluate it at our scope. If we can
8986 // prove that this replacement does not break LCSSA form, use new value.
8987 if (PN->getNumOperands() == 1) {
8988 const SCEV *Input = getSCEV(PN->getOperand(0));
8989 const SCEV *InputAtScope = getSCEVAtScope(Input, L);
8990 // TODO: We can generalize it using LI.replacementPreservesLCSSAForm,
8991 // for the simplest case just support constants.
8992 if (isa<SCEVConstant>(InputAtScope)) return InputAtScope;
8993 }
8994 }
8995
8996 // Okay, this is an expression that we cannot symbolically evaluate
8997 // into a SCEV. Check to see if it's possible to symbolically evaluate
8998 // the arguments into constants, and if so, try to constant propagate the
8999 // result. This is particularly useful for computing loop exit values.
9000 if (CanConstantFold(I)) {
9001 SmallVector<Constant *, 4> Operands;
9002 bool MadeImprovement = false;
9003 for (Value *Op : I->operands()) {
9004 if (Constant *C = dyn_cast<Constant>(Op)) {
9005 Operands.push_back(C);
9006 continue;
9007 }
9008
9009 // If any of the operands is non-constant and if they are
9010 // non-integer and non-pointer, don't even try to analyze them
9011 // with scev techniques.
9012 if (!isSCEVable(Op->getType()))
9013 return V;
9014
9015 const SCEV *OrigV = getSCEV(Op);
9016 const SCEV *OpV = getSCEVAtScope(OrigV, L);
9017 MadeImprovement |= OrigV != OpV;
9018
9019 Constant *C = BuildConstantFromSCEV(OpV);
9020 if (!C) return V;
9021 if (C->getType() != Op->getType())
9022 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
9023 Op->getType(),
9024 false),
9025 C, Op->getType());
9026 Operands.push_back(C);
9027 }
9028
9029 // Check to see if getSCEVAtScope actually made an improvement.
9030 if (MadeImprovement) {
9031 Constant *C = nullptr;
9032 const DataLayout &DL = getDataLayout();
9033 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
9034 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
9035 Operands[1], DL, &TLI);
9036 else if (const LoadInst *Load = dyn_cast<LoadInst>(I)) {
9037 if (!Load->isVolatile())
9038 C = ConstantFoldLoadFromConstPtr(Operands[0], Load->getType(),
9039 DL);
9040 } else
9041 C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
9042 if (!C) return V;
9043 return getSCEV(C);
9044 }
9045 }
9046 }
9047
9048 // This is some other type of SCEVUnknown, just return it.
9049 return V;
9050 }
9051
9052 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
9053 // Avoid performing the look-up in the common case where the specified
9054 // expression has no loop-variant portions.
9055 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
9056 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
9057 if (OpAtScope != Comm->getOperand(i)) {
9058 // Okay, at least one of these operands is loop variant but might be
9059 // foldable. Build a new instance of the folded commutative expression.
9060 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
9061 Comm->op_begin()+i);
9062 NewOps.push_back(OpAtScope);
9063
9064 for (++i; i != e; ++i) {
9065 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
9066 NewOps.push_back(OpAtScope);
9067 }
9068 if (isa<SCEVAddExpr>(Comm))
9069 return getAddExpr(NewOps, Comm->getNoWrapFlags());
9070 if (isa<SCEVMulExpr>(Comm))
9071 return getMulExpr(NewOps, Comm->getNoWrapFlags());
9072 if (isa<SCEVMinMaxExpr>(Comm))
9073 return getMinMaxExpr(Comm->getSCEVType(), NewOps);
9074 llvm_unreachable("Unknown commutative SCEV type!");
9075 }
9076 }
9077 // If we got here, all operands are loop invariant.
9078 return Comm;
9079 }
9080
9081 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
9082 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
9083 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
9084 if (LHS == Div->getLHS() && RHS == Div->getRHS())
9085 return Div; // must be loop invariant
9086 return getUDivExpr(LHS, RHS);
9087 }
9088
9089 // If this is a loop recurrence for a loop that does not contain L, then we
9090 // are dealing with the final value computed by the loop.
9091 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
9092 // First, attempt to evaluate each operand.
9093 // Avoid performing the look-up in the common case where the specified
9094 // expression has no loop-variant portions.
9095 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
9096 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
9097 if (OpAtScope == AddRec->getOperand(i))
9098 continue;
9099
9100 // Okay, at least one of these operands is loop variant but might be
9101 // foldable. Build a new instance of the folded commutative expression.
9102 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
9103 AddRec->op_begin()+i);
9104 NewOps.push_back(OpAtScope);
9105 for (++i; i != e; ++i)
9106 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
9107
9108 const SCEV *FoldedRec =
9109 getAddRecExpr(NewOps, AddRec->getLoop(),
9110 AddRec->getNoWrapFlags(SCEV::FlagNW));
9111 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
9112 // The addrec may be folded to a nonrecurrence, for example, if the
9113 // induction variable is multiplied by zero after constant folding. Go
9114 // ahead and return the folded value.
9115 if (!AddRec)
9116 return FoldedRec;
9117 break;
9118 }
9119
9120 // If the scope is outside the addrec's loop, evaluate it by using the
9121 // loop exit value of the addrec.
9122 if (!AddRec->getLoop()->contains(L)) {
9123 // To evaluate this recurrence, we need to know how many times the AddRec
9124 // loop iterates. Compute this now.
9125 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
9126 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
9127
9128 // Then, evaluate the AddRec.
9129 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
9130 }
9131
9132 return AddRec;
9133 }
9134
9135 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
9136 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
9137 if (Op == Cast->getOperand())
9138 return Cast; // must be loop invariant
9139 return getZeroExtendExpr(Op, Cast->getType());
9140 }
9141
9142 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
9143 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
9144 if (Op == Cast->getOperand())
9145 return Cast; // must be loop invariant
9146 return getSignExtendExpr(Op, Cast->getType());
9147 }
9148
9149 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
9150 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
9151 if (Op == Cast->getOperand())
9152 return Cast; // must be loop invariant
9153 return getTruncateExpr(Op, Cast->getType());
9154 }
9155
9156 if (const SCEVPtrToIntExpr *Cast = dyn_cast<SCEVPtrToIntExpr>(V)) {
9157 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
9158 if (Op == Cast->getOperand())
9159 return Cast; // must be loop invariant
9160 return getPtrToIntExpr(Op, Cast->getType());
9161 }
9162
9163 llvm_unreachable("Unknown SCEV type!");
9164 }
9165
9166 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
9167 return getSCEVAtScope(getSCEV(V), L);
9168 }
9169
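/// zext and sext are injective and map zero to zero, so for the purpose of
/// solving "V == 0" below it is safe to look through them: (ext X) == 0
/// exactly when X == 0.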
9170 const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
9171 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S))
9172 return stripInjectiveFunctions(ZExt->getOperand());
9173 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S))
9174 return stripInjectiveFunctions(SExt->getOperand());
9175 return S;
9176 }
9177
9178 /// Finds the minimum unsigned root of the following equation:
9179 ///
9180 /// A * X = B (mod N)
9181 ///
9182 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of
9183 /// A and B isn't important.
9184 ///
9185 /// If the equation does not have a solution, SCEVCouldNotCompute is returned.
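/// Worked example (BW = 8, so N = 256): solving 4 * X = 8 (mod 256),
/// D = gcd(4, 256) = 4 and 8 is divisible by 4, so a solution exists.
/// I = (4/4)^{-1} (mod 64) = 1, and the minimum unsigned root is
/// (1 * 8 mod 256) / 4 = 2; indeed 4 * 2 == 8 (mod 256).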
9186 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
9187 ScalarEvolution &SE) {
9188 uint32_t BW = A.getBitWidth();
9189 assert(BW == SE.getTypeSizeInBits(B->getType()));
9190 assert(A != 0 && "A must be non-zero.");
9191
9192 // 1. D = gcd(A, N)
9193 //
9194 // The gcd of A and N may have only one prime factor: 2. The number of
9195 // trailing zeros in A is its multiplicity.
9196 uint32_t Mult2 = A.countTrailingZeros();
9197 // D = 2^Mult2
9198
9199 // 2. Check if B is divisible by D.
9200 //
9201 // B is divisible by D if and only if the multiplicity of prime factor 2 for B
9202 // is not less than multiplicity of this prime factor for D.
9203 if (SE.GetMinTrailingZeros(B) < Mult2)
9204 return SE.getCouldNotCompute();
9205
9206 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
9207 // modulo (N / D).
9208 //
9209 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
9210 // (N / D) in general. The inverse itself always fits into BW bits, though,
9211 // so we immediately truncate it.
9212 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
9213 APInt Mod(BW + 1, 0);
9214 Mod.setBit(BW - Mult2); // Mod = N / D
9215 APInt I = AD.multiplicativeInverse(Mod).trunc(BW);
9216
9217 // 4. Compute the minimum unsigned root of the equation:
9218 // I * (B / D) mod (N / D)
9219 // To simplify the computation, we factor out the divide by D:
9220 // (I * B mod N) / D
9221 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
9222 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
9223 }
9224
9225 /// For a given quadratic addrec, generate coefficients of the corresponding
9226 /// quadratic equation, multiplied by a common value to ensure that they are
9227 /// integers.
9228 /// The returned value is a tuple { A, B, C, M, BitWidth }, where
9229 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C
9230 /// were multiplied by, and BitWidth is the bit width of the original addrec
9231 /// coefficients.
9232 /// This function returns None if the addrec coefficients are not compile-
9233 /// time constants.
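/// For example, for the addrec {2,+,3,+,4} (L = 2, M = 3, N = 4), the
/// accumulated value after n iterations is 2 + 3n + 4n(n-1)/2; doubling it to
/// clear the fraction gives the returned equation 4n^2 + 2n + 4, that is,
/// A = 4, B = 2, C = 4, multiplied by 2.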
9234 static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
9235 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) {
9236 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
9237 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
9238 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
9239 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
9240 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: "
9241 << *AddRec << '\n');
9242
9243 // We currently can only solve this if the coefficients are constants.
9244 if (!LC || !MC || !NC) {
9245 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n");
9246 return None;
9247 }
9248
9249 APInt L = LC->getAPInt();
9250 APInt M = MC->getAPInt();
9251 APInt N = NC->getAPInt();
9252 assert(!N.isZero() && "This is not a quadratic addrec");
9253
9254 unsigned BitWidth = LC->getAPInt().getBitWidth();
9255 unsigned NewWidth = BitWidth + 1;
9256 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: "
9257 << BitWidth << '\n');
9258 // The sign-extension (as opposed to a zero-extension) here matches the
9259 // extension used in SolveQuadraticEquationWrap (with the same motivation).
9260 N = N.sext(NewWidth);
9261 M = M.sext(NewWidth);
9262 L = L.sext(NewWidth);
9263
9264 // The increments are M, M+N, M+2N, ..., so the accumulated values are
9265 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is,
9266 // L+M, L+2M+N, L+3M+3N, ...
9267 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N.
9268 //
9269 // The equation Acc = 0 is then
9270 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0.
9271 // In a quadratic form it becomes:
9272 // N n^2 + (2M-N) n + 2L = 0.
9273
9274 APInt A = N;
9275 APInt B = 2 * M - A;
9276 APInt C = 2 * L;
9277 APInt T = APInt(NewWidth, 2);
9278 LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B
9279 << "x + " << C << ", coeff bw: " << NewWidth
9280 << ", multiplied by " << T << '\n');
9281 return std::make_tuple(A, B, C, T, BitWidth);
9282 }
9283
9284 /// Helper function to compare optional APInts:
9285 /// (a) if X and Y both exist, return min(X, Y),
9286 /// (b) if neither X nor Y exist, return None,
9287 /// (c) if exactly one of X and Y exists, return that value.
9288 static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
9289 if (X.hasValue() && Y.hasValue()) {
9290 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
9291 APInt XW = X->sextOrSelf(W);
9292 APInt YW = Y->sextOrSelf(W);
9293 return XW.slt(YW) ? *X : *Y;
9294 }
9295 if (!X.hasValue() && !Y.hasValue())
9296 return None;
9297 return X.hasValue() ? *X : *Y;
9298 }
9299
9300 /// Helper function to truncate an optional APInt to a given BitWidth.
9301 /// When solving addrec-related equations, it is preferable to return a value
9302 /// that has the same bit width as the original addrec's coefficients. If the
9303 /// solution fits in the original bit width, truncate it (except for i1).
9304 /// Returning a value of a different bit width may inhibit some optimizations.
9305 ///
9306 /// In general, a solution to a quadratic equation generated from an addrec
9307 /// may require BW+1 bits, where BW is the bit width of the addrec's
9308 /// coefficients. The reason is that the coefficients of the quadratic
9309 /// equation are BW+1 bits wide (to avoid truncation when converting from
9310 /// the addrec to the equation).
9311 static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) {
9312 if (!X.hasValue())
9313 return None;
9314 unsigned W = X->getBitWidth();
9315 if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth))
9316 return X->trunc(BitWidth);
9317 return X;
9318 }
9319
9320 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n
9321 /// iterations. The values L, M, N are assumed to be signed, and they
9322 /// should all have the same bit widths.
9323 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW,
9324 /// where BW is the bit width of the addrec's coefficients.
9325 /// If the calculated value is a BW-bit integer (for BW > 1), it will be
9326 /// returned as such, otherwise the bit width of the returned value may
9327 /// be greater than BW.
9328 ///
9329 /// This function returns None if
9330 /// (a) the addrec coefficients are not constant, or
9331 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases
9332 /// like x^2 = 5, no integer solutions exist, in other cases an integer
9333 /// solution may exist, but SolveQuadraticEquationWrap may fail to find it.
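/// For example, for the chrec {-4,+,1,+,2} the value after n iterations is
/// c(n) = -4 + n + n(n-1) = n^2 - 4, so the least n >= 0 with c(n) == 0 is 2.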
9334 static Optional<APInt>
9335 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
9336 APInt A, B, C, M;
9337 unsigned BitWidth;
9338 auto T = GetQuadraticEquation(AddRec);
9339 if (!T.hasValue())
9340 return None;
9341
9342 std::tie(A, B, C, M, BitWidth) = *T;
9343 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n");
9344 Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1);
9345 if (!X.hasValue())
9346 return None;
9347
9348 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X);
9349 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE);
9350 if (!V->isZero())
9351 return None;
9352
9353 return TruncIfPossible(X, BitWidth);
9354 }
9355
9356 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n
9357 /// iterations. The values M, N are assumed to be signed, and they
9358 /// should all have the same bit widths.
9359 /// Find the least n such that c(n) does not belong to the given range,
9360 /// while c(n-1) does.
9361 ///
9362 /// This function returns None if
9363 /// (a) the addrec coefficients are not constant, or
9364 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the
9365 /// bounds of the range.
9366 static Optional<APInt>
9367 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
9368 const ConstantRange &Range, ScalarEvolution &SE) {
9369 assert(AddRec->getOperand(0)->isZero() &&
9370 "Starting value of addrec should be 0");
9371 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range "
9372 << Range << ", addrec " << *AddRec << '\n');
9373 // This case is handled in getNumIterationsInRange. Here we can assume that
9374 // we start in the range.
9375 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) &&
9376 "Addrec's initial value should be in range");
9377
9378 APInt A, B, C, M;
9379 unsigned BitWidth;
9380 auto T = GetQuadraticEquation(AddRec);
9381 if (!T.hasValue())
9382 return None;
9383
9384 // Be careful about the return value: there can be two reasons for not
9385 // returning an actual number. First, if no solutions to the equations
9386 // were found, and second, if the solutions don't leave the given range.
9387 // The first case means that the actual solution is "unknown", the second
9388 // means that it's known, but not valid. If the solution is unknown, we
9389 // cannot make any conclusions.
9390 // Return a pair: the optional solution and a flag indicating if the
9391 // solution was found.
9392 auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> {
9393 // Solve for signed overflow and unsigned overflow, pick the lower
9394 // solution.
9395 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary "
9396 << Bound << " (before multiplying by " << M << ")\n");
9397 Bound *= M; // The quadratic equation multiplier.
9398
9399 Optional<APInt> SO = None;
9400 if (BitWidth > 1) {
9401 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
9402 "signed overflow\n");
9403 SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
9404 }
9405 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
9406 "unsigned overflow\n");
9407 Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound,
9408 BitWidth+1);
9409
9410 auto LeavesRange = [&] (const APInt &X) {
9411 ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
9412 ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
9413 if (Range.contains(V0->getValue()))
9414 return false;
9415 // X should be at least 1, so X-1 is non-negative.
9416 ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1);
9417 ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
9418 if (Range.contains(V1->getValue()))
9419 return true;
9420 return false;
9421 };
9422
9423 // If SolveQuadraticEquationWrap returns None, it means that there can
9424 // be a solution, but the function failed to find it. We cannot treat it
9425 // as "no solution".
9426 if (!SO.hasValue() || !UO.hasValue())
9427 return { None, false };
9428
9429 // Check the smaller value first to see if it leaves the range.
9430 // At this point, both SO and UO must have values.
9431 Optional<APInt> Min = MinOptional(SO, UO);
9432 if (LeavesRange(*Min))
9433 return { Min, true };
9434 Optional<APInt> Max = Min == SO ? UO : SO;
9435 if (LeavesRange(*Max))
9436 return { Max, true };
9437
9438 // Solutions were found, but were eliminated, hence the "true".
9439 return { None, true };
9440 };
9441
9442 std::tie(A, B, C, M, BitWidth) = *T;
9443 // Lower bound is inclusive, subtract 1 to represent the exiting value.
9444 APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
9445 APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
9446 auto SL = SolveForBoundary(Lower);
9447 auto SU = SolveForBoundary(Upper);
9448 // If any of the solutions was unknown, no meaningful conclusions can
9449 // be made.
9450 if (!SL.second || !SU.second)
9451 return None;
9452
9453 // Claim: The correct solution is not some value between Min and Max.
9454 //
9455 // Justification: Assuming that Min and Max are different values, one of
9456 // them is when the first signed overflow happens, the other is when the
9457 // first unsigned overflow happens. Crossing the range boundary is only
9458 // possible via an overflow (treating 0 as a special case of it, modeling
9459 // an overflow as crossing k*2^W for some k).
9460 //
9461 // The interesting case here is when Min was eliminated as an invalid
9462 // solution, but Max was not. The argument is that if there was another
9463 // overflow between Min and Max, it would also have been eliminated if
9464 // it was considered.
9465 //
9466 // For a given boundary, it is possible to have two overflows of the same
9467 // type (signed/unsigned) without having the other type in between: this
9468 // can happen when the vertex of the parabola is between the iterations
9469 // corresponding to the overflows. This is only possible when the two
9470 // overflows cross k*2^W for the same k. In such case, if the second one
9471 // left the range (and was the first one to do so), the first overflow
9472 // would have to enter the range, which would mean that either we had left
9473 // the range before or that we started outside of it. Both of these cases
9474 // are contradictions.
9475 //
9476 // Claim: In the case where SolveForBoundary returns None, the correct
9477 // solution is not some value between the Max for this boundary and the
9478 // Min of the other boundary.
9479 //
9480 // Justification: Assume that we had such Max_A and Min_B corresponding
9481 // to range boundaries A and B and such that Max_A < Min_B. If there was
9482 // a solution between Max_A and Min_B, it would have to be caused by an
9483 // overflow corresponding to either A or B. It cannot correspond to B,
9484 // since Min_B is the first occurrence of such an overflow. If it
9485 // corresponded to A, it would have to be either a signed or an unsigned
9486 // overflow that is larger than both eliminated overflows for A. But
9487 // between the eliminated overflows and this overflow, the values would
9488 // cover the entire value space, thus crossing the other boundary, which
9489 // is a contradiction.
9490
9491 return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
9492 }
9493
9494 ScalarEvolution::ExitLimit
9495 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
9496 bool AllowPredicates) {
9497
9498 // This is only used for loops with an "x != y" exit test. The exit
9499 // condition is now expressed as a single expression, V = x-y. So the exit
9500 // test is effectively V != 0. We know and take advantage of the fact that
9501 // this expression is only used in a comparison-with-zero context.
9502
9503 SmallPtrSet<const SCEVPredicate *, 4> Predicates;
9504 // If the value is a constant
9505 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
9506 // If the value is already zero, the branch will execute zero times.
9507 if (C->getValue()->isZero()) return C;
9508 return getCouldNotCompute(); // Otherwise it will loop infinitely.
9509 }
9510
9511 const SCEVAddRecExpr *AddRec =
9512 dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));
9513
9514 if (!AddRec && AllowPredicates)
9515 // Try to make this an AddRec using runtime tests, in the first X
9516 // iterations of this loop, where X is the SCEV expression found by the
9517 // algorithm below.
9518 AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);
9519
9520 if (!AddRec || AddRec->getLoop() != L)
9521 return getCouldNotCompute();
9522
9523 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
9524 // the quadratic equation to solve it.
9525 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
9526 // We can only use this value if the chrec ends up with an exact zero
9527 // value at this index. When solving for "X*X != 5", for example, we
9528 // should not accept a root of 2.
9529 if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
9530 const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
9531 return ExitLimit(R, R, false, Predicates);
9532 }
9533 return getCouldNotCompute();
9534 }
9535
9536 // Otherwise we can only handle this if it is affine.
9537 if (!AddRec->isAffine())
9538 return getCouldNotCompute();
9539
9540 // If this is an affine expression, the execution count of this branch is
9541 // the minimum unsigned root of the following equation:
9542 //
9543 // Start + Step*N = 0 (mod 2^BW)
9544 //
9545 // equivalent to:
9546 //
9547 // Step*N = -Start (mod 2^BW)
9548 //
9549 // where BW is the common bit width of Start and Step.
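// For example (a sketch with concrete values): with Start = 10 and Step = -2
// over i8, Step*N = -Start (mod 256) becomes -2*N == -10 (mod 256), whose
// minimum unsigned root is N = 5: the recurrence walks 10, 8, 6, 4, 2 and
// reaches 0 after 5 backedges.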
9550
9551 // Get the initial value for the loop.
9552 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
9553 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
9554
9555 // For now we handle only constant steps.
9556 //
9557 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
9558 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
9559 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
9560 // We have not yet seen any such cases.
9561 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
9562 if (!StepC || StepC->getValue()->isZero())
9563 return getCouldNotCompute();
9564
9565 // For positive steps (counting up until unsigned overflow):
9566 // N = -Start/Step (as unsigned)
9567 // For negative steps (counting down to zero):
9568 // N = Start/-Step
9569 // First compute the unsigned distance from zero in the direction of Step.
9570 bool CountDown = StepC->getAPInt().isNegative();
9571 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
9572
9573 // Handle unitary steps, which cannot wraparound.
9574 // 1*N = -Start; -1*N = Start (mod 2^BW), so:
9575 // N = Distance (as unsigned)
9576 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
9577 APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L));
9578 APInt MaxBECountBase = getUnsignedRangeMax(Distance);
9579 if (MaxBECountBase.ult(MaxBECount))
9580 MaxBECount = MaxBECountBase;
9581
9582 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated,
9583 // we end up with a loop whose backedge-taken count is n - 1. Detect this
9584 // case, and see if we can improve the bound.
9585 //
9586 // Explicitly handling this here is necessary because getUnsignedRange
9587 // isn't context-sensitive; it doesn't know that we only care about the
9588 // range inside the loop.
9589 const SCEV *Zero = getZero(Distance->getType());
9590 const SCEV *One = getOne(Distance->getType());
9591 const SCEV *DistancePlusOne = getAddExpr(Distance, One);
9592 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
9593 // If Distance + 1 doesn't overflow, we can compute the maximum distance
9594 // as "unsigned_max(Distance + 1) - 1".
9595 ConstantRange CR = getUnsignedRange(DistancePlusOne);
9596 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
9597 }
9598 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
9599 }
9600
9601 // If the condition controls loop exit (the loop exits only if the expression
9602 // is true) and the addition is no-wrap we can use unsigned divide to
9603 // compute the backedge count. In this case, the step may not divide the
9604 // distance, but we don't care because if the condition is "missed" the loop
9605 // will have undefined behavior due to wrapping.
9606 if (ControlsExit && AddRec->hasNoSelfWrap() &&
9607 loopHasNoAbnormalExits(AddRec->getLoop())) {
9608 const SCEV *Exact =
9609 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
9610 const SCEV *Max = getCouldNotCompute();
9611 if (Exact != getCouldNotCompute()) {
9612 APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, L));
9613 APInt BaseMaxInt = getUnsignedRangeMax(Exact);
9614 if (BaseMaxInt.ult(MaxInt))
9615 Max = getConstant(BaseMaxInt);
9616 else
9617 Max = getConstant(MaxInt);
9618 }
9619 return ExitLimit(Exact, Max, false, Predicates);
9620 }
9621
9622 // Solve the general equation.
9623 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
9624 getNegativeSCEV(Start), *this);
9625 const SCEV *M = E == getCouldNotCompute()
9626 ? E
9627 : getConstant(getUnsignedRangeMax(E));
9628 return ExitLimit(E, M, false, Predicates);
9629 }
9630
9631 ScalarEvolution::ExitLimit
9632 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
9633 // Loops that look like: while (X == 0) are very strange indeed. We don't
9634 // handle them yet except for the trivial case. This could be expanded in the
9635 // future as needed.
9636
9637 // If the value is a constant, check to see if it is known to be non-zero
9638 // already. If so, the backedge will execute zero times.
9639 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
9640 if (!C->getValue()->isZero())
9641 return getZero(C->getType());
9642 return getCouldNotCompute(); // Otherwise it will loop infinitely.
9643 }
9644
9645 // We could implement others, but I really doubt anyone writes loops like
9646 // this, and if they did, they would already be constant folded.
9647 return getCouldNotCompute();
9648 }
9649
9650 std::pair<const BasicBlock *, const BasicBlock *>
9651 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB)
9652 const {
9653 // If the block has a unique predecessor, then there is no path from the
9654 // predecessor to the block that does not go through the direct edge
9655 // from the predecessor to the block.
9656 if (const BasicBlock *Pred = BB->getSinglePredecessor())
9657 return {Pred, BB};
9658
9659 // A loop's header is defined to be a block that dominates the loop.
9660 // If the header has a unique predecessor outside the loop, it must be
9661 // a block that has exactly one successor that can reach the loop.
9662 if (const Loop *L = LI.getLoopFor(BB))
9663 return {L->getLoopPredecessor(), L->getHeader()};
9664
9665 return {nullptr, nullptr};
9666 }
9667
9668 /// SCEV structural equivalence is usually sufficient for testing whether two
9669 /// expressions are equal; however, for the purposes of looking for a condition
9670 /// guarding a loop, it can be useful to be a little more general, since a
9671 /// front-end may have replicated the controlling expression.
9672 static bool HasSameValue(const SCEV *A, const SCEV *B) {
9673 // Quick check to see if they are the same SCEV.
9674 if (A == B) return true;
9675
9676 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
9677 // Not all instructions that are "identical" compute the same value. For
9678 // instance, two distinct alloca instructions allocating the same type are
9679 // identical and do not read memory, but compute distinct values.
9680 return A->isIdenticalTo(B) &&
(isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
9681 };
9682
9683 // Otherwise, if they're both SCEVUnknown, it's possible that they hold
9684 // two different instructions with the same value. Check for this case.
9685 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
9686 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
9687 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
9688 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
9689 if (ComputesEqualValues(AI, BI))
9690 return true;
9691
9692 // Otherwise assume they may have a different value.
9693 return false;
9694 }
9695
9696 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
9697 const SCEV *&LHS, const SCEV *&RHS,
9698 unsigned Depth) {
9699 bool Changed = false;
9700 // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or
9701 // '0 != 0'.
9702 auto TrivialCase = [&](bool TriviallyTrue) {
9703 LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
9704 Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
9705 return true;
9706 };
9707 // If we hit the max recursion limit, bail out.
9708 if (Depth >= 3)
9709 return false;
9710
9711 // Canonicalize a constant to the right side.
9712 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
9713 // Check whether both operands are constant.
9714 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
9715 if (ConstantExpr::getICmp(Pred,
9716 LHSC->getValue(),
9717 RHSC->getValue())->isNullValue())
9718 return TrivialCase(false);
9719 else
9720 return TrivialCase(true);
9721 }
9722 // Otherwise swap the operands to put the constant on the right.
9723 std::swap(LHS, RHS);
9724 Pred = ICmpInst::getSwappedPredicate(Pred);
9725 Changed = true;
9726 }
9727
9728 // If we're comparing an addrec with a value which is loop-invariant in the
9729 // addrec's loop, put the addrec on the left. Also make a dominance check,
9730 // as both operands could be addrecs loop-invariant in each other's loop.
9731 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
9732 const Loop *L = AR->getLoop();
9733 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
9734 std::swap(LHS, RHS);
9735 Pred = ICmpInst::getSwappedPredicate(Pred);
9736 Changed = true;
9737 }
9738 }
9739
9740 // If there's a constant operand, canonicalize comparisons with boundary
9741 // cases, and canonicalize *-or-equal comparisons to regular comparisons.
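// For example (illustrative): "X u>= 5" becomes "X u> 4" and "X s<= 7"
// becomes "X s< 8", while boundary forms such as "X u>= 0" fold to a
// trivial true/false through the exact-range checks below.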
9742 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
9743 const APInt &RA = RC->getAPInt();
9744
9745 bool SimplifiedByConstantRange = false;
9746
9747 if (!ICmpInst::isEquality(Pred)) {
9748 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
9749 if (ExactCR.isFullSet())
9750 return TrivialCase(true);
9751 else if (ExactCR.isEmptySet())
9752 return TrivialCase(false);
9753
9754 APInt NewRHS;
9755 CmpInst::Predicate NewPred;
9756 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
9757 ICmpInst::isEquality(NewPred)) {
9758 // We were able to convert an inequality to an equality.
9759 Pred = NewPred;
9760 RHS = getConstant(NewRHS);
9761 Changed = SimplifiedByConstantRange = true;
9762 }
9763 }
9764
9765 if (!SimplifiedByConstantRange) {
9766 switch (Pred) {
9767 default:
9768 break;
9769 case ICmpInst::ICMP_EQ:
9770 case ICmpInst::ICMP_NE:
9771 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
9772 if (!RA)
9773 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
9774 if (const SCEVMulExpr *ME =
9775 dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
9776 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
9777 ME->getOperand(0)->isAllOnesValue()) {
9778 RHS = AE->getOperand(1);
9779 LHS = ME->getOperand(1);
9780 Changed = true;
9781 }
9782 break;
9783
9784
9785 // The "Should have been caught earlier!" messages refer to the fact
9786 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
9787 // should have fired on the corresponding cases, and canonicalized the
9788 // check to the trivial case.
9789
9790 case ICmpInst::ICMP_UGE:
9791 assert(!RA.isMinValue() && "Should have been caught earlier!");
9792 Pred = ICmpInst::ICMP_UGT;
9793 RHS = getConstant(RA - 1);
9794 Changed = true;
9795 break;
9796 case ICmpInst::ICMP_ULE:
9797 assert(!RA.isMaxValue() && "Should have been caught earlier!");
9798 Pred = ICmpInst::ICMP_ULT;
9799 RHS = getConstant(RA + 1);
9800 Changed = true;
9801 break;
9802 case ICmpInst::ICMP_SGE:
9803 assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
9804 Pred = ICmpInst::ICMP_SGT;
9805 RHS = getConstant(RA - 1);
9806 Changed = true;
9807 break;
9808 case ICmpInst::ICMP_SLE:
9809 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
9810 Pred = ICmpInst::ICMP_SLT;
9811 RHS = getConstant(RA + 1);
9812 Changed = true;
9813 break;
9814 }
9815 }
9816 }
9817
9818 // Check for obvious equality.
9819 if (HasSameValue(LHS, RHS)) {
9820 if (ICmpInst::isTrueWhenEqual(Pred))
9821 return TrivialCase(true);
9822 if (ICmpInst::isFalseWhenEqual(Pred))
9823 return TrivialCase(false);
9824 }
9825
9826 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
9827 // adding or subtracting 1 from one of the operands.
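// E.g. (illustrative): "LHS s<= RHS" becomes "LHS s< RHS + 1" when the signed
// range of RHS proves RHS + 1 cannot overflow; failing that, we try
// "LHS - 1 s< RHS" when LHS - 1 cannot underflow.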
9828 switch (Pred) {
9829 case ICmpInst::ICMP_SLE:
9830 if (!getSignedRangeMax(RHS).isMaxSignedValue()) {
9831 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
9832 SCEV::FlagNSW);
9833 Pred = ICmpInst::ICMP_SLT;
9834 Changed = true;
9835 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
9836 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
9837 SCEV::FlagNSW);
9838 Pred = ICmpInst::ICMP_SLT;
9839 Changed = true;
9840 }
9841 break;
9842 case ICmpInst::ICMP_SGE:
9843 if (!getSignedRangeMin(RHS).isMinSignedValue()) {
9844 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
9845 SCEV::FlagNSW);
9846 Pred = ICmpInst::ICMP_SGT;
9847 Changed = true;
9848 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
9849 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
9850 SCEV::FlagNSW);
9851 Pred = ICmpInst::ICMP_SGT;
9852 Changed = true;
9853 }
9854 break;
9855 case ICmpInst::ICMP_ULE:
9856 if (!getUnsignedRangeMax(RHS).isMaxValue()) {
9857 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
9858 SCEV::FlagNUW);
9859 Pred = ICmpInst::ICMP_ULT;
9860 Changed = true;
9861 } else if (!getUnsignedRangeMin(LHS).isMinValue()) {
9862 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS);
9863 Pred = ICmpInst::ICMP_ULT;
9864 Changed = true;
9865 }
9866 break;
9867 case ICmpInst::ICMP_UGE:
9868 if (!getUnsignedRangeMin(RHS).isMinValue()) {
9869 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
9870 Pred = ICmpInst::ICMP_UGT;
9871 Changed = true;
9872 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
9873 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
9874 SCEV::FlagNUW);
9875 Pred = ICmpInst::ICMP_UGT;
9876 Changed = true;
9877 }
9878 break;
9879 default:
9880 break;
9881 }
9882
9883 // TODO: More simplifications are possible here.
9884
9885 // Recursively simplify until we either hit a recursion limit or nothing
9886 // changes.
9887 if (Changed)
9888 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);
9889
9890 return Changed;
9891 }
9892
9893 bool ScalarEvolution::isKnownNegative(const SCEV *S) {
9894 return getSignedRangeMax(S).isNegative();
9895 }
9896
9897 bool ScalarEvolution::isKnownPositive(const SCEV *S) {
9898 return getSignedRangeMin(S).isStrictlyPositive();
9899 }
9900
9901 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
9902 return !getSignedRangeMin(S).isNegative();
9903 }
9904
9905 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
9906 return !getSignedRangeMax(S).isStrictlyPositive();
9907 }
9908
9909 bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
9910 return getUnsignedRangeMin(S) != 0;
9911 }
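// Usage sketch (illustrative): if getSignedRange(S) is [1, 100], then
// isKnownPositive(S) and isKnownNonNegative(S) both hold, since the signed
// minimum is 1; isKnownNonZero(S) instead inspects the unsigned minimum.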
9912
9913 std::pair<const SCEV *, const SCEV *>
9914 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) {
9915 // Compute SCEV on entry of loop L.
9916 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this);
9917 if (Start == getCouldNotCompute())
9918 return { Start, Start };
9919 // Compute post increment SCEV for loop L.
9920 const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
9921 assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
9922 return { Start, PostInc };
9923 }
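// Illustration (assumed expressions): for S = {A,+,1}<L>, this returns the
// pair { A, {A+1,+,1}<L> }: the value of S on entry to L and its value after
// the first increment, respectively.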
9924
9925 bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
9926 const SCEV *LHS, const SCEV *RHS) {
9927 // First collect all loops.
9928 SmallPtrSet<const Loop *, 8> LoopsUsed;
9929 getUsedLoops(LHS, LoopsUsed);
9930 getUsedLoops(RHS, LoopsUsed);
9931
9932 if (LoopsUsed.empty())
9933 return false;
9934
9935 // Domination relationship must be a linear order on collected loops.
9936 #ifndef NDEBUG
9937 for (auto *L1 : LoopsUsed)
9938 for (auto *L2 : LoopsUsed)
9939 assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
9940 DT.dominates(L2->getHeader(), L1->getHeader())) &&
9941 "Domination relationship is not a linear order");
9942 #endif
9943
9944 const Loop *MDL =
9945 *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
9946 [&](const Loop *L1, const Loop *L2) {
9947 return DT.properlyDominates(L1->getHeader(), L2->getHeader());
9948 });
9949
9950 // Get init and post increment value for LHS.
9951 auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
9952 // If LHS contains an unknown non-invariant SCEV, bail out.
9953 if (SplitLHS.first == getCouldNotCompute())
9954 return false;
9955 assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
9956 // Get init and post increment value for RHS.
9957 auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
9958 // If RHS contains an unknown non-invariant SCEV, bail out.
9959 if (SplitRHS.first == getCouldNotCompute())
9960 return false;
9961 assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
9962 // It is possible that the init SCEV contains an invariant load which does
9963 // not dominate MDL and is not available at MDL loop entry, so we should
9964 // check it here.
9965 if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
9966 !isAvailableAtLoopEntry(SplitRHS.first, MDL))
9967 return false;
9968
9969 // The backedge guard check seems to be faster than the entry one, so in some
9970 // cases checking it first can speed up the whole estimation by short-circuiting.
9971 return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
9972 SplitRHS.second) &&
9973 isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
9974 }
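// Informal proof sketch for the scheme above: if "init(LHS) Pred init(RHS)"
// holds on loop entry, and "post(LHS) Pred post(RHS)" holds whenever the
// backedge is taken, then the predicate holds on every iteration by
// induction on the iteration number.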
9975
9976 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
9977 const SCEV *LHS, const SCEV *RHS) {
9978 // Canonicalize the inputs first.
9979 (void)SimplifyICmpOperands(Pred, LHS, RHS);
9980
9981 if (isKnownViaInduction(Pred, LHS, RHS))
9982 return true;
9983
9984 if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
9985 return true;
9986
9987 // Otherwise see what can be done with some simple reasoning.
9988 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
9989 }
9990
9991 Optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred,
9992 const SCEV *LHS,
9993 const SCEV *RHS) {
9994 if (isKnownPredicate(Pred, LHS, RHS))
9995 return true;
9996 else if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS))
9997 return false;
9998 return None;
9999 }
10000
10001 bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
10002 const SCEV *LHS, const SCEV *RHS,
10003 const Instruction *CtxI) {
10004 // TODO: Analyze guards and assumes from Context's block.
10005 return isKnownPredicate(Pred, LHS, RHS) ||
10006 isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS);
10007 }
10008
10009 Optional<bool> ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred,
10010 const SCEV *LHS,
10011 const SCEV *RHS,
10012 const Instruction *CtxI) {
10013 Optional<bool> KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS);
10014 if (KnownWithoutContext)
10015 return KnownWithoutContext;
10016
10017 if (isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS))
10018 return true;
10019 else if (isBasicBlockEntryGuardedByCond(CtxI->getParent(),
10020 ICmpInst::getInversePredicate(Pred),
10021 LHS, RHS))
10022 return false;
10023 return None;
10024 }
10025
10026 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
10027 const SCEVAddRecExpr *LHS,
10028 const SCEV *RHS) {
10029 const Loop *L = LHS->getLoop();
10030 return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
10031 isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
10032 }
10033
10034 Optional<ScalarEvolution::MonotonicPredicateType>
10035 ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
10036 ICmpInst::Predicate Pred) {
10037 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);
10038
10039 #ifndef NDEBUG
10040 // Verify an invariant: swapping the predicate should turn a monotonically
10041 // increasing change into a monotonically decreasing one, and vice versa.
10042 if (Result) {
10043 auto ResultSwapped =
10044 getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred));
10045
10046 assert(ResultSwapped.hasValue() && "should be able to analyze both!");
10047 assert(ResultSwapped.getValue() != Result.getValue() &&
10048 "monotonicity should flip as we flip the predicate");
10049 }
10050 #endif
10051
10052 return Result;
10053 }
10054
10055 Optional<ScalarEvolution::MonotonicPredicateType>
10056 ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
10057 ICmpInst::Predicate Pred) {
10058 // A zero step value for LHS means the induction variable is essentially a
10059 // loop invariant value. We don't really depend on the predicate actually
10060 // flipping from false to true (for increasing predicates, and the other way
10061 // around for decreasing predicates), all we care about is that *if* the
10062 // predicate changes then it only changes from false to true.
10063 //
10064 // A zero step value in itself is not very useful, but there may be places
10065 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
10066 // as general as possible.
10067
10068 // Only handle LE/LT/GE/GT predicates.
10069 if (!ICmpInst::isRelational(Pred))
10070 return None;
10071
10072 bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred);
10073 assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) &&
10074 "Should be greater or less!");
10075
10076 // Check that AR does not wrap.
10077 if (ICmpInst::isUnsigned(Pred)) {
10078 if (!LHS->hasNoUnsignedWrap())
10079 return None;
10080 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
10081 } else {
10082 assert(ICmpInst::isSigned(Pred) &&
10083 "Relational predicate is either signed or unsigned!");
10084 if (!LHS->hasNoSignedWrap())
10085 return None;
10086
10087 const SCEV *Step = LHS->getStepRecurrence(*this);
10088
10089 if (isKnownNonNegative(Step))
10090 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
10091
10092 if (isKnownNonPositive(Step))
10093 return !IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
10094
10095 return None;
10096 }
10097 }
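// Example (illustrative): for LHS = {0,+,1}<nuw> and Pred = ICMP_ULT, the
// predicate "LHS u< RHS" can only flip from true to false as the IV grows,
// so the result is MonotonicallyDecreasing; with ICMP_UGT it would be
// MonotonicallyIncreasing.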
10098
10099 Optional<ScalarEvolution::LoopInvariantPredicate>
10100 ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred,
10101 const SCEV *LHS, const SCEV *RHS,
10102 const Loop *L) {
10103
10104 // If one operand is loop-invariant, force it into the RHS, otherwise bail out.
10105 if (!isLoopInvariant(RHS, L)) {
10106 if (!isLoopInvariant(LHS, L))
10107 return None;
10108
10109 std::swap(LHS, RHS);
10110 Pred = ICmpInst::getSwappedPredicate(Pred);
10111 }
10112
10113 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
10114 if (!ArLHS || ArLHS->getLoop() != L)
10115 return None;
10116
10117 auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred);
10118 if (!MonotonicType)
10119 return None;
10120 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
10121 // true as the loop iterates, and the backedge is control dependent on
10122 // "ArLHS `Pred` RHS" == true then we can reason as follows:
10123 //
10124 // * if the predicate was false in the first iteration then the predicate
10125 // is never evaluated again, since the loop exits without taking the
10126 // backedge.
10127 // * if the predicate was true in the first iteration then it will
10128 // continue to be true for all future iterations since it is
10129 // monotonically increasing.
10130 //
10131 // For both the above possibilities, we can replace the loop varying
10132 // predicate with its value on the first iteration of the loop (which is
10133 // loop invariant).
10134 //
10135 // A similar reasoning applies for a monotonically decreasing predicate, by
10136 // replacing true with false and false with true in the above two bullets.
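// Concrete illustration (assumed IR): for ArLHS = {0,+,1}<nsw>, Pred = s<,
// and an invariant RHS, if the backedge is taken only while {0,+,1} s< RHS
// holds, then every evaluation of the compare yields the same result as the
// loop-invariant check "0 s< RHS".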
10137 bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing;
10138 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);
10139
10140 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
10141 return None;
10142
10143 return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(), RHS);
10144 }
10145
10146 Optional<ScalarEvolution::LoopInvariantPredicate>
10147 ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations(
10148 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
10149 const Instruction *CtxI, const SCEV *MaxIter) {
10150 // Try to prove the following set of facts:
10151 // - The predicate is monotonic in the iteration space.
10152 // - If the check does not fail on the 1st iteration:
10153 // - No overflow will happen during first MaxIter iterations;
10154 // - It will not fail on the MaxIter'th iteration.
10155 // If the check does fail on the 1st iteration, we leave the loop and no
10156 // other checks matter.
10157
10158 // If one operand is loop-invariant, force it into the RHS, otherwise bail out.
10159 if (!isLoopInvariant(RHS, L)) {
10160 if (!isLoopInvariant(LHS, L))
10161 return None;
10162
10163 std::swap(LHS, RHS);
10164 Pred = ICmpInst::getSwappedPredicate(Pred);
10165 }
10166
10167 auto *AR = dyn_cast<SCEVAddRecExpr>(LHS);
10168 if (!AR || AR->getLoop() != L)
10169 return None;
10170
10171 // The predicate must be relational (i.e. <, <=, >=, >).
10172 if (!ICmpInst::isRelational(Pred))
10173 return None;
10174
10175 // TODO: Support steps other than +/- 1.
10176 const SCEV *Step = AR->getStepRecurrence(*this);
10177 auto *One = getOne(Step->getType());
10178 auto *MinusOne = getNegativeSCEV(One);
10179 if (Step != One && Step != MinusOne)
10180 return None;
10181
10182 // A type mismatch here means that MaxIter is potentially larger than the max
10183 // unsigned value in the start type, which means we cannot prove no-wrap for
10184 // the indvar.
10185 if (AR->getType() != MaxIter->getType())
10186 return None;
10187
10188 // Value of IV on suggested last iteration.
10189 const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this);
10190 // Does it still meet the requirement?
10191 if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS))
10192 return None;
10193 // Because step is +/- 1 and MaxIter has same type as Start (i.e. it does
10194 // not exceed max unsigned value of this type), this effectively proves
10195 // that there is no wrap during the iteration. To prove that there is no
10196 // signed/unsigned wrap, we need to check that
10197 // Start <= Last for step = 1 or Start >= Last for step = -1.
10198 ICmpInst::Predicate NoOverflowPred =
10199 CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
10200 if (Step == MinusOne)
10201 NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred);
10202 const SCEV *Start = AR->getStart();
10203 if (!isKnownPredicateAt(NoOverflowPred, Start, Last, CtxI))
10204 return None;
10205
10206 // Everything is fine.
10207 return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS);
10208 }
10209
10210 bool ScalarEvolution::isKnownPredicateViaConstantRanges(
10211 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
10212 if (HasSameValue(LHS, RHS))
10213 return ICmpInst::isTrueWhenEqual(Pred);
10214
10215 // This code is split out from isKnownPredicate because it is called from
10216 // within isLoopEntryGuardedByCond.
10217
10218 auto CheckRanges = [&](const ConstantRange &RangeLHS,
10219 const ConstantRange &RangeRHS) {
10220 return RangeLHS.icmp(Pred, RangeRHS);
10221 };
10222
10223 // The check at the top of the function catches the case where the values are
10224 // known to be equal.
10225 if (Pred == CmpInst::ICMP_EQ)
10226 return false;
10227
10228 if (Pred == CmpInst::ICMP_NE) {
10229 if (CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
10230 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)))
10231 return true;
10232 auto *Diff = getMinusSCEV(LHS, RHS);
10233 return !isa<SCEVCouldNotCompute>(Diff) && isKnownNonZero(Diff);
10234 }
10235
10236 if (CmpInst::isSigned(Pred))
10237 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));
10238
10239 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
10240 }
10241
10242 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
10243 const SCEV *LHS,
10244 const SCEV *RHS) {
10245 // Match X to (A + C1)<ExpectedFlags> and Y to (A + C2)<ExpectedFlags>, where
10246 // C1 and C2 are constant integers. If either X or Y are not add expressions,
10247 // consider them as X + 0 and Y + 0 respectively. C1 and C2 are returned via
10248 // OutC1 and OutC2.
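// E.g. (illustrative): with X = (A + 5)<nsw>, Y = A and ExpectedFlags =
// FlagNSW, the match yields OutC1 = 5 and OutC2 = 0, since Y is treated as
// (A + 0) with the expected flags assumed present.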
10249 auto MatchBinaryAddToConst = [this](const SCEV *X, const SCEV *Y,
10250 APInt &OutC1, APInt &OutC2,
10251 SCEV::NoWrapFlags ExpectedFlags) {
10252 const SCEV *XNonConstOp, *XConstOp;
10253 const SCEV *YNonConstOp, *YConstOp;
10254 SCEV::NoWrapFlags XFlagsPresent;
10255 SCEV::NoWrapFlags YFlagsPresent;
10256
10257 if (!splitBinaryAdd(X, XConstOp, XNonConstOp, XFlagsPresent)) {
10258 XConstOp = getZero(X->getType());
10259 XNonConstOp = X;
10260 XFlagsPresent = ExpectedFlags;
10261 }
10262 if (!isa<SCEVConstant>(XConstOp) ||
10263 (XFlagsPresent & ExpectedFlags) != ExpectedFlags)
10264 return false;
10265
10266 if (!splitBinaryAdd(Y, YConstOp, YNonConstOp, YFlagsPresent)) {
10267 YConstOp = getZero(Y->getType());
10268 YNonConstOp = Y;
10269 YFlagsPresent = ExpectedFlags;
10270 }
10271
10272 if (!isa<SCEVConstant>(YConstOp) ||
10273 (YFlagsPresent & ExpectedFlags) != ExpectedFlags)
10274 return false;
10275
10276 if (YNonConstOp != XNonConstOp)
10277 return false;
10278
10279 OutC1 = cast<SCEVConstant>(XConstOp)->getAPInt();
10280 OutC2 = cast<SCEVConstant>(YConstOp)->getAPInt();
10281
10282 return true;
10283 };
10284
10285 APInt C1;
10286 APInt C2;
10287
10288 switch (Pred) {
10289 default:
10290 break;
10291
10292 case ICmpInst::ICMP_SGE:
10293 std::swap(LHS, RHS);
10294 LLVM_FALLTHROUGH;
10295 case ICmpInst::ICMP_SLE:
10296 // (X + C1)<nsw> s<= (X + C2)<nsw> if C1 s<= C2.
10297 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.sle(C2))
10298 return true;
10299
10300 break;
10301
10302 case ICmpInst::ICMP_SGT:
10303 std::swap(LHS, RHS);
10304 LLVM_FALLTHROUGH;
10305 case ICmpInst::ICMP_SLT:
10306 // (X + C1)<nsw> s< (X + C2)<nsw> if C1 s< C2.
10307 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.slt(C2))
10308 return true;
10309
10310 break;
10311
10312 case ICmpInst::ICMP_UGE:
10313 std::swap(LHS, RHS);
10314 LLVM_FALLTHROUGH;
10315 case ICmpInst::ICMP_ULE:
10316 // (X + C1)<nuw> u<= (X + C2)<nuw> for C1 u<= C2.
10317 if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ule(C2))
10318 return true;
10319
10320 break;
10321
10322 case ICmpInst::ICMP_UGT:
10323 std::swap(LHS, RHS);
10324 LLVM_FALLTHROUGH;
10325 case ICmpInst::ICMP_ULT:
10326 // (X + C1)<nuw> u< (X + C2)<nuw> if C1 u< C2.
10327 if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ult(C2))
10328 return true;
10329 break;
10330 }
10331
10332 return false;
10333 }
10334
10335 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
10336 const SCEV *LHS,
10337 const SCEV *RHS) {
10338 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
10339 return false;
10340
10341 // Allowing an arbitrary number of activations of isKnownPredicateViaSplitting
10342 // on the stack can result in exponential time complexity.
10343 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);
10344
10345 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
10346 //
10347 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
10348 // isKnownPredicate. isKnownPredicate is more powerful, but also more
10349 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
10350 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
10351 // use isKnownPredicate later if needed.
10352 return isKnownNonNegative(RHS) &&
10353 isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
10354 isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
10355 }
10356
10357 bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
10358 ICmpInst::Predicate Pred,
10359 const SCEV *LHS, const SCEV *RHS) {
10360 // No need to even try if we know the module has no guards.
10361 if (!HasGuards)
10362 return false;
10363
10364 return any_of(*BB, [&](const Instruction &I) {
10365 using namespace llvm::PatternMatch;
10366
10367 Value *Condition;
10368 return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
10369 m_Value(Condition))) &&
10370 isImpliedCond(Pred, LHS, RHS, Condition, false);
10371 });
10372 }
10373
10374 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
10375 /// protected by a conditional between LHS and RHS. This is used to
10376 /// eliminate casts.
10377 bool
10378 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
10379 ICmpInst::Predicate Pred,
10380 const SCEV *LHS, const SCEV *RHS) {
10381 // Interpret a null as meaning no loop, where there is obviously no guard
10382 // (interprocedural conditions notwithstanding).
10383 if (!L) return true;
10384
10385 if (VerifyIR)
10386 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
10387 "This cannot be done on broken IR!");
10388
10389
10390 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
10391 return true;
10392
10393 BasicBlock *Latch = L->getLoopLatch();
10394 if (!Latch)
10395 return false;
10396
10397 BranchInst *LoopContinuePredicate =
10398 dyn_cast<BranchInst>(Latch->getTerminator());
10399 if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
10400 isImpliedCond(Pred, LHS, RHS,
10401 LoopContinuePredicate->getCondition(),
10402 LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
10403 return true;
10404
10405 // We don't want more than one activation of the following loops on the stack
10406 // -- that can lead to O(n!) time complexity.
10407 if (WalkingBEDominatingConds)
10408 return false;
10409
10410 SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);
10411
10412 // See if we can exploit a trip count to prove the predicate.
10413 const auto &BETakenInfo = getBackedgeTakenInfo(L);
10414 const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
10415 if (LatchBECount != getCouldNotCompute()) {
10416 // We know that Latch branches back to the loop header exactly
10417 // LatchBECount times. This means the backedge condition at Latch is
10418 // equivalent to "{0,+,1} u< LatchBECount".
10419 Type *Ty = LatchBECount->getType();
10420 auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
10421 const SCEV *LoopCounter =
10422 getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
10423 if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
10424 LatchBECount))
10425 return true;
10426 }
10427
10428 // Check conditions due to any @llvm.assume intrinsics.
10429 for (auto &AssumeVH : AC.assumptions()) {
10430 if (!AssumeVH)
10431 continue;
10432 auto *CI = cast<CallInst>(AssumeVH);
10433 if (!DT.dominates(CI, Latch->getTerminator()))
10434 continue;
10435
10436 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
10437 return true;
10438 }
10439
10440 // If the loop is not reachable from the entry block, we risk running into an
10441 // infinite loop as we walk up into the dom tree. These loops do not matter
10442 // anyway, so we just return a conservative answer when we see them.
10443 if (!DT.isReachableFromEntry(L->getHeader()))
10444 return false;
10445
10446 if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
10447 return true;
10448
10449 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
10450 DTN != HeaderDTN; DTN = DTN->getIDom()) {
10451 assert(DTN && "should reach the loop header before reaching the root!");
10452
10453 BasicBlock *BB = DTN->getBlock();
10454 if (isImpliedViaGuard(BB, Pred, LHS, RHS))
10455 return true;
10456
10457 BasicBlock *PBB = BB->getSinglePredecessor();
10458 if (!PBB)
10459 continue;
10460
10461 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
10462 if (!ContinuePredicate || !ContinuePredicate->isConditional())
10463 continue;
10464
10465 Value *Condition = ContinuePredicate->getCondition();
10466
10467 // If we have an edge `E` within the loop body that dominates the only
10468 // latch, the condition guarding `E` also guards the backedge. This
10469 // reasoning works only for loops with a single latch.
10470
10471 BasicBlockEdge DominatingEdge(PBB, BB);
10472 if (DominatingEdge.isSingleEdge()) {
10473 // We're constructively (and conservatively) enumerating edges within the
10474 // loop body that dominate the latch. The dominator tree better agree
10475 // with us on this:
10476 assert(DT.dominates(DominatingEdge, Latch) && "should be!");
10477
10478 if (isImpliedCond(Pred, LHS, RHS, Condition,
10479 BB != ContinuePredicate->getSuccessor(0)))
10480 return true;
10481 }
10482 }
10483
10484 return false;
10485 }
10486
10487 bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
10488 ICmpInst::Predicate Pred,
10489 const SCEV *LHS,
10490 const SCEV *RHS) {
10491 if (VerifyIR)
10492 assert(!verifyFunction(*BB->getParent(), &dbgs()) &&
10493 "This cannot be done on broken IR!");
10494
10495 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove
10496 // the facts (a >= b && a != b) separately. A typical situation is when the
10497 // non-strict comparison is known from ranges and non-equality is known from
10498 // dominating predicates. If we are proving strict comparison, we always try
10499 // to prove non-equality and non-strict comparison separately.
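// E.g. (illustrative): to prove "a u> b", we may prove "a u>= b" from
// constant ranges and "a != b" from a dominating condition; together these
// imply the strict comparison.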
10500 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred);
10501 const bool ProvingStrictComparison = (Pred != NonStrictPredicate);
10502 bool ProvedNonStrictComparison = false;
10503 bool ProvedNonEquality = false;
10504
10505 auto SplitAndProve =
10506 [&](std::function<bool(ICmpInst::Predicate)> Fn) -> bool {
10507 if (!ProvedNonStrictComparison)
10508 ProvedNonStrictComparison = Fn(NonStrictPredicate);
10509 if (!ProvedNonEquality)
10510 ProvedNonEquality = Fn(ICmpInst::ICMP_NE);
10511 if (ProvedNonStrictComparison && ProvedNonEquality)
10512 return true;
10513 return false;
10514 };
10515
10516 if (ProvingStrictComparison) {
10517 auto ProofFn = [&](ICmpInst::Predicate P) {
10518 return isKnownViaNonRecursiveReasoning(P, LHS, RHS);
10519 };
10520 if (SplitAndProve(ProofFn))
10521 return true;
10522 }
10523
10524 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard.
10525 auto ProveViaGuard = [&](const BasicBlock *Block) {
10526 if (isImpliedViaGuard(Block, Pred, LHS, RHS))
10527 return true;
10528 if (ProvingStrictComparison) {
10529 auto ProofFn = [&](ICmpInst::Predicate P) {
10530 return isImpliedViaGuard(Block, P, LHS, RHS);
10531 };
10532 if (SplitAndProve(ProofFn))
10533 return true;
10534 }
10535 return false;
10536 };
10537
10538 // Try to prove (Pred, LHS, RHS) using isImpliedCond.
10539 auto ProveViaCond = [&](const Value *Condition, bool Inverse) {
10540 const Instruction *CtxI = &BB->front();
10541 if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, CtxI))
10542 return true;
10543 if (ProvingStrictComparison) {
10544 auto ProofFn = [&](ICmpInst::Predicate P) {
10545 return isImpliedCond(P, LHS, RHS, Condition, Inverse, CtxI);
10546 };
10547 if (SplitAndProve(ProofFn))
10548 return true;
10549 }
10550 return false;
10551 };
10552
10553 // Starting at the block's predecessor, climb up the predecessor chain, as
10554 // long as we can find predecessors that have unique successors leading to
10555 // the original block.
10556 const Loop *ContainingLoop = LI.getLoopFor(BB);
10557 const BasicBlock *PredBB;
10558 if (ContainingLoop && ContainingLoop->getHeader() == BB)
10559 PredBB = ContainingLoop->getLoopPredecessor();
10560 else
10561 PredBB = BB->getSinglePredecessor();
10562 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
10563 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
10564 if (ProveViaGuard(Pair.first))
10565 return true;
10566
10567 const BranchInst *LoopEntryPredicate =
10568 dyn_cast<BranchInst>(Pair.first->getTerminator());
10569 if (!LoopEntryPredicate ||
10570 LoopEntryPredicate->isUnconditional())
10571 continue;
10572
10573 if (ProveViaCond(LoopEntryPredicate->getCondition(),
10574 LoopEntryPredicate->getSuccessor(0) != Pair.second))
10575 return true;
10576 }
10577
10578 // Check conditions due to any @llvm.assume intrinsics.
10579 for (auto &AssumeVH : AC.assumptions()) {
10580 if (!AssumeVH)
10581 continue;
10582 auto *CI = cast<CallInst>(AssumeVH);
10583 if (!DT.dominates(CI, BB))
10584 continue;
10585
10586 if (ProveViaCond(CI->getArgOperand(0), false))
10587 return true;
10588 }
10589
10590 return false;
10591 }
10592
10593 bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
10594 ICmpInst::Predicate Pred,
10595 const SCEV *LHS,
10596 const SCEV *RHS) {
10597 // Interpret a null as meaning no loop, where there is obviously no guard
10598 // (interprocedural conditions notwithstanding).
10599 if (!L)
10600 return false;
10601
10602 // Both LHS and RHS must be available at loop entry.
10603 assert(isAvailableAtLoopEntry(LHS, L) &&
10604 "LHS is not available at Loop Entry");
10605 assert(isAvailableAtLoopEntry(RHS, L) &&
10606 "RHS is not available at Loop Entry");
10607
10608 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
10609 return true;
10610
10611 return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
10612 }
10613
10614 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
10615 const SCEV *RHS,
10616 const Value *FoundCondValue, bool Inverse,
10617 const Instruction *CtxI) {
10618 // A false condition implies anything. Do not bother analyzing it further.
10619 if (FoundCondValue ==
10620 ConstantInt::getBool(FoundCondValue->getContext(), Inverse))
10621 return true;
10622
10623 if (!PendingLoopPredicates.insert(FoundCondValue).second)
10624 return false;
10625
10626 auto ClearOnExit =
10627 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
10628
10629 // Recursively handle And and Or conditions.
10630 const Value *Op0, *Op1;
10631 if (match(FoundCondValue, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
10632 if (!Inverse)
10633 return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) ||
10634 isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI);
10635 } else if (match(FoundCondValue, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) {
10636 if (Inverse)
10637 return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) ||
10638 isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI);
10639 }
10640
10641 const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
10642 if (!ICI) return false;
10643
10644 // We have found a conditional branch that dominates the loop or controls
10645 // the loop latch. Check to see if it is the comparison we are looking for.
10646 ICmpInst::Predicate FoundPred;
10647 if (Inverse)
10648 FoundPred = ICI->getInversePredicate();
10649 else
10650 FoundPred = ICI->getPredicate();
10651
10652 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
10653 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
10654
10655 return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
10656 }
10657
10658 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
10659 const SCEV *RHS,
10660 ICmpInst::Predicate FoundPred,
10661 const SCEV *FoundLHS, const SCEV *FoundRHS,
10662 const Instruction *CtxI) {
10663 // Balance the types.
10664 if (getTypeSizeInBits(LHS->getType()) <
10665 getTypeSizeInBits(FoundLHS->getType())) {
10666 // For unsigned and equality predicates, try to prove that both found
10667 // operands fit into a narrow unsigned range. If so, try to prove facts in
10668 // narrow types.
10669 if (!CmpInst::isSigned(FoundPred) && !FoundLHS->getType()->isPointerTy()) {
10670 auto *NarrowType = LHS->getType();
10671 auto *WideType = FoundLHS->getType();
10672 auto BitWidth = getTypeSizeInBits(NarrowType);
10673 const SCEV *MaxValue = getZeroExtendExpr(
10674 getConstant(APInt::getMaxValue(BitWidth)), WideType);
10675 if (isKnownPredicate(ICmpInst::ICMP_ULE, FoundLHS, MaxValue) &&
10676 isKnownPredicate(ICmpInst::ICMP_ULE, FoundRHS, MaxValue)) {
10677 const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType);
10678 const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType);
10679 if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS,
10680 TruncFoundRHS, CtxI))
10681 return true;
10682 }
10683 }
10684
10685 if (LHS->getType()->isPointerTy())
10686 return false;
10687 if (CmpInst::isSigned(Pred)) {
10688 LHS = getSignExtendExpr(LHS, FoundLHS->getType());
10689 RHS = getSignExtendExpr(RHS, FoundLHS->getType());
10690 } else {
10691 LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
10692 RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
10693 }
10694 } else if (getTypeSizeInBits(LHS->getType()) >
10695 getTypeSizeInBits(FoundLHS->getType())) {
10696 if (FoundLHS->getType()->isPointerTy())
10697 return false;
10698 if (CmpInst::isSigned(FoundPred)) {
10699 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
10700 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
10701 } else {
10702 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
10703 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
10704 }
10705 }
10706 return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS,
10707 FoundRHS, CtxI);
10708 }
10709
10710 bool ScalarEvolution::isImpliedCondBalancedTypes(
10711 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
10712 ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS,
10713 const Instruction *CtxI) {
10714 assert(getTypeSizeInBits(LHS->getType()) ==
10715 getTypeSizeInBits(FoundLHS->getType()) &&
10716 "Types should be balanced!");
10717 // Canonicalize the query to match the way instcombine will have
10718 // canonicalized the comparison.
10719 if (SimplifyICmpOperands(Pred, LHS, RHS))
10720 if (LHS == RHS)
10721 return CmpInst::isTrueWhenEqual(Pred);
10722 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
10723 if (FoundLHS == FoundRHS)
10724 return CmpInst::isFalseWhenEqual(FoundPred);
10725
10726 // Check to see if we can make the LHS or RHS match.
10727 if (LHS == FoundRHS || RHS == FoundLHS) {
10728 if (isa<SCEVConstant>(RHS)) {
10729 std::swap(FoundLHS, FoundRHS);
10730 FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
10731 } else {
10732 std::swap(LHS, RHS);
10733 Pred = ICmpInst::getSwappedPredicate(Pred);
10734 }
10735 }
10736
10737 // Check whether the found predicate is the same as the desired predicate.
10738 if (FoundPred == Pred)
10739 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI);
10740
10741 // Check whether swapping the found predicate makes it the same as the
10742 // desired predicate.
10743 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
10744 // We can write the implication
10745 // 0. LHS Pred RHS <- FoundLHS SwapPred FoundRHS
10746 // using one of the following ways:
10747 // 1. LHS Pred RHS <- FoundRHS Pred FoundLHS
10748 // 2. RHS SwapPred LHS <- FoundLHS SwapPred FoundRHS
10749 // 3. LHS Pred RHS <- ~FoundLHS Pred ~FoundRHS
10750 // 4. ~LHS SwapPred ~RHS <- FoundLHS SwapPred FoundRHS
10751 // Forms 1. and 2. require swapping the operands of one condition. Don't
10752 // do this if it would break canonical constant/addrec ordering.
10753 if (!isa<SCEVConstant>(RHS) && !isa<SCEVAddRecExpr>(LHS))
10754 return isImpliedCondOperands(FoundPred, RHS, LHS, FoundLHS, FoundRHS,
10755 CtxI);
10756 if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS))
10757 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, CtxI);
10758
10759 // There's no clear preference between forms 3. and 4., try both. Avoid
10760 // forming getNotSCEV of pointer values as the resulting subtract is
10761 // not legal.
10762 if (!LHS->getType()->isPointerTy() && !RHS->getType()->isPointerTy() &&
10763 isImpliedCondOperands(FoundPred, getNotSCEV(LHS), getNotSCEV(RHS),
10764 FoundLHS, FoundRHS, CtxI))
10765 return true;
10766
10767 if (!FoundLHS->getType()->isPointerTy() &&
10768 !FoundRHS->getType()->isPointerTy() &&
10769 isImpliedCondOperands(Pred, LHS, RHS, getNotSCEV(FoundLHS),
10770 getNotSCEV(FoundRHS), CtxI))
10771 return true;
10772
10773 return false;
10774 }
10775
10776 // Unsigned comparison is the same as signed comparison when both operands
10777 // are non-negative or negative.
10778 auto IsSignFlippedPredicate = [](CmpInst::Predicate P1,
10779 CmpInst::Predicate P2) {
10780 assert(P1 != P2 && "Handled earlier!");
10781 return CmpInst::isRelational(P2) &&
10782 P1 == CmpInst::getFlippedSignednessPredicate(P2);
10783 };
10784 if (IsSignFlippedPredicate(Pred, FoundPred) &&
10785 ((isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) ||
10786 (isKnownNegative(FoundLHS) && isKnownNegative(FoundRHS))))
10787 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI);
10788
10789 // Check if we can make progress by sharpening ranges.
10790 if (FoundPred == ICmpInst::ICMP_NE &&
10791 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {
10792
10793 const SCEVConstant *C = nullptr;
10794 const SCEV *V = nullptr;
10795
10796 if (isa<SCEVConstant>(FoundLHS)) {
10797 C = cast<SCEVConstant>(FoundLHS);
10798 V = FoundRHS;
10799 } else {
10800 C = cast<SCEVConstant>(FoundRHS);
10801 V = FoundLHS;
10802 }
10803
10804 // The guarding predicate tells us that C != V. If the known range
10805 // of V is [C, t), we can sharpen the range to [C + 1, t). The
10806 // range we consider has to correspond to the same signedness as the
10807 // predicate we're interested in folding.
10808
10809 APInt Min = ICmpInst::isSigned(Pred) ?
10810 getSignedRangeMin(V) : getUnsignedRangeMin(V);
10811
10812 if (Min == C->getAPInt()) {
10813 // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
10814 // This is true even if (Min + 1) wraps around -- in case of
10815 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).
10816
10817 APInt SharperMin = Min + 1;
10818
10819 switch (Pred) {
10820 case ICmpInst::ICMP_SGE:
10821 case ICmpInst::ICMP_UGE:
10822 // We know V `Pred` SharperMin. If this implies LHS `Pred`
10823 // RHS, we're done.
10824 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
10825 CtxI))
10826 return true;
10827 LLVM_FALLTHROUGH;
10828
10829 case ICmpInst::ICMP_SGT:
10830 case ICmpInst::ICMP_UGT:
10831 // We know from the range information that (V `Pred` Min ||
10832 // V == Min). We know from the guarding condition that !(V
10833 // == Min). This gives us
10834 //
10835 // V `Pred` Min || V == Min && !(V == Min)
10836 // => V `Pred` Min
10837 //
10838 // If V `Pred` Min implies LHS `Pred` RHS, we're done.
10839
10840 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI))
10841 return true;
10842 break;
10843
10844 // `LHS < RHS` and `LHS <= RHS` are handled in the same way as `RHS > LHS`
// and `RHS >= LHS` respectively.
10845 case ICmpInst::ICMP_SLE:
10846 case ICmpInst::ICMP_ULE:
10847 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
10848 LHS, V, getConstant(SharperMin), CtxI))
10849 return true;
10850 LLVM_FALLTHROUGH;
10851
10852 case ICmpInst::ICMP_SLT:
10853 case ICmpInst::ICMP_ULT:
10854 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
10855 LHS, V, getConstant(Min), CtxI))
10856 return true;
10857 break;
10858
10859 default:
10860 // No change
10861 break;
10862 }
10863 }
10864 }
10865
10866 // Check whether the actual condition is beyond sufficient.
10867 if (FoundPred == ICmpInst::ICMP_EQ)
10868 if (ICmpInst::isTrueWhenEqual(Pred))
10869 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI))
10870 return true;
10871 if (Pred == ICmpInst::ICMP_NE)
10872 if (!ICmpInst::isTrueWhenEqual(FoundPred))
10873 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS, CtxI))
10874 return true;
10875
10876 // Otherwise assume the worst.
10877 return false;
10878 }
10879
10880 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
10881 const SCEV *&L, const SCEV *&R,
10882 SCEV::NoWrapFlags &Flags) {
10883 const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
10884 if (!AE || AE->getNumOperands() != 2)
10885 return false;
10886
10887 L = AE->getOperand(0);
10888 R = AE->getOperand(1);
10889 Flags = AE->getNoWrapFlags();
10890 return true;
10891 }
10892
10893 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
10894 const SCEV *Less) {
10895 // We avoid subtracting expressions here because this function is usually
10896 // fairly deep in the call stack (i.e. is called many times).
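// Sketch of the intent (illustrative): computeConstantDifference(X + 3, X)
// yields 3, and for {A+3,+,S} vs. {A,+,S} over the same loop it also yields
// 3, all without constructing a subtraction SCEV.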
10897
10898 // X - X = 0.
10899 if (More == Less)
10900 return APInt(getTypeSizeInBits(More->getType()), 0);
10901
10902 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
10903 const auto *LAR = cast<SCEVAddRecExpr>(Less);
10904 const auto *MAR = cast<SCEVAddRecExpr>(More);
10905
10906 if (LAR->getLoop() != MAR->getLoop())
10907 return None;
10908
10909 // We look at affine expressions only, not for correctness but to keep
10910 // getStepRecurrence cheap.
10911 if (!LAR->isAffine() || !MAR->isAffine())
10912 return None;
10913
10914 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
10915 return None;
10916
10917 Less = LAR->getStart();
10918 More = MAR->getStart();
10919
10920 // fall through
10921 }
10922
10923 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) {
10924 const auto &M = cast<SCEVConstant>(More)->getAPInt();
10925 const auto &L = cast<SCEVConstant>(Less)->getAPInt();
10926 return M - L;
10927 }
10928
10929 SCEV::NoWrapFlags Flags;
10930 const SCEV *LLess = nullptr, *RLess = nullptr;
10931 const SCEV *LMore = nullptr, *RMore = nullptr;
10932 const SCEVConstant *C1 = nullptr, *C2 = nullptr;
10933 // Compare (X + C1) vs X.
10934 if (splitBinaryAdd(Less, LLess, RLess, Flags))
10935 if ((C1 = dyn_cast<SCEVConstant>(LLess)))
10936 if (RLess == More)
10937 return -(C1->getAPInt());
10938
10939 // Compare X vs (X + C2).
10940 if (splitBinaryAdd(More, LMore, RMore, Flags))
10941 if ((C2 = dyn_cast<SCEVConstant>(LMore)))
10942 if (RMore == Less)
10943 return C2->getAPInt();
10944
10945 // Compare (X + C1) vs (X + C2).
10946 if (C1 && C2 && RLess == RMore)
10947 return C2->getAPInt() - C1->getAPInt();
10948
10949 return None;
10950 }
10951
10952 bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
10953 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
10954 const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *CtxI) {
10955 // Try to recognize the following pattern:
10956 //
10957 // FoundRHS = ...
10958 // ...
10959 // loop:
10960 // FoundLHS = {Start,+,W}
10961 // context_bb: // Basic block from the same loop
10962 // known(Pred, FoundLHS, FoundRHS)
10963 //
10964 // If some predicate is known in the context of a loop, it is also known on
10965 // each iteration of this loop, including the first iteration. Therefore, in
10966 // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to
10967 // prove the original pred using this fact.
10968 if (!CtxI)
10969 return false;
10970 const BasicBlock *ContextBB = CtxI->getParent();
10971 // Make sure AR varies in the context block.
10972 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) {
10973 const Loop *L = AR->getLoop();
10974 // Make sure that the context belongs to the loop and executes on the 1st
10975 // iteration (if it ever executes at all).
10976 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
10977 return false;
10978 if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop()))
10979 return false;
10980 return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS);
10981 }
10982
10983 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) {
10984 const Loop *L = AR->getLoop();
10985 // Make sure that the context belongs to the loop and executes on the 1st
10986 // iteration (if it ever executes at all).
10987 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
10988 return false;
10989 if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop()))
10990 return false;
10991 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart());
10992 }
10993
10994 return false;
10995 }
10996
10997 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
10998 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
10999 const SCEV *FoundLHS, const SCEV *FoundRHS) {
11000 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
11001 return false;
11002
11003 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
11004 if (!AddRecLHS)
11005 return false;
11006
11007 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
11008 if (!AddRecFoundLHS)
11009 return false;
11010
11011 // We'd like to let SCEV reason about control dependencies, so we constrain
11012 // both inequalities to be about add recurrences on the same loop. This
11013 // way we can use isLoopEntryGuardedByCond later.
11014
11015 const Loop *L = AddRecFoundLHS->getLoop();
11016 if (L != AddRecLHS->getLoop())
11017 return false;
11018
11019 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1)
11020 //
11021 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C)
11022 // ... (2)
11023 //
11024 // Informal proof for (2), assuming (1) [*]:
11025 //
11026 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**]
11027 //
11028 // Then
11029 //
11030 // FoundLHS s< FoundRHS s< INT_MIN - C
11031 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ]
11032 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ]
11033 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s<
11034 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ]
11035 // <=> FoundLHS + C s< FoundRHS + C
11036 //
11037 // [*]: (1) can be proved by ruling out overflow.
11038 //
11039 // [**]: This can be proved by analyzing all the four possibilities:
11040 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
11041 // (A s>= 0, B s>= 0).
11042 //
11043 // Note:
11044 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
11045 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS
11046 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS
11047 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is
11048 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
11049 // C)".
11050
11051 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
11052 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
11053 if (!LDiff || !RDiff || *LDiff != *RDiff)
11054 return false;
11055
11056 if (LDiff->isMinValue())
11057 return true;
11058
11059 APInt FoundRHSLimit;
11060
11061 if (Pred == CmpInst::ICMP_ULT) {
11062 FoundRHSLimit = -(*RDiff);
11063 } else {
11064 assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
11065 FoundRHSLimit =
APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
11066 }
11067
11068 // Try to prove (1) or (2), as needed.
11069 return isAvailableAtLoopEntry(FoundRHS, L) &&
11070 isLoopEntryGuardedByCond(L, Pred, FoundRHS,
11071 getConstant(FoundRHSLimit));
11072 }
11073
11074 bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
11075 const SCEV *LHS, const SCEV *RHS,
11076 const SCEV *FoundLHS,
11077 const SCEV *FoundRHS, unsigned Depth) {
11078 const PHINode *LPhi = nullptr, *RPhi = nullptr;
11079
11080 auto ClearOnExit = make_scope_exit([&]() {
11081 if (LPhi) {
11082 bool Erased = PendingMerges.erase(LPhi);
11083 assert(Erased && "Failed to erase LPhi!");
11084 (void)Erased;
11085 }
11086 if (RPhi) {
11087 bool Erased = PendingMerges.erase(RPhi);
11088 assert(Erased && "Failed to erase RPhi!");
11089 (void)Erased;
11090 }
11091 });
11092
  // Find the respective Phis and check that they are not already pending.
  if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
    if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
      if (!PendingMerges.insert(Phi).second)
        return false;
      LPhi = Phi;
    }
  if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
    if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
      // If we detect a loop of Phi nodes being processed by this method, for
      // example:
      //
      //   %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
      //   %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
      //
      // we don't want to deal with a case that complex, so we return the
      // conservative answer false.
      if (!PendingMerges.insert(Phi).second)
        return false;
      RPhi = Phi;
    }

  // If none of LHS, RHS is a Phi, nothing to do here.
  if (!LPhi && !RPhi)
    return false;

  // If there is a SCEVUnknown Phi we are interested in, make it left.
  if (!LPhi) {
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
    std::swap(LPhi, RPhi);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
  const BasicBlock *LBB = LPhi->getParent();
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);

  auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
           isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
           isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
  };

  if (RPhi && RPhi->getParent() == LBB) {
    // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
    // If we compare two Phis from the same block, and for each entry block
    // the predicate is true for incoming values from this block, then the
    // predicate is also true for the Phis.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, R))
        return false;
    }
  } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
    // Case two: RHS is an AddRec whose loop header is LBB, i.e. a Phi from
    // the same block that SCEV represents as a recurrence. This means the
    // loop has both an AddRec and an Unknown PHI, so we can compare the
    // incoming values of the AddRec from above the loop and from the latch
    // with the respective incoming values of LPhi.
    // TODO: Generalize to handle loops with many inputs in a header.
    if (LPhi->getNumIncomingValues() != 2) return false;

    auto *RLoop = RAR->getLoop();
    auto *Predecessor = RLoop->getLoopPredecessor();
    assert(Predecessor && "Loop with AddRec with no predecessor?");
    const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
    if (!ProvedEasily(L1, RAR->getStart()))
      return false;
    auto *Latch = RLoop->getLoopLatch();
    assert(Latch && "Loop with AddRec with no latch?");
    const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
    if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
      return false;
  } else {
    // In all other cases go over the inputs of LHS and compare each of them
    // to RHS; the predicate is true for (LHS, RHS) if it is true for all such
    // pairs. At this point RHS is either a non-Phi, or it is a Phi from some
    // block different from LBB.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      // Check that RHS is available in this block.
      if (!dominates(RHS, IncBB))
        return false;
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      // Make sure L does not refer to a value from a potentially previous
      // iteration of a loop.
      if (!properlyDominates(L, IncBB))
        return false;
      if (!ProvedEasily(L, RHS))
        return false;
    }
  }
  return true;
}

bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS,
                                            const Instruction *CtxI) {
  if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS,
                                          CtxI))
    return true;

  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS);
}

/// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
template <typename MinMaxExprType>
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
                                 const SCEV *Candidate) {
  const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
  if (!MinMaxExpr)
    return false;

  return is_contained(MinMaxExpr->operands(), Candidate);
}

static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
                                           ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  // If both sides are affine addrecs for the same loop, with equal
  // steps, and we know the recurrences don't wrap, then we only
  // need to check the predicate on the starting values.
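  //
  // A minimal illustration (hypothetical values, not from the original
  // source): for {4,+,2}<nsw> s< {10,+,2}<nsw> over the same loop, both
  // sides advance by the same step every iteration and nsw rules out
  // either side wrapping past the other, so the comparison holds on all
  // iterations iff it holds for the starts: 4 s< 10.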

  if (!ICmpInst::isRelational(Pred))
    return false;

  const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!LAR)
    return false;
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  if (!RAR)
    return false;
  if (LAR->getLoop() != RAR->getLoop())
    return false;
  if (!LAR->isAffine() || !RAR->isAffine())
    return false;

  if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
    return false;

  SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
    SCEV::FlagNSW : SCEV::FlagNUW;
  if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
    return false;

  return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
}

/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
/// expression?
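/// For example (illustrative, not from the original source):
/// smin(A, B, C) s<= A holds for any A, and likewise A u<= umax(A, B).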
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    return
        // min(A, ...) <= A
        IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    return
        // min(A, ...) <= A
        IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}

bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(RHS->getType()) &&
         "LHS and RHS have different sizes?");
  assert(getTypeSizeInBits(FoundLHS->getType()) ==
             getTypeSizeInBits(FoundRHS->getType()) &&
         "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting compile time with analysis of overly big trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;

  // We only want to work with GT comparisons so far.
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) {
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }

  // For unsigned, try to reduce it to the corresponding signed comparison.
  if (Pred == ICmpInst::ICMP_UGT)
    // We can replace an unsigned predicate with its signed counterpart if all
    // involved values are non-negative.
    // TODO: We could have better support for unsigned.
    if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) {
      // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing
      // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us
      // use this fact to prove that LHS and RHS are non-negative.
      const SCEV *MinusOne = getMinusOne(LHS->getType());
      if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
                                FoundRHS) &&
          isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
                                FoundRHS))
        Pred = ICmpInst::ICMP_SGT;
    }

  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigLHS = LHS;
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Checks whether the SGT predicate can be proved trivially or using the
  // found context.
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV. Since we are
    // going to compare the operands to RHS, we should be certain that we don't
    // need any size extensions for this. So let's decline all cases when the
    // sizes of types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
    if (!LHSAddExpr->hasNoSignedWrap())
      return false;

    auto *LL = LHSAddExpr->getOperand(0);
    auto *LR = LHSAddExpr->getOperand(1);
    auto *MinusOne = getMinusOne(RHS->getType());

    // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
    auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
      return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
    };
    // Try to prove the following rules:
    //   (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
    //   (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
    if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
      return true;
  } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
    Value *LL, *LR;
    // FIXME: Once we have SDiv implemented, we can get rid of this matching.

    using namespace llvm::PatternMatch;

    if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
      // Rules for division.
      // We are going to perform some comparisons with Denominator and its
      // derived expressions. In the general case, creating a SCEV for it may
      // lead to a complex analysis of the entire graph, and in particular it
      // can request trip count recalculation for the same loop. Such a query
      // would be cached as SCEVCouldNotCompute to break the infinite
      // recursion. To avoid this, we only want to create SCEVs that are
      // constants in this section. So we bail if Denominator is not a
      // constant.
      if (!isa<ConstantInt>(LR))
        return false;

      auto *Denominator = cast<SCEVConstant>(getSCEV(LR));

      // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
      // then a SCEV for the numerator already exists and matches with FoundLHS.
      auto *Numerator = getExistingSCEV(LL);
      if (!Numerator || Numerator->getType() != FoundLHS->getType())
        return false;

      // Make sure that the numerator matches with FoundLHS and the denominator
      // is positive.
      if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
        return false;

      auto *DTy = Denominator->getType();
      auto *FRHSTy = FoundRHS->getType();
      if (DTy->isPointerTy() != FRHSTy->isPointerTy())
        // One of the types is a pointer and the other one is not. We cannot
        // extend them properly to a wider type, so let us just reject this
        // case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
        // to avoid this check.
        return false;

      // Given that:
      //   FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      //   (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we
      // divide it by Denominator < 4, we will have at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      //   (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given that FoundLHS > -3, FoundLHS is at least -2.
      // If we divide it by Denominator > 2, then:
      //   1. If FoundLHS is negative, then the result is 0.
      //   2. If FoundLHS is non-negative, then the result is non-negative.
      // Either way, the result is non-negative.
      auto *MinusOne = getMinusOne(WTy);
      auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
      if (isKnownNegative(RHS) &&
          IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
        return true;
    }
  }

  // If our expression contained SCEVUnknown Phis, and we split it down and now
  // need to prove something for them, try to prove the predicate for all
  // possible incoming values of those Phis.
  if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
    return true;

  return false;
}

static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // zext x u<= sext x, sext x s<= zext x
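  //
  // Concretely (an illustrative i8-to-i16 example, not from the original
  // source): for x = -1, zext x = 255 while sext x = 0xFFFF, which is 65535
  // unsigned and -1 signed. So zext x u<= sext x (255 u<= 65535) and
  // sext x s<= zext x (-1 s<= 255). For non-negative x the two extensions
  // agree, so both orderings hold for every x.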
  switch (Pred) {
  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE: {
    // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt.
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE: {
    // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt.
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  default:
    break;
  }
  return false;
}
11482
11483 bool
isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,const SCEV * LHS,const SCEV * RHS)11484 ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
11485 const SCEV *LHS, const SCEV *RHS) {
11486 return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
11487 isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
11488 IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
11489 IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
11490 isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
11491 }

bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile time impact of this optimization.
    return false;

  Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
  ConstantRange FoundLHSRange =
      ConstantRange::makeExactICmpRegion(Pred, ConstFoundRHS);

  // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
  ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));

  // We can also compute the range of values for `LHS` that satisfy the
  // consequent, "`LHS` `Pred` `RHS`":
  const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
  // The antecedent implies the consequent if every value of `LHS` that
  // satisfies the antecedent also satisfies the consequent.
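  //
  // A small worked example (hypothetical values, not from the original
  // source): from the antecedent "FoundLHS u< 8" we get
  // FoundLHSRange = [0, 8). If LHS = FoundLHS + 2, then LHSRange = [2, 10),
  // and a consequent "LHS u< 10" holds for every value in that range, so
  // the implication is proved.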
  return LHSRange.icmp(Pred, ConstRHS);
}

bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
                                        bool IsSigned) {
  assert(isKnownPositive(Stride) && "Positive stride expected!");

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());
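
  // Illustration of the checks below (hypothetical signed i8 values, not
  // from the original source): with getSignedRangeMax(RHS) = 120 and a
  // stride of 16, the last value that still takes the backedge can be as
  // large as 119, and its increment 119 + 16 = 135 > 127 would overflow
  // i8, so we must report that overflow is possible.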

  if (IsSigned) {
    APInt MaxRHS = getSignedRangeMax(RHS);
    APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
    return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
  }

  APInt MaxRHS = getUnsignedRangeMax(RHS);
  APInt MaxValue = APInt::getMaxValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
  return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
}

bool ScalarEvolution::canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
                                        bool IsSigned) {

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MinRHS = getSignedRangeMin(RHS);
    APInt MinValue = APInt::getSignedMinValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
    return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
  }

  APInt MinRHS = getUnsignedRangeMin(RHS);
  APInt MinValue = APInt::getMinValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
  return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
}

const SCEV *ScalarEvolution::getUDivCeilSCEV(const SCEV *N, const SCEV *D) {
  // umin(N, 1) + floor((N - umin(N, 1)) / D)
  // This is equivalent to "1 + floor((N - 1) / D)" for N != 0. The umin
  // expression fixes the case of N=0.
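  //
  // Worked examples (illustrative): for N = 7, D = 3 this gives
  // umin(7, 1) + floor((7 - 1) / 3) = 1 + 2 = 3 = ceil(7 / 3); for N = 0
  // it gives 0 + floor(0 / D) = 0, avoiding the wrap that the naive
  // "1 + floor((N - 1) / D)" would hit at N = 0.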
  const SCEV *MinNOne = getUMinExpr(N, getOne(N->getType()));
  const SCEV *NMinusOne = getMinusSCEV(N, MinNOne);
  return getAddExpr(MinNOne, getUDivExpr(NMinusOne, D));
}

const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start,
                                                    const SCEV *Stride,
                                                    const SCEV *End,
                                                    unsigned BitWidth,
                                                    bool IsSigned) {
  // The logic in this function assumes we can represent a positive stride.
  // If we can't, the backedge-taken count must be zero.
  if (IsSigned && BitWidth == 1)
    return getZero(Stride->getType());

  // This code has only been closely audited for negative strides in the
  // unsigned comparison case; it may be correct for signed comparison, but
  // that needs to be established.
  assert((!IsSigned || !isKnownNonPositive(Stride)) &&
         "Stride is expected strictly positive for signed case!");

  // Calculate the maximum backedge count based on the range of values
  // permitted by Start, End, and Stride.
  APInt MinStart =
      IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);

  APInt MinStride =
      IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);

  // We assume either the stride is positive, or the backedge-taken count
  // is zero. So force StrideForMaxBECount to be at least one.
  APInt One(BitWidth, 1);
  APInt StrideForMaxBECount = IsSigned ? APIntOps::smax(One, MinStride)
                                       : APIntOps::umax(One, MinStride);

  APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
                            : APInt::getMaxValue(BitWidth);
  APInt Limit = MaxValue - (StrideForMaxBECount - 1);

  // Although End can be a MAX expression we estimate MaxEnd considering only
  // the case End = RHS of the loop termination condition. This is safe because
  // in the other case (End - Start) is zero, leading to a zero maximum backedge
  // taken count.
  APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
                          : APIntOps::umin(getUnsignedRangeMax(End), Limit);

  // MaxBECount = ceil((max(MaxEnd, MinStart) - MinStart) / Stride)
  MaxEnd = IsSigned ? APIntOps::smax(MaxEnd, MinStart)
                    : APIntOps::umax(MaxEnd, MinStart);
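
  // As a rough illustration (hypothetical unsigned i8 values, not from the
  // original source): MinStart = 0 and MinStride = 4 give
  // StrideForMaxBECount = 4 and Limit = 255 - 3 = 252; with MaxEnd = 200
  // the result below is ceil((200 - 0) / 4) = 50 backedges at most.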

  return getUDivCeilSCEV(getConstant(MaxEnd - MinStart) /* Delta */,
                         getConstant(StrideForMaxBECount) /* Step */);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  auto canAssumeNoSelfWrap = [&](const SCEVAddRecExpr *AR) {
    // Can we prove this loop *must* be UB if overflow of IV occurs?
    // Reasoning goes as follows:
    // * Suppose the IV did self wrap.
    // * If Stride evenly divides the iteration space, then once wrap
    //   occurs, the loop must revisit the same values.
    // * We know that RHS is invariant, and that none of those values
    //   caused this exit to be taken previously. Thus, this exit is
    //   dynamically dead.
    // * If this is the sole exit, then a dead exit implies the loop
    //   must be infinite if there are no abnormal exits.
    // * If the loop were infinite, then it must either not be mustprogress
    //   or have side effects. Otherwise, it must be UB.
    // * It can't (by assumption) be UB, so we have contradicted our
    //   premise and can conclude the IV did not in fact self-wrap.
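    //
    // Concretely (an illustrative case, not from the original source): an
    // i8 IV with a constant step of 4 walks a cycle of 256 / 4 = 64 values;
    // once it wraps it replays exactly that sequence, so no new value can
    // ever satisfy the exit condition that the pre-wrap values did not.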
    if (!isLoopInvariant(RHS, L))
      return false;

    auto *StrideC = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this));
    if (!StrideC || !StrideC->getAPInt().isPowerOf2())
      return false;

    if (!ControlsExit || !loopHasNoAbnormalExits(L))
      return false;

    return loopIsFiniteByAssumption(L);
  };

  if (!IV) {
    if (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS)) {
      const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ZExt->getOperand());
      if (AR && AR->getLoop() == L && AR->isAffine()) {
        auto Flags = AR->getNoWrapFlags();
        if (!hasFlags(Flags, SCEV::FlagNW) && canAssumeNoSelfWrap(AR)) {
          Flags = setFlags(Flags, SCEV::FlagNW);

          SmallVector<const SCEV *> Operands{AR->operands()};
          Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

          setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags);
        }
        if (AR->hasNoUnsignedWrap()) {
          // Emulate what getZeroExtendExpr would have done during construction
          // if we'd been able to infer the fact just above at that time.
          const SCEV *Step = AR->getStepRecurrence(*this);
          Type *Ty = ZExt->getType();
          auto *S = getAddRecExpr(
              getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 0),
              getZeroExtendExpr(Step, Ty, 0), L, AR->getNoWrapFlags());
          IV = dyn_cast<SCEVAddRecExpr>(S);
        }
      }
    }
  }

  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  // A precondition of this method is that the condition being analyzed
  // reaches an exiting branch which dominates the latch. Given that, we can
  // assume that an increment which violates the nowrap specification and
  // produces poison must cause undefined behavior when the resulting poison
  // value is branched upon and thus we can conclude that the backedge is
  // taken no more often than would be required to produce that poison value.
  // Note that a well defined loop can exit on the iteration which violates
  // the nowrap specification if there is another exit (either explicit or
  // implicit/exceptional) which causes the loop to execute before the
  // exiting instruction we're analyzing would trigger UB.
  auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
  bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType);
  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects. Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove correctness
    // of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) the loop is guaranteed to be finite (e.g. is mustprogress and has
    //    no side effects within the loop)
    // c) loop has a single static exit (with no abnormal exits)
    //
    // Precondition a) implies that if the stride is negative, this is a single
    // trip loop. The backedge taken count formula reduces to zero in this case.
    //
    // Preconditions b) and c) combine to imply that if rhs is invariant in L,
    // then a zero stride means the backedge can't be taken without executing
    // undefined behavior.
    //
    // The positive stride case is the same as isKnownPositive(Stride) returning
    // true (original behavior of the function).
    //
    if (PredicatedIV || !NoWrap || !loopIsFiniteByAssumption(L) ||
        !loopHasNoAbnormalExits(L))
      return getCouldNotCompute();

    // This bailout is protecting the logic in computeMaxBECountForLT which
    // has not yet been sufficiently audited or tested with negative strides.
    // We used to filter out all known-non-positive cases here; we're in the
    // process of being less restrictive bit by bit.
    if (IsSigned && isKnownNonPositive(Stride))
      return getCouldNotCompute();

    if (!isKnownNonZero(Stride)) {
      // If we have a step of zero, and RHS isn't invariant in L, we don't know
      // if it might eventually be greater than start and if so, on which
      // iteration. We can't even produce a useful upper bound.
      if (!isLoopInvariant(RHS, L))
        return getCouldNotCompute();

      // We allow a potentially zero stride, but we need to divide by stride
      // below. Since the loop can't be infinite and this check must control
      // the sole exit, we can infer the exit must be taken on the first
      // iteration (e.g. backedge count = 0) if the stride is zero. Given that,
      // we know the numerator in the divides below must be zero, so we can
      // pick an arbitrary non-zero value for the denominator (e.g. stride)
      // and produce the right result.
      // FIXME: Handle the case where Stride is poison?
      auto wouldZeroStrideBeUB = [&]() {
        // Proof by contradiction. Suppose the stride were zero. If we can
        // prove that the backedge *is* taken on the first iteration, then since
        // we know this condition controls the sole exit, we must have an
        // infinite loop. We can't have a (well defined) infinite loop per
        // check just above.
        // Note: The (Start - Stride) term is used to get the start' term from
        // (start' + stride,+,stride). Remember that we only care about the
        // result of this expression when stride == 0 at runtime.
        auto *StartIfZero = getMinusSCEV(IV->getStart(), Stride);
        return isLoopEntryGuardedByCond(L, Cond, StartIfZero, RHS);
      };
      if (!wouldZeroStrideBeUB()) {
        Stride = getUMaxExpr(Stride, getOne(Stride->getType()));
      }
    }
  } else if (!Stride->isOne() && !NoWrap) {
    auto isUBOnWrap = [&]() {
      // From no-self-wrap, we need to then prove no-(un)signed-wrap. This
      // follows trivially from the fact that every (un)signed-wrapped, but
      // not self-wrapped, value must be less than the last value before
      // (un)signed wrap. Since we know that last value didn't exit, nor
      // will any smaller one.
      return canAssumeNoSelfWrap(IV);
    };

    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow. Relaxed no-overflow
    // conditions exploit NoWrapFlags, allowing us to optimize in the
    // presence of undefined behavior, as in C.
    if (canIVOverflowOnLT(RHS, Stride, IsSigned) && !isUBOnWrap())
      return getCouldNotCompute();
  }

  // On all paths just preceding, we established the following invariant:
  // IV can be assumed not to overflow up to and including the exiting
  // iteration. We proved this in one of two ways:
  // 1) We can show overflow doesn't occur before the exiting iteration
  //    1a) canIVOverflowOnLT, and 1b) step of one
  // 2) We can show that if overflow occurs, the loop must execute UB
  //    before any possible exit.
  // Note that we have not yet proved RHS invariant (in general).

  const SCEV *Start = IV->getStart();

  // Preserve pointer-typed Start/RHS to pass to isLoopEntryGuardedByCond.
  // If we convert to integers, isLoopEntryGuardedByCond will miss some cases.
  // Use integer-typed versions for actual computation; we can't subtract
  // pointers in general.
  const SCEV *OrigStart = Start;
  const SCEV *OrigRHS = RHS;
  if (Start->getType()->isPointerTy()) {
    Start = getLosslessPtrToIntExpr(Start);
    if (isa<SCEVCouldNotCompute>(Start))
      return Start;
  }
  if (RHS->getType()->isPointerTy()) {
    RHS = getLosslessPtrToIntExpr(RHS);
    if (isa<SCEVCouldNotCompute>(RHS))
      return RHS;
  }

  // When the RHS is not invariant, we do not know the end bound of the loop and
  // cannot calculate the ExactBECount needed by ExitLimit. However, we can
  // calculate the MaxBECount, given the start, stride and max value for the end
  // bound of the loop (RHS), and the fact that IV does not overflow (which is
  // checked above).
  if (!isLoopInvariant(RHS, L)) {
    const SCEV *MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
    return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                     false /*MaxOrZero*/, Predicates);
  }

  // We use the expression (max(End,Start)-Start)/Stride to describe the
  // backedge count, as if the backedge is taken at least once max(End,Start)
  // is End and so the result is as above, and if not max(End,Start) is Start
  // so we get a backedge count of zero.
  const SCEV *BECount = nullptr;
  auto *OrigStartMinusStride = getMinusSCEV(OrigStart, Stride);
  assert(isAvailableAtLoopEntry(OrigStartMinusStride, L) && "Must be!");
  assert(isAvailableAtLoopEntry(OrigStart, L) && "Must be!");
  assert(isAvailableAtLoopEntry(OrigRHS, L) && "Must be!");
  // Can we prove max(RHS, Start) > Start - Stride?
  if (isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigStart) &&
      isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigRHS)) {
    // In this case, we can use a refined formula for computing backedge taken
    // count. The general formula remains:
    //   "End-Start /uceiling Stride" where "End = max(RHS,Start)"
    // We want to use the alternate formula:
    //   "((End - 1) - (Start - Stride)) /u Stride"
    // Let's do a quick case analysis to show these are equivalent under
    // our precondition that max(RHS,Start) > Start - Stride.
    // * For RHS <= Start, the backedge-taken count must be zero.
    //   "((End - 1) - (Start - Stride)) /u Stride" reduces to
    //   "((Start - 1) - (Start - Stride)) /u Stride" which simplifies to
    //   "Stride - 1 /u Stride" which is indeed zero for all non-zero values
    //   of Stride. For 0 stride, we've used umax(1, Stride) above, reducing
    //   this to the stride of 1 case.
    // * For RHS >= Start, the backedge count must be "RHS-Start /uceil Stride".
    //   "((End - 1) - (Start - Stride)) /u Stride" reduces to
    //   "((RHS - 1) - (Start - Stride)) /u Stride" reassociates to
    //   "((RHS - (Start - Stride) - 1) /u Stride".
    //   Our preconditions trivially imply no overflow in that form.
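    //
    // A numeric check (hypothetical values, not from the original source):
    // Start = 10, Stride = 3, RHS = 20. The IV runs i = 10, 13, 16, 19 and
    // exits at 22, taking the backedge once for each of those four values;
    // the alternate formula gives ((20 - 1) - (10 - 3)) /u 3 = 12 /u 3 = 4,
    // matching the uceil form (20 - 10) /uceil 3 = 4.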
    const SCEV *MinusOne = getMinusOne(Stride->getType());
    const SCEV *Numerator =
        getMinusSCEV(getAddExpr(RHS, MinusOne), getMinusSCEV(Start, Stride));
    BECount = getUDivExpr(Numerator, Stride);
  }

  const SCEV *BECountIfBackedgeTaken = nullptr;
  if (!BECount) {
    auto canProveRHSGreaterThanEqualStart = [&]() {
      auto CondGE = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
      if (isLoopEntryGuardedByCond(L, CondGE, OrigRHS, OrigStart))
        return true;

      // (RHS > Start - 1) implies RHS >= Start.
      // * "RHS >= Start" is trivially equivalent to "RHS > Start - 1" if
      //   "Start - 1" doesn't overflow.
      // * For signed comparison, if Start - 1 does overflow, it's equal
      //   to INT_MAX, and "RHS >s INT_MAX" is trivially false.
      // * For unsigned comparison, if Start - 1 does overflow, it's equal
      //   to UINT_MAX, and "RHS >u UINT_MAX" is trivially false.
      //
      // FIXME: Should isLoopEntryGuardedByCond do this for us?
      auto CondGT = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
      auto *StartMinusOne = getAddExpr(OrigStart,
                                       getMinusOne(OrigStart->getType()));
      return isLoopEntryGuardedByCond(L, CondGT, OrigRHS, StartMinusOne);
    };

    // If we know that RHS >= Start in the context of loop, then we know that
    // max(RHS, Start) = RHS at this point.
    const SCEV *End;
    if (canProveRHSGreaterThanEqualStart()) {
      End = RHS;
    } else {
      // If RHS < Start, the backedge will be taken zero times. So in
      // general, we can write the backedge-taken count as:
      //
      //     RHS >= Start ? ceil(RHS - Start) / Stride : 0
      //
      // We convert it to the following to make it more convenient for SCEV:
      //
      //     ceil(max(RHS, Start) - Start) / Stride
      End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);

      // See what would happen if we assume the backedge is taken. This is
      // used to compute MaxBECount.
      BECountIfBackedgeTaken = getUDivCeilSCEV(getMinusSCEV(RHS, Start), Stride);
    }

    // At this point, we know:
    //
    // 1. If IsSigned, Start <=s End; otherwise, Start <=u End
    // 2. The index variable doesn't overflow.
    //
    // Therefore, we know N exists such that
    // (Start + Stride * N) >= End, and computing "(Start + Stride * N)"
    // doesn't overflow.
    //
    // Using this information, try to prove whether the addition in
    // "(End - Start) + (Stride - 1)" has unsigned overflow.
    const SCEV *One = getOne(Stride->getType());
    bool MayAddOverflow = [&] {
      if (auto *StrideC = dyn_cast<SCEVConstant>(Stride)) {
        if (StrideC->getAPInt().isPowerOf2()) {
          // Suppose Stride is a power of two, and Start/End are unsigned
          // integers. Let UMAX be the largest representable unsigned
          // integer.
          //
          // By the preconditions of this function, we know
          // "(Start + Stride * N) >= End", and this doesn't overflow.
          // As a formula:
          //
          //   End <= (Start + Stride * N) <= UMAX
          //
          // Subtracting Start from all the terms:
          //
          //   End - Start <= Stride * N <= UMAX - Start
          //
          // Since Start is unsigned, UMAX - Start <= UMAX. Therefore:
          //
          //   End - Start <= Stride * N <= UMAX
          //
          // Stride * N is a multiple of Stride. Therefore,
          //
          //   End - Start <= Stride * N <= UMAX - (UMAX mod Stride)
          //
          // Since Stride is a power of two, UMAX + 1 is divisible by Stride.
          // Therefore, UMAX mod Stride == Stride - 1. So we can write:
          //
          //   End - Start <= Stride * N <= UMAX - Stride + 1
          //
          // Dropping the middle term:
          //
          //   End - Start <= UMAX - Stride + 1
          //
          // Adding Stride - 1 to both sides:
          //
          //   (End - Start) + (Stride - 1) <= UMAX
          //
          // In other words, the addition doesn't have unsigned overflow.
          //
          // A similar proof works if we treat Start/End as signed values.
          // Just rewrite steps before "End - Start <= Stride * N <= UMAX" to
          // use signed max instead of unsigned max. Note that we're trying
          // to prove a lack of unsigned overflow in either case.
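          //
          // Numeric spot-check (illustrative i8 values, not from the
          // original source): Stride = 4, UMAX = 255. Then
          // UMAX mod Stride = 3 = Stride - 1, the bound says
          // End - Start <= 255 - 4 + 1 = 252, and adding Stride - 1 = 3
          // stays within 255.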
          return false;
        }
      }
      if (Start == Stride || Start == getMinusSCEV(Stride, One)) {
        // If Start is equal to Stride, (End - Start) + (Stride - 1) == End - 1.
        // If !IsSigned, 0 <u Stride == Start <=u End; so 0 <u End - 1 <u End.
        // If IsSigned, 0 <s Stride == Start <=s End; so 0 <s End - 1 <s End.
        //
        // If Start is equal to Stride - 1, (End - Start) + Stride - 1 == End.
        return false;
      }
      return true;
    }();

    const SCEV *Delta = getMinusSCEV(End, Start);
    if (!MayAddOverflow) {
      // floor((D + (S - 1)) / S)
      // We prefer this formulation if it's legal because it's fewer operations.
      BECount =
          getUDivExpr(getAddExpr(Delta, getMinusSCEV(Stride, One)), Stride);
    } else {
      BECount = getUDivCeilSCEV(Delta, Stride);
    }
  }

  const SCEV *MaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount)) {
    MaxBECount = BECount;
  } else if (BECountIfBackedgeTaken &&
             isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    MaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else {
    MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
  }

  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
                                     const Loop *L, bool IsSigned,
                                     bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV > Invariant
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
  bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType);
  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken count
  // will not generate any unsigned overflow. Relaxed no-overflow conditions
  // exploit NoWrapFlags, allowing us to optimize in the presence of undefined
  // behavior, as in C.
  if (!Stride->isOne() && !NoWrap)
    if (canIVOverflowOnGT(RHS, Stride, IsSigned))
      return getCouldNotCompute();

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) {
    // If we know that Start >= RHS in the context of loop, then we know that
    // min(RHS, Start) = RHS at this point.
    if (isLoopEntryGuardedByCond(
            L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS))
      End = RHS;
    else
      End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);
  }

  if (Start->getType()->isPointerTy()) {
    Start = getLosslessPtrToIntExpr(Start);
    if (isa<SCEVCouldNotCompute>(Start))
      return Start;
  }
  if (End->getType()->isPointerTy()) {
    End = getLosslessPtrToIntExpr(End);
    if (isa<SCEVCouldNotCompute>(End))
      return End;
  }

  // Compute ((Start - End) + (Stride - 1)) / Stride.
  // FIXME: This can overflow. Holding off on fixing this for now;
  // howManyGreaterThans will hopefully be gone soon.
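  //
  // Worked example (hypothetical values, not from the original source):
  // a loop counting down through i = 20, 17, 14, 11 with exit check
  // i > 10 takes the backedge 4 times; with Start = 20, End = 10 and
  // Stride = 3 (the negated step), the formula gives
  // ((20 - 10) + 2) / 3 = 4.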
  const SCEV *One = getOne(Stride->getType());
  const SCEV *BECount = getUDivExpr(
      getAddExpr(getMinusSCEV(Start, End), getMinusSCEV(Stride, One)), Stride);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                            : getUnsignedRangeMax(Start);

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression we estimate MinEnd considering only
  // the case End = RHS. This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
      IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
               : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);

  const SCEV *MaxBECount = isa<SCEVConstant>(BECount)
                               ? BECount
                               : getUDivCeilSCEV(getConstant(MaxStart - MinEnd),
                                                 getConstant(MinStride));

  if (isa<SCEVCouldNotCompute>(MaxBECount))
    MaxBECount = BECount;

  return ExitLimit(BECount, MaxBECount, false, Predicates);
}

const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(operands());
      Operands[0] = SE.getZero(SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                             getNoWrapFlags(FlagNW));
      if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
            Range.subtract(SC->getAPInt()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
    return SE.getCouldNotCompute();

  // Okay at this point we know that all elements of the chrec are constants and
  // that the start element is zero.

  // First check to see if the range contains zero. If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getZero(getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range

    // We know that zero is in the range. If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value. Also note that we already checked for a full range.
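    //
    // Worked example (illustrative): for {0,+,3} and Range = [0, 10),
    // A = 3, End = 10 - 1 = 9, and ExitVal = (9 + 3) /u 3 = 4. Evaluating
    // the chrec at 4 yields 12, which is outside the range (so iteration 4
    // exits), while at 3 it yields 9, still inside -- matching the checks
    // below.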
    APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
    APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value. If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range. This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
               ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  }

  if (isQuadratic()) {
    if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
      return SE.getConstant(S.getValue());
  }

  return SE.getCouldNotCompute();
}

const SCEVAddRecExpr *
SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
  assert(getNumOperands() > 1 && "AddRec with zero step?");
  // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
  // but in this case we cannot guarantee that the value returned will be an
  // AddRec because SCEV does not have a fixed point where it stops
  // simplification: it is legal to return ({rec1} + {rec2}). For example, it
  // may happen if we reach arithmetic depth limit while simplifying. So we
  // construct the returned value explicitly.
  SmallVector<const SCEV *, 3> Ops;
  // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
  // (this + Step) is {A+B,+,B+C,+...,+,N}.
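  // For instance (illustrative), the loop below turns {A,+,B,+,C} into
  // {A+B,+,B+C,+,C}: each operand is summed with its successor, and the
  // last operand is carried over unchanged.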
  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
  // We know that the last operand is not a constant zero (otherwise it would
  // have been popped out earlier). This guarantees us that if the result has
  // the same last operand, then it will also not be popped out, meaning that
  // the returned value will be an AddRec.
  const SCEV *Last = getOperand(getNumOperands() - 1);
  assert(!Last->isZero() && "Recurrency with zero step?");
  Ops.push_back(Last);
  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
                                               SCEV::FlagAnyWrap));
}

// Return true when S contains at least one undef value.
static inline bool containsUndefs(const SCEV *S) {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    return false;
  });
}

/// Return the size of an element read or written by Inst.
const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
  Type *Ty;
  if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
    Ty = Store->getValueOperand()->getType();
  else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
    Ty = Load->getType();
  else
    return nullptr;

  Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
  return getSizeOfExpr(ETy, Ty);
}

//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(getValPtr());
  // this now dangles!
}

void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  Value *Old = getValPtr();
  SmallVector<User *, 16> Worklist(Old->users());
  SmallPtrSet<User *, 8> Visited;
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle. Postpone
    // that until everything else is done.
    if (U == Old)
      continue;
    if (!Visited.insert(U).second)
      continue;
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->eraseValueFromMap(U);
    llvm::append_range(Worklist, U->users());
  }
  // Delete the Old value.
  if (PHINode *PN = dyn_cast<PHINode>(Old))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(Old);
  // this now dangles!
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
    : CallbackVH(V), SE(se) {}
12345
12346 //===----------------------------------------------------------------------===//
12347 // ScalarEvolution Class Implementation
12348 //===----------------------------------------------------------------------===//
12349
ScalarEvolution(Function & F,TargetLibraryInfo & TLI,AssumptionCache & AC,DominatorTree & DT,LoopInfo & LI)12350 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
12351 AssumptionCache &AC, DominatorTree &DT,
12352 LoopInfo &LI)
12353 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
12354 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
12355 LoopDispositions(64), BlockDispositions(64) {
12356 // To use guards for proving predicates, we need to scan every instruction in
12357 // relevant basic blocks, and not just terminators. Doing this is a waste of
12358 // time if the IR does not actually contain any calls to
12359 // @llvm.experimental.guard, so do a quick check and remember this beforehand.
12360 //
12361 // This pessimizes the case where a pass that preserves ScalarEvolution wants
12362 // to _add_ guards to the module when there weren't any before, and wants
12363 // ScalarEvolution to optimize based on those guards. For now we prefer to be
12364 // efficient in lieu of being smart in that rather obscure case.
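//
// For reference, a guard call in the IR looks like (illustrative):
//   call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]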
12365
12366 auto *GuardDecl = F.getParent()->getFunction(
12367 Intrinsic::getName(Intrinsic::experimental_guard));
12368 HasGuards = GuardDecl && !GuardDecl->use_empty();
12369 }
12370
12371 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
12372 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
12373 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
12374 ValueExprMap(std::move(Arg.ValueExprMap)),
12375 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
12376 PendingPhiRanges(std::move(Arg.PendingPhiRanges)),
12377 PendingMerges(std::move(Arg.PendingMerges)),
12378 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)),
12379 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
12380 PredicatedBackedgeTakenCounts(
12381 std::move(Arg.PredicatedBackedgeTakenCounts)),
12382 ConstantEvolutionLoopExitValue(
12383 std::move(Arg.ConstantEvolutionLoopExitValue)),
12384 ValuesAtScopes(std::move(Arg.ValuesAtScopes)),
12385 LoopDispositions(std::move(Arg.LoopDispositions)),
12386 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)),
12387 BlockDispositions(std::move(Arg.BlockDispositions)),
12388 UnsignedRanges(std::move(Arg.UnsignedRanges)),
12389 SignedRanges(std::move(Arg.SignedRanges)),
12390 UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
12391 UniquePreds(std::move(Arg.UniquePreds)),
12392 SCEVAllocator(std::move(Arg.SCEVAllocator)),
12393 LoopUsers(std::move(Arg.LoopUsers)),
12394 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)),
12395 FirstUnknown(Arg.FirstUnknown) {
12396 Arg.FirstUnknown = nullptr;
12397 }
12398
12399 ScalarEvolution::~ScalarEvolution() {
12400 // Iterate through all the SCEVUnknown instances and call their
12401 // destructors, so that they release their references to their values.
12402 for (SCEVUnknown *U = FirstUnknown; U;) {
12403 SCEVUnknown *Tmp = U;
12404 U = U->Next;
12405 Tmp->~SCEVUnknown();
12406 }
12407 FirstUnknown = nullptr;
12408
12409 ExprValueMap.clear();
12410 ValueExprMap.clear();
12411 HasRecMap.clear();
12412 BackedgeTakenCounts.clear();
12413 PredicatedBackedgeTakenCounts.clear();
12414
12415 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
12416 assert(PendingPhiRanges.empty() && "getRangeRef garbage");
12417 assert(PendingMerges.empty() && "isImpliedViaMerge garbage");
12418 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
12419 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!");
12420 }
12421
12422 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
12423 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
12424 }
12425
12426 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
12427 const Loop *L) {
12428 // Print all inner loops first
12429 for (Loop *I : *L)
12430 PrintLoopInfo(OS, SE, I);
12431
12432 OS << "Loop ";
12433 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12434 OS << ": ";
12435
12436 SmallVector<BasicBlock *, 8> ExitingBlocks;
12437 L->getExitingBlocks(ExitingBlocks);
12438 if (ExitingBlocks.size() != 1)
12439 OS << "<multiple exits> ";
12440
12441 if (SE->hasLoopInvariantBackedgeTakenCount(L))
12442 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n";
12443 else
12444 OS << "Unpredictable backedge-taken count.\n";
12445
12446 if (ExitingBlocks.size() > 1)
12447 for (BasicBlock *ExitingBlock : ExitingBlocks) {
12448 OS << " exit count for " << ExitingBlock->getName() << ": "
12449 << *SE->getExitCount(L, ExitingBlock) << "\n";
12450 }
12451
12452 OS << "Loop ";
12453 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12454 OS << ": ";
12455
12456 if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) {
12457 OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L);
12458 if (SE->isBackedgeTakenCountMaxOrZero(L))
12459 OS << ", actual taken count either this or zero.";
12460 } else {
12461 OS << "Unpredictable max backedge-taken count. ";
12462 }
12463
12464 OS << "\n"
12465 "Loop ";
12466 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12467 OS << ": ";
12468
12469 SCEVUnionPredicate Pred;
12470 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred);
12471 if (!isa<SCEVCouldNotCompute>(PBT)) {
12472 OS << "Predicated backedge-taken count is " << *PBT << "\n";
12473 OS << " Predicates:\n";
12474 Pred.print(OS, 4);
12475 } else {
12476 OS << "Unpredictable predicated backedge-taken count. ";
12477 }
12478 OS << "\n";
12479
12480 if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
12481 OS << "Loop ";
12482 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12483 OS << ": ";
12484 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n";
12485 }
12486 }
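// Illustrative output for a simple counted loop, following the stream writes
// above (values invented for the example):
//   Loop %for.body: backedge-taken count is 99
//   Loop %for.body: max backedge-taken count is 99
//   Loop %for.body: Predicated backedge-taken count is 99
//    Predicates:
//   Loop %for.body: Trip multiple is 100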
12487
12488 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) {
12489 switch (LD) {
12490 case ScalarEvolution::LoopVariant:
12491 return "Variant";
12492 case ScalarEvolution::LoopInvariant:
12493 return "Invariant";
12494 case ScalarEvolution::LoopComputable:
12495 return "Computable";
12496 }
12497 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!");
12498 }
12499
12500 void ScalarEvolution::print(raw_ostream &OS) const {
12501 // ScalarEvolution's implementation of the print method is to print
12502 // out SCEV values of all instructions that are interesting. Doing
12503 // this potentially causes it to create new SCEV objects though,
12504 // which technically conflicts with the const qualifier. This isn't
12505 // observable from outside the class though, so casting away the
12506 // const isn't dangerous.
12507 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
12508
12509 if (ClassifyExpressions) {
12510 OS << "Classifying expressions for: ";
12511 F.printAsOperand(OS, /*PrintType=*/false);
12512 OS << "\n";
12513 for (Instruction &I : instructions(F))
12514 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) {
12515 OS << I << '\n';
12516 OS << " --> ";
12517 const SCEV *SV = SE.getSCEV(&I);
12518 SV->print(OS);
12519 if (!isa<SCEVCouldNotCompute>(SV)) {
12520 OS << " U: ";
12521 SE.getUnsignedRange(SV).print(OS);
12522 OS << " S: ";
12523 SE.getSignedRange(SV).print(OS);
12524 }
12525
12526 const Loop *L = LI.getLoopFor(I.getParent());
12527
12528 const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
12529 if (AtUse != SV) {
12530 OS << " --> ";
12531 AtUse->print(OS);
12532 if (!isa<SCEVCouldNotCompute>(AtUse)) {
12533 OS << " U: ";
12534 SE.getUnsignedRange(AtUse).print(OS);
12535 OS << " S: ";
12536 SE.getSignedRange(AtUse).print(OS);
12537 }
12538 }
12539
12540 if (L) {
12541 OS << "\t\t" "Exits: ";
12542 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
12543 if (!SE.isLoopInvariant(ExitValue, L)) {
12544 OS << "<<Unknown>>";
12545 } else {
12546 OS << *ExitValue;
12547 }
12548
12549 bool First = true;
12550 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
12551 if (First) {
12552 OS << "\t\t" "LoopDispositions: { ";
12553 First = false;
12554 } else {
12555 OS << ", ";
12556 }
12557
12558 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12559 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter));
12560 }
12561
12562 for (auto *InnerL : depth_first(L)) {
12563 if (InnerL == L)
12564 continue;
12565 if (First) {
12566 OS << "\t\t" "LoopDispositions: { ";
12567 First = false;
12568 } else {
12569 OS << ", ";
12570 }
12571
12572 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12573 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL));
12574 }
12575
12576 OS << " }";
12577 }
12578
12579 OS << "\n";
12580 }
12581 }
12582
12583 OS << "Determining loop execution counts for: ";
12584 F.printAsOperand(OS, /*PrintType=*/false);
12585 OS << "\n";
12586 for (Loop *I : LI)
12587 PrintLoopInfo(OS, &SE, I);
12588 }
12589
12590 ScalarEvolution::LoopDisposition
12591 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
12592 auto &Values = LoopDispositions[S];
12593 for (auto &V : Values) {
12594 if (V.getPointer() == L)
12595 return V.getInt();
12596 }
12597 Values.emplace_back(L, LoopVariant);
12598 LoopDisposition D = computeLoopDisposition(S, L);
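// Re-find the entry below rather than reusing `Values`: the recursive
// computeLoopDisposition call can itself populate LoopDispositions and
// invalidate the reference obtained above.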
12599 auto &Values2 = LoopDispositions[S];
12600 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
12601 if (V.getPointer() == L) {
12602 V.setInt(D);
12603 break;
12604 }
12605 }
12606 return D;
12607 }
12608
12609 ScalarEvolution::LoopDisposition
12610 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
12611 switch (S->getSCEVType()) {
12612 case scConstant:
12613 return LoopInvariant;
12614 case scPtrToInt:
12615 case scTruncate:
12616 case scZeroExtend:
12617 case scSignExtend:
12618 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
12619 case scAddRecExpr: {
12620 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
12621
12622 // If L is the addrec's loop, it's computable.
12623 if (AR->getLoop() == L)
12624 return LoopComputable;
12625
12626 // Add recurrences are never invariant in the function-body (null loop).
12627 if (!L)
12628 return LoopVariant;
12629
12630 // Everything that is not defined at loop entry is variant.
12631 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))
12632 return LoopVariant;
12633 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not"
12634 " dominate the contained loop's header?");
12635
12636 // This recurrence is invariant w.r.t. L if AR's loop contains L.
12637 if (AR->getLoop()->contains(L))
12638 return LoopInvariant;
12639
12640 // This recurrence is variant w.r.t. L if any of its operands
12641 // are variant.
12642 for (auto *Op : AR->operands())
12643 if (!isLoopInvariant(Op, L))
12644 return LoopVariant;
12645
12646 // Otherwise it's loop-invariant.
12647 return LoopInvariant;
12648 }
12649 case scAddExpr:
12650 case scMulExpr:
12651 case scUMaxExpr:
12652 case scSMaxExpr:
12653 case scUMinExpr:
12654 case scSMinExpr: {
12655 bool HasVarying = false;
12656 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) {
12657 LoopDisposition D = getLoopDisposition(Op, L);
12658 if (D == LoopVariant)
12659 return LoopVariant;
12660 if (D == LoopComputable)
12661 HasVarying = true;
12662 }
12663 return HasVarying ? LoopComputable : LoopInvariant;
12664 }
12665 case scUDivExpr: {
12666 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
12667 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
12668 if (LD == LoopVariant)
12669 return LoopVariant;
12670 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
12671 if (RD == LoopVariant)
12672 return LoopVariant;
12673 return (LD == LoopInvariant && RD == LoopInvariant) ?
12674 LoopInvariant : LoopComputable;
12675 }
12676 case scUnknown:
12677 // All non-instruction values are loop invariant. All instructions are loop
12678 // invariant if they are not contained in the specified loop.
12679 // Instructions are never considered invariant in the function body
12680 // (null loop) because they are defined within the "loop".
12681 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
12682 return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
12683 return LoopInvariant;
12684 case scCouldNotCompute:
12685 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
12686 }
12687 llvm_unreachable("Unknown SCEV kind!");
12688 }
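// Illustrative example (not from the original source): for {0,+,1}<%L> the
// disposition w.r.t. %L itself is LoopComputable, while w.r.t. any loop
// nested inside %L it is LoopInvariant, since the constant start and step
// are invariant everywhere.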
12689
12690 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
12691 return getLoopDisposition(S, L) == LoopInvariant;
12692 }
12693
12694 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
12695 return getLoopDisposition(S, L) == LoopComputable;
12696 }
12697
12698 ScalarEvolution::BlockDisposition
12699 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
12700 auto &Values = BlockDispositions[S];
12701 for (auto &V : Values) {
12702 if (V.getPointer() == BB)
12703 return V.getInt();
12704 }
12705 Values.emplace_back(BB, DoesNotDominateBlock);
12706 BlockDisposition D = computeBlockDisposition(S, BB);
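// As in getLoopDisposition, re-find the entry because the recursive
// computeBlockDisposition call may have grown BlockDispositions and
// invalidated the reference obtained above.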
12707 auto &Values2 = BlockDispositions[S];
12708 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
12709 if (V.getPointer() == BB) {
12710 V.setInt(D);
12711 break;
12712 }
12713 }
12714 return D;
12715 }
12716
12717 ScalarEvolution::BlockDisposition
12718 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
12719 switch (S->getSCEVType()) {
12720 case scConstant:
12721 return ProperlyDominatesBlock;
12722 case scPtrToInt:
12723 case scTruncate:
12724 case scZeroExtend:
12725 case scSignExtend:
12726 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
12727 case scAddRecExpr: {
12728 // This uses a "dominates" query instead of "properly dominates" query
12729 // to test for proper dominance too, because the instruction which
12730 // produces the addrec's value is a PHI, and a PHI effectively properly
12731 // dominates its entire containing block.
12732 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
12733 if (!DT.dominates(AR->getLoop()->getHeader(), BB))
12734 return DoesNotDominateBlock;
12735
12736 // Fall through into SCEVNAryExpr handling.
12737 LLVM_FALLTHROUGH;
12738 }
12739 case scAddExpr:
12740 case scMulExpr:
12741 case scUMaxExpr:
12742 case scSMaxExpr:
12743 case scUMinExpr:
12744 case scSMinExpr: {
12745 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
12746 bool Proper = true;
12747 for (const SCEV *NAryOp : NAry->operands()) {
12748 BlockDisposition D = getBlockDisposition(NAryOp, BB);
12749 if (D == DoesNotDominateBlock)
12750 return DoesNotDominateBlock;
12751 if (D == DominatesBlock)
12752 Proper = false;
12753 }
12754 return Proper ? ProperlyDominatesBlock : DominatesBlock;
12755 }
12756 case scUDivExpr: {
12757 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
12758 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
12759 BlockDisposition LD = getBlockDisposition(LHS, BB);
12760 if (LD == DoesNotDominateBlock)
12761 return DoesNotDominateBlock;
12762 BlockDisposition RD = getBlockDisposition(RHS, BB);
12763 if (RD == DoesNotDominateBlock)
12764 return DoesNotDominateBlock;
12765 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
12766 ProperlyDominatesBlock : DominatesBlock;
12767 }
12768 case scUnknown:
12769 if (Instruction *I =
12770 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
12771 if (I->getParent() == BB)
12772 return DominatesBlock;
12773 if (DT.properlyDominates(I->getParent(), BB))
12774 return ProperlyDominatesBlock;
12775 return DoesNotDominateBlock;
12776 }
12777 return ProperlyDominatesBlock;
12778 case scCouldNotCompute:
12779 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
12780 }
12781 llvm_unreachable("Unknown SCEV kind!");
12782 }
12783
12784 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
12785 return getBlockDisposition(S, BB) >= DominatesBlock;
12786 }
12787
12788 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
12789 return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
12790 }
12791
12792 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
12793 return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
12794 }
12795
12796 void
12797 ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
12798 ValuesAtScopes.erase(S);
12799 LoopDispositions.erase(S);
12800 BlockDispositions.erase(S);
12801 UnsignedRanges.erase(S);
12802 SignedRanges.erase(S);
12803 ExprValueMap.erase(S);
12804 HasRecMap.erase(S);
12805 MinTrailingZerosCache.erase(S);
12806
12807 for (auto I = PredicatedSCEVRewrites.begin();
12808 I != PredicatedSCEVRewrites.end();) {
12809 std::pair<const SCEV *, const Loop *> Entry = I->first;
12810 if (Entry.first == S)
12811 PredicatedSCEVRewrites.erase(I++);
12812 else
12813 ++I;
12814 }
12815
12816 auto RemoveSCEVFromBackedgeMap =
12817 [S](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
12818 for (auto I = Map.begin(), E = Map.end(); I != E;) {
12819 BackedgeTakenInfo &BEInfo = I->second;
12820 if (BEInfo.hasOperand(S))
12821 Map.erase(I++);
12822 else
12823 ++I;
12824 }
12825 };
12826
12827 RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
12828 RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
12829 }
12830
12831 void
12832 ScalarEvolution::getUsedLoops(const SCEV *S,
12833 SmallPtrSetImpl<const Loop *> &LoopsUsed) {
12834 struct FindUsedLoops {
12835 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
12836 : LoopsUsed(LoopsUsed) {}
12837 SmallPtrSetImpl<const Loop *> &LoopsUsed;
12838 bool follow(const SCEV *S) {
12839 if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
12840 LoopsUsed.insert(AR->getLoop());
12841 return true;
12842 }
12843
12844 bool isDone() const { return false; }
12845 };
12846
12847 FindUsedLoops F(LoopsUsed);
12848 SCEVTraversal<FindUsedLoops>(F).visitAll(S);
12849 }
12850
12851 void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
12852 SmallPtrSet<const Loop *, 8> LoopsUsed;
12853 getUsedLoops(S, LoopsUsed);
12854 for (auto *L : LoopsUsed)
12855 LoopUsers[L].push_back(S);
12856 }
12857
12858 void ScalarEvolution::verify() const {
12859 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
12860 ScalarEvolution SE2(F, TLI, AC, DT, LI);
12861
12862 SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());
12863
12864 // Maps SCEV expressions from one ScalarEvolution "universe" to another.
12865 struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
12866 SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}
12867
12868 const SCEV *visitConstant(const SCEVConstant *Constant) {
12869 return SE.getConstant(Constant->getAPInt());
12870 }
12871
12872 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
12873 return SE.getUnknown(Expr->getValue());
12874 }
12875
12876 const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
12877 return SE.getCouldNotCompute();
12878 }
12879 };
12880
12881 SCEVMapper SCM(SE2);
12882
12883 while (!LoopStack.empty()) {
12884 auto *L = LoopStack.pop_back_val();
12885 llvm::append_range(LoopStack, *L);
12886
12887 auto *CurBECount = SCM.visit(
12888 const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
12889 auto *NewBECount = SE2.getBackedgeTakenCount(L);
12890
12891 if (CurBECount == SE2.getCouldNotCompute() ||
12892 NewBECount == SE2.getCouldNotCompute()) {
12893 // NB! This situation is legal, but is very suspicious -- whatever pass
12894 // changed the loop to make a trip count go from could not compute to
12895 // computable or vice-versa *should have* invalidated SCEV. However, we
12896 // choose not to assert here (for now) since we don't want false
12897 // positives.
12898 continue;
12899 }
12900
12901 if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
12902 // SCEV treats "undef" as an unknown but consistent value (i.e. it does
12903 // not propagate undef aggressively). This means we can (and do) fail
12904 // verification in cases where a transform makes the trip count of a loop
12905 // go from "undef" to "undef+1" (say). The transform is fine, since in
12906 // both cases the loop iterates "undef" times, but SCEV thinks we
12907 // increased the trip count of the loop by 1 incorrectly.
12908 continue;
12909 }
12910
12911 if (SE.getTypeSizeInBits(CurBECount->getType()) >
12912 SE.getTypeSizeInBits(NewBECount->getType()))
12913 NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
12914 else if (SE.getTypeSizeInBits(CurBECount->getType()) <
12915 SE.getTypeSizeInBits(NewBECount->getType()))
12916 CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());
12917
12918 const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount);
12919
12920 // Unless VerifySCEVStrict is set, we only compare constant deltas.
12921 if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) {
12922 dbgs() << "Trip Count for " << *L << " Changed!\n";
12923 dbgs() << "Old: " << *CurBECount << "\n";
12924 dbgs() << "New: " << *NewBECount << "\n";
12925 dbgs() << "Delta: " << *Delta << "\n";
12926 std::abort();
12927 }
12928 }
12929
12930 // Collect all valid loops currently in LoopInfo.
12931 SmallPtrSet<Loop *, 32> ValidLoops;
12932 SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end());
12933 while (!Worklist.empty()) {
12934 Loop *L = Worklist.pop_back_val();
12935 if (ValidLoops.contains(L))
12936 continue;
12937 ValidLoops.insert(L);
12938 Worklist.append(L->begin(), L->end());
12939 }
12940 // Check for SCEV expressions referencing invalid/deleted loops.
12941 for (auto &KV : ValueExprMap) {
12942 auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second);
12943 if (!AR)
12944 continue;
12945 assert(ValidLoops.contains(AR->getLoop()) &&
12946 "AddRec references invalid loop");
12947 }
12948 }
12949
12950 bool ScalarEvolution::invalidate(
12951 Function &F, const PreservedAnalyses &PA,
12952 FunctionAnalysisManager::Invalidator &Inv) {
12953 // Invalidate the ScalarEvolution object whenever it isn't preserved or one
12954 // of its dependencies is invalidated.
12955 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
12956 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
12957 Inv.invalidate<AssumptionAnalysis>(F, PA) ||
12958 Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
12959 Inv.invalidate<LoopAnalysis>(F, PA);
12960 }
12961
12962 AnalysisKey ScalarEvolutionAnalysis::Key;
12963
12964 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
12965 FunctionAnalysisManager &AM) {
12966 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
12967 AM.getResult<AssumptionAnalysis>(F),
12968 AM.getResult<DominatorTreeAnalysis>(F),
12969 AM.getResult<LoopAnalysis>(F));
12970 }
12971
12972 PreservedAnalyses
12973 ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
12974 AM.getResult<ScalarEvolutionAnalysis>(F).verify();
12975 return PreservedAnalyses::all();
12976 }
12977
12978 PreservedAnalyses
12979 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
12980 // For compatibility with opt's -analyze feature under legacy pass manager
12981 // which was not ported to NPM. This keeps tests using
12982 // update_analyze_test_checks.py working.
12983 OS << "Printing analysis 'Scalar Evolution Analysis' for function '"
12984 << F.getName() << "':\n";
12985 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
12986 return PreservedAnalyses::all();
12987 }
12988
12989 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
12990 "Scalar Evolution Analysis", false, true)
12991 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
12992 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
12993 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
12994 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
12995 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
12996 "Scalar Evolution Analysis", false, true)
12997
12998 char ScalarEvolutionWrapperPass::ID = 0;
12999
13000 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
13001 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
13002 }
13003
13004 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
13005 SE.reset(new ScalarEvolution(
13006 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
13007 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
13008 getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
13009 getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
13010 return false;
13011 }
13012
13013 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }
13014
13015 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
13016 SE->print(OS);
13017 }
13018
13019 void ScalarEvolutionWrapperPass::verifyAnalysis() const {
13020 if (!VerifySCEV)
13021 return;
13022
13023 SE->verify();
13024 }
13025
13026 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
13027 AU.setPreservesAll();
13028 AU.addRequiredTransitive<AssumptionCacheTracker>();
13029 AU.addRequiredTransitive<LoopInfoWrapperPass>();
13030 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
13031 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
13032 }
13033
13034 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
13035 const SCEV *RHS) {
13036 FoldingSetNodeID ID;
13037 assert(LHS->getType() == RHS->getType() &&
13038 "Type mismatch between LHS and RHS");
13039 // Unique this node based on the arguments
13040 ID.AddInteger(SCEVPredicate::P_Equal);
13041 ID.AddPointer(LHS);
13042 ID.AddPointer(RHS);
13043 void *IP = nullptr;
13044 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
13045 return S;
13046 SCEVEqualPredicate *Eq = new (SCEVAllocator)
13047 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS);
13048 UniquePreds.InsertNode(Eq, IP);
13049 return Eq;
13050 }
13051
13052 const SCEVPredicate *ScalarEvolution::getWrapPredicate(
13053 const SCEVAddRecExpr *AR,
13054 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
13055 FoldingSetNodeID ID;
13056 // Unique this node based on the arguments
13057 ID.AddInteger(SCEVPredicate::P_Wrap);
13058 ID.AddPointer(AR);
13059 ID.AddInteger(AddedFlags);
13060 void *IP = nullptr;
13061 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
13062 return S;
13063 auto *OF = new (SCEVAllocator)
13064 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
13065 UniquePreds.InsertNode(OF, IP);
13066 return OF;
13067 }
13068
13069 namespace {
13070
13071 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
13072 public:
13073
13074 /// Rewrites \p S in the context of a loop L and the SCEV predication
13075 /// infrastructure.
13076 ///
13077 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
13078 /// equivalences present in \p Pred.
13079 ///
13080 /// If \p NewPreds is non-null, rewrite is free to add further predicates to
13081 /// \p NewPreds such that the result will be an AddRecExpr.
13082 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
13083 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
13084 SCEVUnionPredicate *Pred) {
13085 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
13086 return Rewriter.visit(S);
13087 }
13088
13089 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
13090 if (Pred) {
13091 auto ExprPreds = Pred->getPredicatesForExpr(Expr);
13092 for (auto *Pred : ExprPreds)
13093 if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
13094 if (IPred->getLHS() == Expr)
13095 return IPred->getRHS();
13096 }
13097 return convertToAddRecWithPreds(Expr);
13098 }
13099
13100 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
13101 const SCEV *Operand = visit(Expr->getOperand());
13102 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
13103 if (AR && AR->getLoop() == L && AR->isAffine()) {
13104 // This couldn't be folded because the operand didn't have the nuw
13105 // flag. Add the nusw flag as an assumption that we could make.
13106 const SCEV *Step = AR->getStepRecurrence(SE);
13107 Type *Ty = Expr->getType();
13108 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
13109 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
13110 SE.getSignExtendExpr(Step, Ty), L,
13111 AR->getNoWrapFlags());
13112 }
13113 return SE.getZeroExtendExpr(Operand, Expr->getType());
13114 }
13115
13116 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
13117 const SCEV *Operand = visit(Expr->getOperand());
13118 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
13119 if (AR && AR->getLoop() == L && AR->isAffine()) {
13120 // This couldn't be folded because the operand didn't have the nsw
13121 // flag. Add the nssw flag as an assumption that we could make.
13122 const SCEV *Step = AR->getStepRecurrence(SE);
13123 Type *Ty = Expr->getType();
13124 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
13125 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
13126 SE.getSignExtendExpr(Step, Ty), L,
13127 AR->getNoWrapFlags());
13128 }
13129 return SE.getSignExtendExpr(Operand, Expr->getType());
13130 }
13131
13132 private:
13133 explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
13134 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
13135 SCEVUnionPredicate *Pred)
13136 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}
13137
13138 bool addOverflowAssumption(const SCEVPredicate *P) {
13139 if (!NewPreds) {
13140 // Check if we've already made this assumption.
13141 return Pred && Pred->implies(P);
13142 }
13143 NewPreds->insert(P);
13144 return true;
13145 }
13146
13147 bool addOverflowAssumption(const SCEVAddRecExpr *AR,
13148 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
13149 auto *A = SE.getWrapPredicate(AR, AddedFlags);
13150 return addOverflowAssumption(A);
13151 }
13152
13153 // If \p Expr represents a PHINode, we try to see if it can be represented
13154 // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
13155 // to add this predicate as a runtime overflow check, we return the AddRec.
13156 // If \p Expr does not meet these conditions (is not a PHI node, or we
13157 // couldn't create an AddRec for it, or couldn't add the predicate), we just
13158 // return \p Expr.
13159 const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
13160 if (!isa<PHINode>(Expr->getValue()))
13161 return Expr;
13162 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
13163 PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
13164 if (!PredicatedRewrite)
13165 return Expr;
13166 for (auto *P : PredicatedRewrite->second){
13167 // Wrap predicates from outer loops are not supported.
13168 if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
13169 auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr());
13170 if (L != AR->getLoop())
13171 return Expr;
13172 }
13173 if (!addOverflowAssumption(P))
13174 return Expr;
13175 }
13176 return PredicatedRewrite->first;
13177 }
13178
13179 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
13180 SCEVUnionPredicate *Pred;
13181 const Loop *L;
13182 };
13183
13184 } // end anonymous namespace
13185
13186 const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
13187 SCEVUnionPredicate &Preds) {
13188 return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
13189 }
13190
13191 const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
13192 const SCEV *S, const Loop *L,
13193 SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
13194 SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
13195 S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
13196 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);
13197
13198 if (!AddRec)
13199 return nullptr;
13200
13201 // Since the transformation was successful, we can now transfer the SCEV
13202 // predicates.
13203 for (auto *P : TransformPreds)
13204 Preds.insert(P);
13205
13206 return AddRec;
13207 }
13208
13209 /// SCEV predicates
13210 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
13211 SCEVPredicateKind Kind)
13212 : FastID(ID), Kind(Kind) {}
13213
13214 SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
13215 const SCEV *LHS, const SCEV *RHS)
13216 : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
13217 assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
13218 assert(LHS != RHS && "LHS and RHS are the same SCEV");
13219 }
13220
13221 bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
13222 const auto *Op = dyn_cast<SCEVEqualPredicate>(N);
13223
13224 if (!Op)
13225 return false;
13226
13227 return Op->LHS == LHS && Op->RHS == RHS;
13228 }
13229
13230 bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }
13231
13232 const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }
13233
13234 void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
13235 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
13236 }
13237
13238 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
13239 const SCEVAddRecExpr *AR,
13240 IncrementWrapFlags Flags)
13241 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}
13242
13243 const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }
13244
13245 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
13246 const auto *Op = dyn_cast<SCEVWrapPredicate>(N);
13247
13248 return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
13249 }
13250
13251 bool SCEVWrapPredicate::isAlwaysTrue() const {
13252 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
13253 IncrementWrapFlags IFlags = Flags;
13254
13255 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
13256 IFlags = clearFlags(IFlags, IncrementNSSW);
13257
13258 return IFlags == IncrementAnyWrap;
13259 }
13260
13261 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
13262 OS.indent(Depth) << *getExpr() << " Added Flags: ";
13263 if (SCEVWrapPredicate::IncrementNUSW & getFlags())
13264 OS << "<nusw>";
13265 if (SCEVWrapPredicate::IncrementNSSW & getFlags())
13266 OS << "<nssw>";
13267 OS << "\n";
13268 }
13269
13270 SCEVWrapPredicate::IncrementWrapFlags
13271 SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
13272 ScalarEvolution &SE) {
13273 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
13274 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();
13275
13276 // We can safely transfer the NSW flag as NSSW.
13277 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
13278 ImpliedFlags = IncrementNSSW;
13279
13280 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
13281 // If the increment is positive, the SCEV NUW flag will also imply the
13282 // WrapPredicate NUSW flag.
13283 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
13284 if (Step->getValue()->getValue().isNonNegative())
13285 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
13286 }
13287
13288 return ImpliedFlags;
13289 }
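// Illustrative example (not from the original source): for
// {0,+,1}<nuw><nsw><%L>, NSW transfers as IncrementNSSW and, since the
// constant step 1 is non-negative, NUW additionally implies IncrementNUSW,
// so a wrap predicate requiring either flag is statically satisfied.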
13290
13291 /// Union predicates don't get cached, so create a dummy FoldingSet ID for them.
13292 SCEVUnionPredicate::SCEVUnionPredicate()
13293 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}
13294
13295 bool SCEVUnionPredicate::isAlwaysTrue() const {
13296 return all_of(Preds,
13297 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
13298 }
13299
13300 ArrayRef<const SCEVPredicate *>
13301 SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
13302 auto I = SCEVToPreds.find(Expr);
13303 if (I == SCEVToPreds.end())
13304 return ArrayRef<const SCEVPredicate *>();
13305 return I->second;
13306 }
13307
13308 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
13309 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
13310 return all_of(Set->Preds,
13311 [this](const SCEVPredicate *I) { return this->implies(I); });
13312
13313 auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
13314 if (ScevPredsIt == SCEVToPreds.end())
13315 return false;
13316 auto &SCEVPreds = ScevPredsIt->second;
13317
13318 return any_of(SCEVPreds,
13319 [N](const SCEVPredicate *I) { return I->implies(N); });
13320 }
13321
13322 const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }
13323
13324 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
13325 for (auto Pred : Preds)
13326 Pred->print(OS, Depth);
13327 }
13328
13329 void SCEVUnionPredicate::add(const SCEVPredicate *N) {
13330 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
13331 for (auto Pred : Set->Preds)
13332 add(Pred);
13333 return;
13334 }
13335
13336 if (implies(N))
13337 return;
13338
13339 const SCEV *Key = N->getExpr();
13340 assert(Key && "Only SCEVUnionPredicate doesn't have an "
13341 "associated expression!");
13342
13343 SCEVToPreds[Key].push_back(N);
13344 Preds.push_back(N);
13345 }
13346
13347 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
13348 Loop &L)
13349 : SE(SE), L(L) {}
13350
13351 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
13352 const SCEV *Expr = SE.getSCEV(V);
13353 RewriteEntry &Entry = RewriteMap[Expr];
13354
13355 // If we already have an entry and the version matches, return it.
13356 if (Entry.second && Generation == Entry.first)
13357 return Entry.second;
13358
13359 // If we found an entry but it is stale, rewrite the stale entry
13360 // according to the current predicates.
13361 if (Entry.second)
13362 Expr = Entry.second;
13363
13364 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
13365 Entry = {Generation, NewSCEV};
13366
13367 return NewSCEV;
13368 }
13369
13370 const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
13371 if (!BackedgeCount) {
13372 SCEVUnionPredicate BackedgePred;
13373 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
13374 addPredicate(BackedgePred);
13375 }
13376 return BackedgeCount;
13377 }
13378
13379 void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
13380 if (Preds.implies(&Pred))
13381 return;
13382 Preds.add(&Pred);
13383 updateGeneration();
13384 }
13385
13386 const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
13387 return Preds;
13388 }
13389
13390 void PredicatedScalarEvolution::updateGeneration() {
13391 // If the generation number wrapped, recompute everything.
13392 if (++Generation == 0) {
13393 for (auto &II : RewriteMap) {
13394 const SCEV *Rewritten = II.second.second;
13395 II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
13396 }
13397 }
13398 }
13399
13400 void PredicatedScalarEvolution::setNoOverflow(
13401 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
13402 const SCEV *Expr = getSCEV(V);
13403 const auto *AR = cast<SCEVAddRecExpr>(Expr);
13404
13405 auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);
13406
13407 // Clear the statically implied flags.
13408 Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
13409 addPredicate(*SE.getWrapPredicate(AR, Flags));
13410
13411 auto II = FlagsMap.insert({V, Flags});
13412 if (!II.second)
13413 II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
13414 }
13415
13416 bool PredicatedScalarEvolution::hasNoOverflow(
13417 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
13418 const SCEV *Expr = getSCEV(V);
13419 const auto *AR = cast<SCEVAddRecExpr>(Expr);
13420
13421 Flags = SCEVWrapPredicate::clearFlags(
13422 Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));
13423
13424 auto II = FlagsMap.find(V);
13425
13426 if (II != FlagsMap.end())
13427 Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);
13428
13429 return Flags == SCEVWrapPredicate::IncrementAnyWrap;
13430 }
13431
13432 const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
13433 const SCEV *Expr = this->getSCEV(V);
13434 SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
13435 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);
13436
13437 if (!New)
13438 return nullptr;
13439
13440 for (auto *P : NewPreds)
13441 Preds.add(P);
13442
13443 updateGeneration();
13444 RewriteMap[SE.getSCEV(V)] = {Generation, New};
13445 return New;
13446 }
13447
13448 PredicatedScalarEvolution::PredicatedScalarEvolution(
13449 const PredicatedScalarEvolution &Init)
13450 : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
13451 Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
13452 for (auto I : Init.FlagsMap)
13453 FlagsMap.insert(I);
13454 }
13455
13456 void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
13457 // For each block.
13458 for (auto *BB : L.getBlocks())
13459 for (auto &I : *BB) {
13460 if (!SE.isSCEVable(I.getType()))
13461 continue;
13462
13463 auto *Expr = SE.getSCEV(&I);
13464 auto II = RewriteMap.find(Expr);
13465
13466 if (II == RewriteMap.end())
13467 continue;
13468
13469 // Don't print things that are not interesting.
13470 if (II->second.second == Expr)
13471 continue;
13472
13473 OS.indent(Depth) << "[PSE]" << I << ":\n";
13474 OS.indent(Depth + 2) << *Expr << "\n";
13475 OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
13476 }
13477 }
13478
13479 // Match the mathematical pattern A - (A / B) * B, where A and B can be
13480 // arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used
13481 // for URem with constant power-of-2 second operands.
13482 // It's not always easy, as A and B can be folded (imagine A is X / 2, and B is
13483 // 4, A / B becomes X / 8).
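// Illustrative example (not from the original source): for `urem i8 %a, 4`
// instcombine emits `and i8 %a, 3`, whose SCEV is
// (zext i2 (trunc i8 %a to i2) to i8); the zext/trunc case below then
// recovers LHS = %a and RHS = 1 << 2 = 4.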
13484 bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
13485 const SCEV *&RHS) {
13486 // Try to match 'zext (trunc A to iB) to iY', which is used
13487 // for URem with constant power-of-2 second operands. Make sure the size of
13488 // the operand A matches the size of the whole expression.
13489 if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
13490 if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
13491 LHS = Trunc->getOperand();
13492 // Bail out if the type of the LHS is larger than the type of the
13493 // expression for now.
13494 if (getTypeSizeInBits(LHS->getType()) >
13495 getTypeSizeInBits(Expr->getType()))
13496 return false;
13497 if (LHS->getType() != Expr->getType())
13498 LHS = getZeroExtendExpr(LHS, Expr->getType());
13499 RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1)
13500 << getTypeSizeInBits(Trunc->getType()));
13501 return true;
13502 }
13503 const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
13504 if (Add == nullptr || Add->getNumOperands() != 2)
13505 return false;
13506
13507 const SCEV *A = Add->getOperand(1);
13508 const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));
13509
13510 if (Mul == nullptr)
13511 return false;
13512
13513 const auto MatchURemWithDivisor = [&](const SCEV *B) {
13514 // (SomeExpr + (-(SomeExpr / B) * B)).
13515 if (Expr == getURemExpr(A, B)) {
13516 LHS = A;
13517 RHS = B;
13518 return true;
13519 }
13520 return false;
13521 };
13522
13523 // (SomeExpr + (-1 * (SomeExpr / B) * B)).
13524 if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
13525 return MatchURemWithDivisor(Mul->getOperand(1)) ||
13526 MatchURemWithDivisor(Mul->getOperand(2));
13527
13528 // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
13529 if (Mul->getNumOperands() == 2)
13530 return MatchURemWithDivisor(Mul->getOperand(1)) ||
13531 MatchURemWithDivisor(Mul->getOperand(0)) ||
13532 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
13533 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
13534 return false;
13535 }
13536
13537 const SCEV *
13538 ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
13539 SmallVector<BasicBlock*, 16> ExitingBlocks;
13540 L->getExitingBlocks(ExitingBlocks);
13541
13542 // Form an expression for the maximum exit count possible for this loop. We
13543 // merge the max and exact information to approximate a version of
13544 // getConstantMaxBackedgeTakenCount which isn't restricted to just constants.
13545 SmallVector<const SCEV*, 4> ExitCounts;
13546 for (BasicBlock *ExitingBB : ExitingBlocks) {
13547 const SCEV *ExitCount = getExitCount(L, ExitingBB);
13548 if (isa<SCEVCouldNotCompute>(ExitCount))
13549 ExitCount = getExitCount(L, ExitingBB,
13550 ScalarEvolution::ConstantMaximum);
13551 if (!isa<SCEVCouldNotCompute>(ExitCount)) {
13552 assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
13553 "We should only have known counts for exiting blocks that "
13554 "dominate latch!");
13555 ExitCounts.push_back(ExitCount);
13556 }
13557 }
13558 if (ExitCounts.empty())
13559 return getCouldNotCompute();
13560 return getUMinFromMismatchedTypes(ExitCounts);
13561 }
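// Illustrative example (not from the original source): a loop with two
// exits whose counts are %n and 100 yields umin(%n, 100), after
// getUMinFromMismatchedTypes unifies the operand types.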
13562
13563 /// This rewriter is similar to SCEVParameterRewriter (it replaces SCEVUnknown
13564 /// components following the Map (Value -> SCEV)), but skips AddRecExpr because
13565 /// we cannot guarantee that the replacement is loop invariant in the loop of
13566 /// the AddRec.
13567 class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
13568 ValueToSCEVMapTy &Map;
13569
13570 public:
13571 SCEVLoopGuardRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M)
13572 : SCEVRewriteVisitor(SE), Map(M) {}
13573
13574 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }
13575
13576 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
13577 auto I = Map.find(Expr->getValue());
13578 if (I == Map.end())
13579 return Expr;
13580 return I->second;
13581 }
13582 };
13583
13584 const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
13585 auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
13586 const SCEV *RHS, ValueToSCEVMapTy &RewriteMap) {
13587 // WARNING: It is generally unsound to apply any wrap flags to the proposed
13588 // replacement SCEV which isn't directly implied by the structure of that
13589 // SCEV. In particular, using contextual facts to imply flags is *NOT*
13590 // legal. See the scoping rules for flags in the header to understand why.
13591
13592 // If we have LHS == 0, check if LHS computes a property of some unknown
13593 // SCEV %v and, if so, rewrite %v to express that property explicitly.
13594 const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
13595 if (Predicate == CmpInst::ICMP_EQ && RHSC &&
13596 RHSC->getValue()->isNullValue()) {
13597 // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to
13598 // explicitly express that.
13599 const SCEV *URemLHS = nullptr;
13600 const SCEV *URemRHS = nullptr;
13601 if (matchURem(LHS, URemLHS, URemRHS)) {
13602 if (const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
13603 Value *V = LHSUnknown->getValue();
13604 RewriteMap[V] = getMulExpr(getUDivExpr(URemLHS, URemRHS), URemRHS);
13605 return;
13606 }
13607 }
13608 }
13609
13610 if (!isa<SCEVUnknown>(LHS) && isa<SCEVUnknown>(RHS)) {
13611 std::swap(LHS, RHS);
13612 Predicate = CmpInst::getSwappedPredicate(Predicate);
13613 }
13614
13615 // Check for a condition of the form (-C1 + X < C2). InstCombine will
13616 // create this form when combining two checks of the form (X u< C2 + C1) and
13617 // (X >=u C1).
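// Illustrative instance (not from the original source): for the checks
// X u>= 2 and X u< 10, instcombine forms (-2 + X) u< 8;
// makeExactICmpRegion(ULT, 8).sub(-2) yields [2, 10), so X is rewritten
// below to umax(2, umin(X, 9)).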
13618 auto MatchRangeCheckIdiom = [this, Predicate, LHS, RHS, &RewriteMap]() {
13619 auto *AddExpr = dyn_cast<SCEVAddExpr>(LHS);
13620 if (!AddExpr || AddExpr->getNumOperands() != 2)
13621 return false;
13622
13623 auto *C1 = dyn_cast<SCEVConstant>(AddExpr->getOperand(0));
13624 auto *LHSUnknown = dyn_cast<SCEVUnknown>(AddExpr->getOperand(1));
13625 auto *C2 = dyn_cast<SCEVConstant>(RHS);
13626 if (!C1 || !C2 || !LHSUnknown)
13627 return false;
13628
13629 auto ExactRegion =
13630 ConstantRange::makeExactICmpRegion(Predicate, C2->getAPInt())
13631 .sub(C1->getAPInt());
13632
13633 // Bail out, unless we have a non-wrapping, monotonic range.
13634 if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
13635 return false;
13636 auto I = RewriteMap.find(LHSUnknown->getValue());
13637 const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHSUnknown;
13638 RewriteMap[LHSUnknown->getValue()] = getUMaxExpr(
13639 getConstant(ExactRegion.getUnsignedMin()),
13640 getUMinExpr(RewrittenLHS, getConstant(ExactRegion.getUnsignedMax())));
13641 return true;
13642 };
13643 if (MatchRangeCheckIdiom())
13644 return;
13645
13646 // For now, limit to conditions that provide information about unknown
13647 // expressions. RHS also cannot contain add recurrences.
13648 auto *LHSUnknown = dyn_cast<SCEVUnknown>(LHS);
13649 if (!LHSUnknown || containsAddRecurrence(RHS))
13650 return;
13651
13652 // Check whether LHS has already been rewritten. In that case we want to
13653 // chain further rewrites onto the already rewritten value.
13654 auto I = RewriteMap.find(LHSUnknown->getValue());
13655 const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHS;
13656 const SCEV *RewrittenRHS = nullptr;
13657 switch (Predicate) {
13658 case CmpInst::ICMP_ULT:
13659 RewrittenRHS =
13660 getUMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
13661 break;
13662 case CmpInst::ICMP_SLT:
13663 RewrittenRHS =
13664 getSMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
13665 break;
13666 case CmpInst::ICMP_ULE:
13667 RewrittenRHS = getUMinExpr(RewrittenLHS, RHS);
13668 break;
13669 case CmpInst::ICMP_SLE:
13670 RewrittenRHS = getSMinExpr(RewrittenLHS, RHS);
13671 break;
13672 case CmpInst::ICMP_UGT:
13673 RewrittenRHS =
13674 getUMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
13675 break;
13676 case CmpInst::ICMP_SGT:
13677 RewrittenRHS =
13678 getSMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
13679 break;
13680 case CmpInst::ICMP_UGE:
13681 RewrittenRHS = getUMaxExpr(RewrittenLHS, RHS);
13682 break;
13683 case CmpInst::ICMP_SGE:
13684 RewrittenRHS = getSMaxExpr(RewrittenLHS, RHS);
13685 break;
13686 case CmpInst::ICMP_EQ:
13687 if (isa<SCEVConstant>(RHS))
13688 RewrittenRHS = RHS;
13689 break;
13690 case CmpInst::ICMP_NE:
13691 if (isa<SCEVConstant>(RHS) &&
13692 cast<SCEVConstant>(RHS)->getValue()->isNullValue())
13693 RewrittenRHS = getUMaxExpr(RewrittenLHS, getOne(RHS->getType()));
13694 break;
13695 default:
13696 break;
13697 }
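// For example (illustrative), a guard `X u< 5` yields
// RewrittenRHS = umin(X, 4), and `X != 0` yields umax(X, 1).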
13698
13699 if (RewrittenRHS)
13700 RewriteMap[LHSUnknown->getValue()] = RewrittenRHS;
13701 };
13702 // Starting at the loop predecessor, climb up the predecessor chain, as long
13703 // as we can find predecessors that have a unique successor leading to the
13704 // original header.
13705 // TODO: share this logic with isLoopEntryGuardedByCond.
13706 ValueToSCEVMapTy RewriteMap;
13707 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(
13708 L->getLoopPredecessor(), L->getHeader());
13709 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
13710
13711 const BranchInst *LoopEntryPredicate =
13712 dyn_cast<BranchInst>(Pair.first->getTerminator());
13713 if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
13714 continue;
13715
13716 bool EnterIfTrue = LoopEntryPredicate->getSuccessor(0) == Pair.second;
13717 SmallVector<Value *, 8> Worklist;
13718 SmallPtrSet<Value *, 8> Visited;
13719 Worklist.push_back(LoopEntryPredicate->getCondition());
13720 while (!Worklist.empty()) {
13721 Value *Cond = Worklist.pop_back_val();
13722 if (!Visited.insert(Cond).second)
13723 continue;
13724
13725 if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
13726 auto Predicate =
13727 EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate();
13728 CollectCondition(Predicate, getSCEV(Cmp->getOperand(0)),
13729 getSCEV(Cmp->getOperand(1)), RewriteMap);
13730 continue;
13731 }
13732
13733 Value *L, *R;
13734 if (EnterIfTrue ? match(Cond, m_LogicalAnd(m_Value(L), m_Value(R)))
13735 : match(Cond, m_LogicalOr(m_Value(L), m_Value(R)))) {
13736 Worklist.push_back(L);
13737 Worklist.push_back(R);
13738 }
13739 }
13740 }
13741
13742 // Also collect information from assumptions dominating the loop.
13743 for (auto &AssumeVH : AC.assumptions()) {
13744 if (!AssumeVH)
13745 continue;
13746 auto *AssumeI = cast<CallInst>(AssumeVH);
13747 auto *Cmp = dyn_cast<ICmpInst>(AssumeI->getOperand(0));
13748 if (!Cmp || !DT.dominates(AssumeI, L->getHeader()))
13749 continue;
13750 CollectCondition(Cmp->getPredicate(), getSCEV(Cmp->getOperand(0)),
13751 getSCEV(Cmp->getOperand(1)), RewriteMap);
13752 }
13753
13754 if (RewriteMap.empty())
13755 return Expr;
13756 SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
13757 return Rewriter.visit(Expr);
13758 }
13759