//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}
95
96 namespace {
97
98 // Simplifying using an assume can only be done in a particular control-flow
99 // context (the context instruction provides that context). If an assume and
100 // the context instruction are not in the same block then the DT helps in
101 // figuring out if we can use it.
102 struct Query {
103 const DataLayout &DL;
104 AssumptionCache *AC;
105 const Instruction *CxtI;
106 const DominatorTree *DT;
107
108 // Unlike the other analyses, this may be a nullptr because not all clients
109 // provide it currently.
110 OptimizationRemarkEmitter *ORE;
111
112 /// If true, it is safe to use metadata during simplification.
113 InstrInfoQuery IIQ;
114
Query__anon6905156c0111::Query115 Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
116 const DominatorTree *DT, bool UseInstrInfo,
117 OptimizationRemarkEmitter *ORE = nullptr)
118 : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
119 };
120
121 } // end anonymous namespace
122
123 // Given the provided Value and, potentially, a context instruction, return
124 // the preferred context instruction (if any).
safeCxtI(const Value * V,const Instruction * CxtI)125 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
126 // If we've been provided with a context instruction, then use that (provided
127 // it has been inserted).
128 if (CxtI && CxtI->getParent())
129 return CxtI;
130
131 // If the value is really an already-inserted instruction, then use that.
132 CxtI = dyn_cast<Instruction>(V);
133 if (CxtI && CxtI->getParent())
134 return CxtI;
135
136 return nullptr;
137 }
138
safeCxtI(const Value * V1,const Value * V2,const Instruction * CxtI)139 static const Instruction *safeCxtI(const Value *V1, const Value *V2, const Instruction *CxtI) {
140 // If we've been provided with a context instruction, then use that (provided
141 // it has been inserted).
142 if (CxtI && CxtI->getParent())
143 return CxtI;
144
145 // If the value is really an already-inserted instruction, then use that.
146 CxtI = dyn_cast<Instruction>(V1);
147 if (CxtI && CxtI->getParent())
148 return CxtI;
149
150 CxtI = dyn_cast<Instruction>(V2);
151 if (CxtI && CxtI->getParent())
152 return CxtI;
153
154 return nullptr;
155 }
156
getShuffleDemandedElts(const ShuffleVectorInst * Shuf,const APInt & DemandedElts,APInt & DemandedLHS,APInt & DemandedRHS)157 static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
158 const APInt &DemandedElts,
159 APInt &DemandedLHS, APInt &DemandedRHS) {
160 // The length of scalable vectors is unknown at compile time, thus we
161 // cannot check their values
162 if (isa<ScalableVectorType>(Shuf->getType()))
163 return false;
164
165 int NumElts =
166 cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
167 int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
168 DemandedLHS = DemandedRHS = APInt::getZero(NumElts);
169 if (DemandedElts.isZero())
170 return true;
171 // Simple case of a shuffle with zeroinitializer.
172 if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
173 DemandedLHS.setBit(0);
174 return true;
175 }
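  // Map each demanded result element back to the operand element it selects.
  // E.g. with 4-element operands, a mask value of 5 demands element 1 of the
  // RHS operand (5 % 4 == 1), while a mask value of 2 demands element 2 of
  // the LHS.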
  for (int i = 0; i != NumMaskElts; ++i) {
    if (!DemandedElts[i])
      continue;
    int M = Shuf->getMaskValue(i);
    assert(M < (NumElts * 2) && "Invalid shuffle mask constant");

    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
    if (M == -1)
      return false;
    if (M < NumElts)
      DemandedLHS.setBit(M % NumElts);
    else
      DemandedRHS.setBit(M % NumElts);
  }

  return true;
}

static void computeKnownBits(const Value *V, const APInt &DemandedElts,
                             KnownBits &Known, unsigned Depth, const Query &Q);

static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                             const Query &Q) {
  // FIXME: We currently have no way to represent the DemandedElts of a scalable
  // vector
  if (isa<ScalableVectorType>(V->getType())) {
    Known.resetAll();
    return;
  }

  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
}

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                            KnownBits &Known, const DataLayout &DL,
                            unsigned Depth, AssumptionCache *AC,
                            const Instruction *CxtI, const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, DemandedElts, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                                  unsigned Depth, const Query &Q);

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                                 const DataLayout &DL, unsigned Depth,
                                 AssumptionCache *AC, const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, DemandedElts, Depth,
      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
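  // E.g. with M == 0x0f, (X & 0xf0) and (Y & 0x0f) occupy disjoint bit
  // positions for any X and Y, so no bit can be set in both.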
  Value *M;
  if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(RHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(LHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
}

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {
  return !I->user_empty() && all_of(I->users(), [](const User *U) {
    ICmpInst::Predicate P;
    return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
  });
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const Query &Q);

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
                            const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2, 0,
                           Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
                                   unsigned Depth, const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  // FIXME: We currently have no way to represent the DemandedElts of a scalable
  // vector
  if (isa<ScalableVectorType>(V->getType()))
    return 1;

  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  return ComputeNumSignBits(V, DemandedElts, Depth, Q);
}

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW, const APInt &DemandedElts,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);

  // If one operand is unknown and we have no nowrap information,
  // the result will be unknown independently of the second operand.
  if (KnownOut.isUnknown() && !NSW)
    return;

  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
  KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                const APInt &DemandedElts, KnownBits &Known,
                                KnownBits &Known2, unsigned Depth,
                                const Query &Q) {
  computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
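      // E.g. -3 * 2 == -6 but -3 * 0 == 0, hence the extra isNonZero()
      // checks below.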
      if (!isKnownNonNegative)
        isKnownNegative =
            (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
             Known2.isNonZero()) ||
            (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
    }
  }

  Known = KnownBits::mul(Known, Known2);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
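    // E.g. for the half-open range [8, 12) the values are 0b1000..0b1011;
    // min ^ max == 0b0011, so all but the low two bits are shared by every
    // value in the range.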
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
    Known.One &= UnsignedMax & Mask;
    Known.Zero &= ~UnsignedMax & Mask;
  }
}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
          return EphValues.count(U);
        })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          append_range(WorkSet, U->operands());
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
    return CI->isAssumeLikeIntrinsic();

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (Inv->getParent() == CxtI->getParent()) {
    // If Inv and CtxI are in the same block, check if the assume (Inv) is first
    // in the BB.
    if (Inv->comesBefore(CxtI))
      return true;

    // Don't let an assume affect itself - this would cause the problems
    // `isEphemeralValueOf` is trying to prevent, and it would also make
    // the loop below go out of bounds.
    if (Inv == CxtI)
      return false;

    // The context comes first, but they're both in the same block.
    // Make sure there is nothing in between that might interrupt
    // the control flow, not even CxtI itself.
    // We limit the scan distance between the assume and its context instruction
    // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
    // it can be adjusted if needed (could be turned into a cl::opt).
    auto Range = make_range(CxtI->getIterator(), Inv->getIterator());
    if (!isGuaranteedToTransferExecutionToSuccessor(Range, 15))
      return false;

    return !isEphemeralValueOf(Inv, CxtI);
  }

  // Inv and CxtI are in different blocks.
  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  return false;
}

static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
  // v u> y implies v != 0.
  if (Pred == ICmpInst::ICMP_UGT)
    return true;

  // Special-case v != 0 to also handle v != null.
  if (Pred == ICmpInst::ICMP_NE)
    return match(RHS, m_Zero());

  // All other predicates - rely on generic ConstantRange handling.
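  // E.g. makeExactICmpRegion(SGT, 0) yields the range [1, SIGNED_MIN), which
  // excludes zero, while makeExactICmpRegion(SGT, -1) yields [0, SIGNED_MIN),
  // which does not.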
  const APInt *C;
  if (!match(RHS, m_APInt(C)))
    return false;

  ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
  return !TrueValues.contains(APInt::getZero(C->getBitWidth()));
}

static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return false;

  if (Q.CxtI && V->getType()->isPointerTy()) {
    SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
    if (!NullPointerIsDefined(Q.CxtI->getFunction(),
                              V->getType()->getPointerAddressSpace()))
      AttrKinds.push_back(Attribute::Dereferenceable);

    if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
      return true;
  }

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getFunction() == Q.CxtI->getFunction() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *RHS;
    CmpInst::Predicate Pred;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
    if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
      return false;

    if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
      return true;
  }

  return false;
}

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Refine Known set if the pointer alignment is set by assume bundles.
  if (V->getType()->isPointerTy()) {
    if (RetainedKnowledge RK = getKnowledgeValidInContext(
            V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
      Known.Zero.setLowBits(Log2_32(RK.ArgValue));
    }
  }

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxAnalysisRecursionDepth)
      continue;

    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    // We are attempting to compute known bits for the operands of an assume.
    // Do not try to use other assumptions for those recursive calls because
    // that can lead to mutual recursion and a compile-time explosion.
    // An example of the mutual recursion: computeKnownBits can call
    // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
    // and so on.
    Query QueryNoAC = Q;
    QueryNoAC.AC = nullptr;

    // Note that ptrtoint may change the bitwidth.
    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));

    CmpInst::Predicate Pred;
    uint64_t C;
    switch (Cmp->getPredicate()) {
    default:
      break;
    case ICmpInst::ICMP_EQ:
      // assume(v = a)
      if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        Known.Zero |= RHSKnown.Zero;
        Known.One |= RHSKnown.One;
        // assume(v & b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits MaskKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in the mask that are known to be one, we can propagate
        // known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & MaskKnown.One;
        Known.One |= RHSKnown.One & MaskKnown.One;
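        // E.g. assume((v & 0xf0) == 0x30) pins bits 7:4 of v to 0b0011 and
        // leaves the low four bits unknown.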
        // assume(~(v & b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits MaskKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in the mask that are known to be one, we can propagate
        // inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & MaskKnown.One;
        Known.One |= RHSKnown.Zero & MaskKnown.One;
        // assume(v | b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate known
        // bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One |= RHSKnown.One & BKnown.Zero;
        // assume(~(v | b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & BKnown.Zero;
        Known.One |= RHSKnown.Zero & BKnown.Zero;
        // assume(v ^ b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate known
        // bits from the RHS to V. For those bits in B that are known to be one,
        // we can propagate inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One |= RHSKnown.One & BKnown.Zero;
        Known.Zero |= RHSKnown.One & BKnown.One;
        Known.One |= RHSKnown.Zero & BKnown.One;
        // assume(~(v ^ b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V. For those bits in B that are
        // known to be one, we can propagate known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & BKnown.Zero;
        Known.One |= RHSKnown.Zero & BKnown.Zero;
        Known.Zero |= RHSKnown.Zero & BKnown.One;
        Known.One |= RHSKnown.One & BKnown.One;
        // assume(v << c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the right by C.
        RHSKnown.Zero.lshrInPlace(C);
        Known.Zero |= RHSKnown.Zero;
        RHSKnown.One.lshrInPlace(C);
        Known.One |= RHSKnown.One;
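        // E.g. assume((v << 4) == 0x10) makes bit 0 of v known one and the
        // bits above it known zero, while the four bits shifted out at the
        // top remain unknown.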
        // assume(~(v << c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the right by C.
        RHSKnown.One.lshrInPlace(C);
        Known.Zero |= RHSKnown.One;
        RHSKnown.Zero.lshrInPlace(C);
        Known.One |= RHSKnown.Zero;
        // assume(v >> c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.Zero << C;
        Known.One |= RHSKnown.One << C;
        // assume(~(v >> c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.One << C;
        Known.One |= RHSKnown.Zero << C;
      }
      break;
    case ICmpInst::ICMP_SGE:
      // assume(v >=_s c) where c is non-negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SGT:
      // assume(v >_s c) where c is at least -1.
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLE:
      // assume(v <=_s c) where c is negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLT:
      // assume(v <_s c) where c is non-positive
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isZero() || RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_ULE:
      // assume(v <=_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // Whatever high bits in c are zero are known to be zero.
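        // E.g. assume(v <=_u 15) makes every bit of v above bit 3 known zero.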
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    case ICmpInst::ICMP_ULT:
      // assume(v <_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // If the RHS is known zero, then this assumption must be wrong (nothing
        // is unsigned less than zero). Signal a conflict and get out of here.
        if (RHSKnown.isZero()) {
          Known.Zero.setAllBits();
          Known.One.setAllBits();
          break;
        }

        // Whatever high bits in c are zero are known to be zero (if c is a power
        // of 2, then one more).
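        // E.g. assume(v <_u 16) clears bit 4 as well, since v <_u 16 is
        // equivalent to v <=_u 15.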
        if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, QueryNoAC))
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
        else
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known and on return
/// contains the known bits of the shift value source. KF is an
/// operator-specific function that, given the known bits and a shift amount,
/// computes the implied known bits of the shift operator's result for that
/// shift amount. The results from calling KF are conservatively combined for
/// all permitted shift amounts.
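/// For example, if an i8 shift amount is known to have its low bit clear and
/// is known to be less than 8, only the amounts {0, 2, 4, 6} are enumerated
/// and their KF results intersected.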
static void computeKnownBitsFromShiftOperator(
    const Operator *I, const APInt &DemandedElts, KnownBits &Known,
    KnownBits &Known2, unsigned Depth, const Query &Q,
    function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
  computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
  bool ShiftAmtIsConstant = Known.isConstant();
  bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);

  if (ShiftAmtIsConstant) {
    Known = KF(Known2, Known);

    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive.
  // TODO: Should we just carry on?
  if (MaxShiftAmtIsOutOfRange) {
    Known.resetAll();
    return;
  }

  // It would be more clearly correct to use two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero =
        isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known = KnownBits::commonBits(
        Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}

static void computeKnownBitsFromOperator(const Operator *I,
                                         const APInt &DemandedElts,
                                         KnownBits &Known, unsigned Depth,
                                         const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(BitWidth);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known &= Known2;

    // and(x, add(x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form and(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
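    // E.g. x & (x - 1) always has bit 0 clear: if x is odd, x - 1 is even,
    // and if x is even, bit 0 of x is already zero.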
    Value *X = nullptr, *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known |= Known2;
    break;
  case Instruction::Xor:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known ^= Known2;
    break;
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
                        Known, Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::udiv(Known, Known2);
    break;
  }
  case Instruction::Select: {
    const Value *LHS = nullptr, *RHS = nullptr;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
      switch (SPF) {
      default:
        llvm_unreachable("Unhandled select pattern flavor!");
      case SPF_SMAX:
        Known = KnownBits::smax(Known, Known2);
        break;
      case SPF_SMIN:
        Known = KnownBits::smin(Known, Known2);
        break;
      case SPF_UMAX:
        Known = KnownBits::umax(Known, Known2);
        break;
      case SPF_UMIN:
        Known = KnownBits::umin(Known, Known2);
        break;
      }
      break;
    }

    computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);

    if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (match(RHS, m_Neg(m_Specific(LHS))) &&
          Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
        Known.Zero.setSignBit();
    }

    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getPointerTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.anyextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }

    // Handle cast from vector integer type to scalar or vector integer.
    auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
    if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
        !I->getType()->isIntOrIntVectorTy())
      break;

    // Look through a cast from narrow vector elements to wider type.
    // Examples: v4i32 -> v2i64, v3i8 -> i24
    unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
    if (BitWidth % SubBitWidth == 0) {
      // Known bits are automatically intersected across demanded elements of a
      // vector. So for example, if a bit is computed as known zero, it must be
      // zero across all demanded elements of the vector.
      //
      // For this bitcast, each demanded element of the output is sub-divided
      // across a set of smaller vector elements in the source vector. To get
      // the known bits for an entire element of the output, compute the known
      // bits for each sub-element sequentially. This is done by shifting the
      // one-set-bit demanded elements parameter across the sub-elements for
      // consecutive calls to computeKnownBits. We are using the demanded
      // elements parameter as a mask operator.
      //
      // The known bits of each sub-element are then inserted into place
      // (dependent on endian) to form the full result of known bits.
      unsigned NumElts = DemandedElts.getBitWidth();
      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts = APInt::getZero(NumElts * SubScale);
      for (unsigned i = 0; i != NumElts; ++i) {
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);
      }
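      // E.g. for a v4i8 -> v2i16 bitcast demanding only result element 1,
      // SubScale is 2 and SubDemandedElts starts as 0b0100 (source element
      // 2); shifting it left by one selects source element 3 on the next
      // iteration.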

      KnownBits KnownSrc(SubBitWidth);
      for (unsigned i = 0; i != SubScale; ++i) {
        computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc,
                         Depth + 1, Q);
        unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i;
        Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
      }
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW) {
        if (KnownVal.Zero.isSignBitSet())
          Result.Zero.setSignBit();
        if (KnownVal.One.isSignBitSet())
          Result.One.setSignBit();
      }
      return Result;
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    // Trailing zeros of a left-shifted constant never decrease.
    const APInt *C;
    if (match(I->getOperand(0), m_APInt(C)))
      Known.Zero.setLowBits(C->countTrailingZeros());
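    // E.g. (shl i8 12, %amt) always has its low two bits clear, whatever
    // %amt turns out to be.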
    break;
  }
  case Instruction::LShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      return KnownBits::lshr(KnownVal, KnownAmt);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    // Leading zeros of a right-shifted constant never decrease.
    const APInt *C;
    if (match(I->getOperand(0), m_APInt(C)))
      Known.Zero.setHighBits(C->countLeadingZeros());
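    // E.g. (lshr i32 0x0000ffff, %amt) always has its top 16 bits clear.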
    break;
  }
  case Instruction::AShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      return KnownBits::ashr(KnownVal, KnownAmt);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::srem(Known, Known2);
    break;

  case Instruction::URem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::urem(Known, Known2);
    break;
  case Instruction::Alloca:
    Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
    break;
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // Accumulate the constant indices in a separate variable
    // to minimize the number of calls to computeForAddSub.
    APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      // Known bits can only be lost as indices are accumulated; short-circuit
      // once nothing is known.
      if (Known.isUnknown())
        break;

      Value *Index = I->getOperand(i);

      // Handle case when index is zero.
      Constant *CIndex = dyn_cast<Constant>(Index);
      if (CIndex && CIndex->isZeroValue())
        continue;

      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        assert(CIndex &&
               "Access to structure field must be known at compile time");

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        AccConstIndices += Offset;
        continue;
      }

      // Handle array index arithmetic.
      Type *IndexedTy = GTI.getIndexedType();
      if (!IndexedTy->isSized()) {
        Known.resetAll();
        break;
      }

      unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
      KnownBits IndexBits(IndexBitWidth);
      computeKnownBits(Index, IndexBits, Depth + 1, Q);
      TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
      uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
      KnownBits ScalingFactor(IndexBitWidth);
      // Multiply by current sizeof type.
      // &A[i] == A + i * sizeof(*A[i]).
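      // E.g. an index into an array of i64 is scaled by 8, so the scaled
      // offset contributes at least three trailing zero bits.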
1354 if (IndexTypeSize.isScalable()) {
1355 // For scalable types the only thing we know about sizeof is
1356 // that this is a multiple of the minimum size.
1357 ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes));
1358 } else if (IndexBits.isConstant()) {
1359 APInt IndexConst = IndexBits.getConstant();
1360 APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
1361 IndexConst *= ScalingFactor;
1362 AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
1363 continue;
1364 } else {
1365 ScalingFactor =
1366 KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
1367 }
1368 IndexBits = KnownBits::mul(IndexBits, ScalingFactor);
1369
1370 // If the offsets have a different width from the pointer, according
1371 // to the language reference we need to sign-extend or truncate them
1372 // to the width of the pointer.
1373 IndexBits = IndexBits.sextOrTrunc(BitWidth);
1374
1375 // Note that inbounds does *not* guarantee nsw for the addition, as only
1376 // the offset is signed, while the base address is unsigned.
1377 Known = KnownBits::computeForAddSub(
1378 /*Add=*/true, /*NSW=*/false, Known, IndexBits);
1379 }
1380 if (!Known.isUnknown() && !AccConstIndices.isZero()) {
1381 KnownBits Index = KnownBits::makeConstant(AccConstIndices);
1382 Known = KnownBits::computeForAddSub(
1383 /*Add=*/true, /*NSW=*/false, Known, Index);
1384 }
1385 break;
1386 }
1387 case Instruction::PHI: {
1388 const PHINode *P = cast<PHINode>(I);
1389 BinaryOperator *BO = nullptr;
1390 Value *R = nullptr, *L = nullptr;
1391 if (matchSimpleRecurrence(P, BO, R, L)) {
1392 // Handle the case of a simple two-predecessor recurrence PHI.
1393 // There's a lot more that could theoretically be done here, but
1394 // this is sufficient to catch some interesting cases.
1395 unsigned Opcode = BO->getOpcode();
1396
1397 // If this is a shift recurrence, we know the bits being shifted in.
1398 // We can combine that with information about the start value of the
1399 // recurrence to conclude facts about the result.
1400 if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
1401 Opcode == Instruction::Shl) &&
1402 BO->getOperand(0) == I) {
1403
1404 // We have matched a recurrence of the form:
1405 // %iv = phi [R, %entry], [%iv.next, %backedge]
1406 // %iv.next = shift_op %iv, L
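// For instance, if %iv.next = lshr i32 %iv, %amt and the start value R is
// known to have 16 leading zeros, every value of the recurrence keeps at
// least those 16 leading zeros, because lshr can only add more.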
1407
1408 // Recurse with the phi context to avoid concern about whether facts
1409 // inferred hold at the original context instruction. TODO: It may be
1410 // correct to use the original context. If warranted, explore and
1411 // add sufficient tests to cover.
1412 Query RecQ = Q;
1413 RecQ.CxtI = P;
1414 computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
1415 switch (Opcode) {
1416 case Instruction::Shl:
1417 // A shl recurrence will only increase the trailing zeros
1418 Known.Zero.setLowBits(Known2.countMinTrailingZeros());
1419 break;
1420 case Instruction::LShr:
1421 // A lshr recurrence will preserve the leading zeros of the
1422 // start value
1423 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1424 break;
1425 case Instruction::AShr:
1426 // An ashr recurrence will extend the initial sign bit
1427 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1428 Known.One.setHighBits(Known2.countMinLeadingOnes());
1429 break;
1430 }
1431 }
1432
1433 // Check for operations that have the property that if
1434 // both their operands have low zero bits, the result
1435 // will have low zero bits.
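// E.g. if the start value and the step of an add recurrence are both
// multiples of 4 (two known trailing zero bits), every iterate stays a
// multiple of 4.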
1436 if (Opcode == Instruction::Add ||
1437 Opcode == Instruction::Sub ||
1438 Opcode == Instruction::And ||
1439 Opcode == Instruction::Or ||
1440 Opcode == Instruction::Mul) {
1441 // Change the context instruction to the "edge" that flows into the
1442 // phi. This is important because that is where the value is actually
1443 // "evaluated" even though it is used later somewhere else. (see also
1444 // D69571).
1445 Query RecQ = Q;
1446
1447 unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
1448 Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
1449 Instruction *LInst = P->getIncomingBlock(1-OpNum)->getTerminator();
1450
1451 // Ok, we have a PHI of the form L op= R. Check for low
1452 // zero bits.
1453 RecQ.CxtI = RInst;
1454 computeKnownBits(R, Known2, Depth + 1, RecQ);
1455
1456 // We need to take the minimum number of known trailing zeros of the two operands.
1457 KnownBits Known3(BitWidth);
1458 RecQ.CxtI = LInst;
1459 computeKnownBits(L, Known3, Depth + 1, RecQ);
1460
1461 Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1462 Known3.countMinTrailingZeros()));
1463
1464 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
1465 if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1466 // If the initial value of the recurrence is nonnegative and we are
1467 // adding a nonnegative number with nsw, the result can only be nonnegative
1468 // or a poison value, regardless of the number of times we execute the
1469 // add in the phi recurrence. If the initial value is negative and we are
1470 // adding a negative number with nsw, the result can only be
1471 // negative or a poison value. Similar arguments apply to sub and mul.
1472 //
1473 // (add non-negative, non-negative) --> non-negative
1474 // (add negative, negative) --> negative
1475 if (Opcode == Instruction::Add) {
1476 if (Known2.isNonNegative() && Known3.isNonNegative())
1477 Known.makeNonNegative();
1478 else if (Known2.isNegative() && Known3.isNegative())
1479 Known.makeNegative();
1480 }
1481
1482 // (sub nsw non-negative, negative) --> non-negative
1483 // (sub nsw negative, non-negative) --> negative
1484 else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
1485 if (Known2.isNonNegative() && Known3.isNegative())
1486 Known.makeNonNegative();
1487 else if (Known2.isNegative() && Known3.isNonNegative())
1488 Known.makeNegative();
1489 }
1490
1491 // (mul nsw non-negative, non-negative) --> non-negative
1492 else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1493 Known3.isNonNegative())
1494 Known.makeNonNegative();
1495 }
1496
1497 break;
1498 }
1499 }
1500
1501 // Unreachable blocks may have zero-operand PHI nodes.
1502 if (P->getNumIncomingValues() == 0)
1503 break;
1504
1505 // Otherwise take the unions of the known bit sets of the operands,
1506 // taking conservative care to avoid excessive recursion.
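// E.g. an i4 phi of the constants 8 (0b1000) and 12 (0b1100) has bit 3
// known one, bits 0-1 known zero, and bit 2 unknown.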
1507 if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
1508 // Skip if every incoming value references the phi itself or is undef.
1509 if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1510 break;
1511
1512 Known.Zero.setAllBits();
1513 Known.One.setAllBits();
1514 for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1515 Value *IncValue = P->getIncomingValue(u);
1516 // Skip direct self references.
1517 if (IncValue == P) continue;
1518
1519 // Change the context instruction to the "edge" that flows into the
1520 // phi. This is important because that is where the value is actually
1521 // "evaluated" even though it is used later somewhere else. (see also
1522 // D69571).
1523 Query RecQ = Q;
1524 RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1525
1526 Known2 = KnownBits(BitWidth);
1527 // Recurse, but cap the recursion to one level, because we don't
1528 // want to waste time spinning around in loops.
1529 computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1530 Known = KnownBits::commonBits(Known, Known2);
1531 // If all bits have been ruled out, there's no need to check
1532 // more operands.
1533 if (Known.isUnknown())
1534 break;
1535 }
1536 }
1537 break;
1538 }
1539 case Instruction::Call:
1540 case Instruction::Invoke:
1541 // If range metadata is attached to this call, set known bits from that,
1542 // and then intersect with known bits based on other properties of the
1543 // function.
1544 if (MDNode *MD =
1545 Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1546 computeKnownBitsFromRangeMetadata(*MD, Known);
1547 if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1548 computeKnownBits(RV, Known2, Depth + 1, Q);
1549 Known.Zero |= Known2.Zero;
1550 Known.One |= Known2.One;
1551 }
1552 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1553 switch (II->getIntrinsicID()) {
1554 default: break;
1555 case Intrinsic::abs: {
1556 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1557 bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
1558 Known = Known2.abs(IntMinIsPoison);
1559 break;
1560 }
1561 case Intrinsic::bitreverse:
1562 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1563 Known.Zero |= Known2.Zero.reverseBits();
1564 Known.One |= Known2.One.reverseBits();
1565 break;
1566 case Intrinsic::bswap:
1567 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1568 Known.Zero |= Known2.Zero.byteSwap();
1569 Known.One |= Known2.One.byteSwap();
1570 break;
1571 case Intrinsic::ctlz: {
1572 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1573 // If we have a known 1, its position is our upper bound.
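// E.g. if the operand is known to have a one somewhere in its top five
// bits, there are at most 4 leading zeros, so the result is at most 4 and
// fits in Log2_32(4) + 1 == 3 bits; everything above those is known zero.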
1574 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1575 // If this call is undefined for 0, the result will be less than 2^n.
1576 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1577 PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1578 unsigned LowBits = Log2_32(PossibleLZ)+1;
1579 Known.Zero.setBitsFrom(LowBits);
1580 break;
1581 }
1582 case Intrinsic::cttz: {
1583 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1584 // If we have a known 1, its position is our upper bound.
1585 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1586 // If this call is undefined for 0, the result will be less than 2^n.
1587 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1588 PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1589 unsigned LowBits = Log2_32(PossibleTZ)+1;
1590 Known.Zero.setBitsFrom(LowBits);
1591 break;
1592 }
1593 case Intrinsic::ctpop: {
1594 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1595 // We can bound the space the count needs. Also, bits known to be zero
1596 // can't contribute to the population.
1597 unsigned BitsPossiblySet = Known2.countMaxPopulation();
1598 unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1599 Known.Zero.setBitsFrom(LowBits);
1600 // TODO: we could bound Known.One using the lower bound on the number of
1601 // bits that must be set, i.e. Known2.countMinPopulation().
1602 break;
1603 }
1604 case Intrinsic::fshr:
1605 case Intrinsic::fshl: {
1606 const APInt *SA;
1607 if (!match(I->getOperand(2), m_APInt(SA)))
1608 break;
1609
1610 // Normalize to funnel shift left.
1611 uint64_t ShiftAmt = SA->urem(BitWidth);
1612 if (II->getIntrinsicID() == Intrinsic::fshr)
1613 ShiftAmt = BitWidth - ShiftAmt;
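// E.g. on i8, fshr(X, Y, 3) == fshl(X, Y, 5): the result is the low 3
// bits of X followed by the high 5 bits of Y.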
1614
1615 KnownBits Known3(BitWidth);
1616 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1617 computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1618
1619 Known.Zero =
1620 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1621 Known.One =
1622 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1623 break;
1624 }
1625 case Intrinsic::uadd_sat:
1626 case Intrinsic::usub_sat: {
1627 bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1628 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1629 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1630
1631 // Add: Leading ones of either operand are preserved.
1632 // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1633 // as leading zeros in the result.
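// E.g. for i8 uadd.sat, if one operand is known to be >= 0xC0 (two
// leading ones), the result is either the true sum (also >= 0xC0) or the
// saturation value 0xFF, so its top two bits are known one either way.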
1634 unsigned LeadingKnown;
1635 if (IsAdd)
1636 LeadingKnown = std::max(Known.countMinLeadingOnes(),
1637 Known2.countMinLeadingOnes());
1638 else
1639 LeadingKnown = std::max(Known.countMinLeadingZeros(),
1640 Known2.countMinLeadingOnes());
1641
1642 Known = KnownBits::computeForAddSub(
1643 IsAdd, /* NSW */ false, Known, Known2);
1644
1645 // We select between the operation result and all-ones/zero
1646 // respectively, so we can preserve known ones/zeros.
1647 if (IsAdd) {
1648 Known.One.setHighBits(LeadingKnown);
1649 Known.Zero.clearAllBits();
1650 } else {
1651 Known.Zero.setHighBits(LeadingKnown);
1652 Known.One.clearAllBits();
1653 }
1654 break;
1655 }
1656 case Intrinsic::umin:
1657 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1658 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1659 Known = KnownBits::umin(Known, Known2);
1660 break;
1661 case Intrinsic::umax:
1662 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1663 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1664 Known = KnownBits::umax(Known, Known2);
1665 break;
1666 case Intrinsic::smin:
1667 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1668 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1669 Known = KnownBits::smin(Known, Known2);
1670 break;
1671 case Intrinsic::smax:
1672 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1673 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1674 Known = KnownBits::smax(Known, Known2);
1675 break;
1676 case Intrinsic::x86_sse42_crc32_64_64:
1677 Known.Zero.setBitsFrom(32);
1678 break;
1679 case Intrinsic::riscv_vsetvli:
1680 case Intrinsic::riscv_vsetvlimax:
1681 // Assume that VL output is positive and would fit in an int32_t.
1682 // TODO: VLEN might be capped at 16 bits in a future V spec update.
1683 if (BitWidth >= 32)
1684 Known.Zero.setBitsFrom(31);
1685 break;
1686 case Intrinsic::vscale: {
1687 if (!II->getParent() || !II->getFunction() ||
1688 !II->getFunction()->hasFnAttribute(Attribute::VScaleRange))
1689 break;
1690
1691 auto VScaleRange = II->getFunction()
1692 ->getFnAttribute(Attribute::VScaleRange)
1693 .getVScaleRangeArgs();
1694
1695 if (VScaleRange.second == 0)
1696 break;
1697
1698 // If vscale min = max then we know the exact value at compile time
1699 // and hence we know the exact bits.
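// E.g. a function with the vscale_range(4,4) attribute makes this call
// return exactly 4, so all bits are known.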
1700 if (VScaleRange.first == VScaleRange.second) {
1701 Known.One = VScaleRange.first;
1702 Known.Zero = VScaleRange.first;
1703 Known.Zero.flipAllBits();
1704 break;
1705 }
1706
1707 unsigned FirstZeroHighBit = 32 - countLeadingZeros(VScaleRange.second);
1708 if (FirstZeroHighBit < BitWidth)
1709 Known.Zero.setBitsFrom(FirstZeroHighBit);
1710
1711 break;
1712 }
1713 }
1714 }
1715 break;
1716 case Instruction::ShuffleVector: {
1717 auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1718 // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1719 if (!Shuf) {
1720 Known.resetAll();
1721 return;
1722 }
1723 // For undef elements, we don't know anything about the common state of
1724 // the shuffle result.
1725 APInt DemandedLHS, DemandedRHS;
1726 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1727 Known.resetAll();
1728 return;
1729 }
1730 Known.One.setAllBits();
1731 Known.Zero.setAllBits();
1732 if (!!DemandedLHS) {
1733 const Value *LHS = Shuf->getOperand(0);
1734 computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1735 // If we don't know any bits, early out.
1736 if (Known.isUnknown())
1737 break;
1738 }
1739 if (!!DemandedRHS) {
1740 const Value *RHS = Shuf->getOperand(1);
1741 computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1742 Known = KnownBits::commonBits(Known, Known2);
1743 }
1744 break;
1745 }
1746 case Instruction::InsertElement: {
1747 const Value *Vec = I->getOperand(0);
1748 const Value *Elt = I->getOperand(1);
1749 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1750 // Early out if the index is non-constant or out-of-range.
1751 unsigned NumElts = DemandedElts.getBitWidth();
1752 if (!CIdx || CIdx->getValue().uge(NumElts)) {
1753 Known.resetAll();
1754 return;
1755 }
1756 Known.One.setAllBits();
1757 Known.Zero.setAllBits();
1758 unsigned EltIdx = CIdx->getZExtValue();
1759 // Do we demand the inserted element?
1760 if (DemandedElts[EltIdx]) {
1761 computeKnownBits(Elt, Known, Depth + 1, Q);
1762 // If we don't know any bits, early out.
1763 if (Known.isUnknown())
1764 break;
1765 }
1766 // We don't need the base vector element that has been inserted.
1767 APInt DemandedVecElts = DemandedElts;
1768 DemandedVecElts.clearBit(EltIdx);
1769 if (!!DemandedVecElts) {
1770 computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1771 Known = KnownBits::commonBits(Known, Known2);
1772 }
1773 break;
1774 }
1775 case Instruction::ExtractElement: {
1776 // Look through extract element. If the index is non-constant or
1777 // out-of-range demand all elements, otherwise just the extracted element.
1778 const Value *Vec = I->getOperand(0);
1779 const Value *Idx = I->getOperand(1);
1780 auto *CIdx = dyn_cast<ConstantInt>(Idx);
1781 if (isa<ScalableVectorType>(Vec->getType())) {
1782 // FIXME: there's probably *something* we can do with scalable vectors
1783 Known.resetAll();
1784 break;
1785 }
1786 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1787 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
1788 if (CIdx && CIdx->getValue().ult(NumElts))
1789 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1790 computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1791 break;
1792 }
1793 case Instruction::ExtractValue:
1794 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1795 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1796 if (EVI->getNumIndices() != 1) break;
1797 if (EVI->getIndices()[0] == 0) {
1798 switch (II->getIntrinsicID()) {
1799 default: break;
1800 case Intrinsic::uadd_with_overflow:
1801 case Intrinsic::sadd_with_overflow:
1802 computeKnownBitsAddSub(true, II->getArgOperand(0),
1803 II->getArgOperand(1), false, DemandedElts,
1804 Known, Known2, Depth, Q);
1805 break;
1806 case Intrinsic::usub_with_overflow:
1807 case Intrinsic::ssub_with_overflow:
1808 computeKnownBitsAddSub(false, II->getArgOperand(0),
1809 II->getArgOperand(1), false, DemandedElts,
1810 Known, Known2, Depth, Q);
1811 break;
1812 case Intrinsic::umul_with_overflow:
1813 case Intrinsic::smul_with_overflow:
1814 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1815 DemandedElts, Known, Known2, Depth, Q);
1816 break;
1817 }
1818 }
1819 }
1820 break;
1821 case Instruction::Freeze:
1822 if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1823 Depth + 1))
1824 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1825 break;
1826 }
1827 }
1828
1829 /// Determine which bits of V are known to be either zero or one and return
1830 /// them.
1831 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1832 unsigned Depth, const Query &Q) {
1833 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1834 computeKnownBits(V, DemandedElts, Known, Depth, Q);
1835 return Known;
1836 }
1837
1838 /// Determine which bits of V are known to be either zero or one and return
1839 /// them.
1840 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1841 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1842 computeKnownBits(V, Known, Depth, Q);
1843 return Known;
1844 }
1845
1846 /// Determine which bits of V are known to be either zero or one and return
1847 /// them in the Known bit set.
1848 ///
1849 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1850 /// we cannot optimize based on the assumption that it is zero without changing
1851 /// it to be an explicit zero. If we don't change it to zero, other code could
1852 /// be optimized based on the contradictory assumption that it is non-zero.
1853 /// Because instcombine aggressively folds operations with undef args anyway,
1854 /// this won't lose us code quality.
1855 ///
1856 /// This function is defined on values with integer type, values with pointer
1857 /// type, and vectors of integers. In the case where V is a vector, the
1858 /// known zero and known one values are the same width as the vector element,
1859 /// and a bit is set only if it is true for all of the demanded elements in
1860 /// the vector specified by DemandedElts.
1861 void computeKnownBits(const Value *V, const APInt &DemandedElts,
1862 KnownBits &Known, unsigned Depth, const Query &Q) {
1863 if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
1864 // No demanded elts or V is a scalable vector, better to assume we don't
1865 // know anything.
1866 Known.resetAll();
1867 return;
1868 }
1869
1870 assert(V && "No Value?");
1871 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1872
1873 #ifndef NDEBUG
1874 Type *Ty = V->getType();
1875 unsigned BitWidth = Known.getBitWidth();
1876
1877 assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1878 "Not integer or pointer type!");
1879
1880 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1881 assert(
1882 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1883 "DemandedElt width should equal the fixed vector number of elements");
1884 } else {
1885 assert(DemandedElts == APInt(1, 1) &&
1886 "DemandedElt width should be 1 for scalars");
1887 }
1888
1889 Type *ScalarTy = Ty->getScalarType();
1890 if (ScalarTy->isPointerTy()) {
1891 assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1892 "V and Known should have same BitWidth");
1893 } else {
1894 assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1895 "V and Known should have same BitWidth");
1896 }
1897 #endif
1898
1899 const APInt *C;
1900 if (match(V, m_APInt(C))) {
1901 // We know all of the bits for a scalar constant or a splat vector constant!
1902 Known = KnownBits::makeConstant(*C);
1903 return;
1904 }
1905 // Null and aggregate-zero are all-zeros.
1906 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1907 Known.setAllZero();
1908 return;
1909 }
1910 // Handle a constant vector by taking the intersection of the known bits of
1911 // each element.
1912 if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
1913 // We know that CDV must be a vector of integers. Take the intersection of
1914 // each element.
1915 Known.Zero.setAllBits(); Known.One.setAllBits();
1916 for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1917 if (!DemandedElts[i])
1918 continue;
1919 APInt Elt = CDV->getElementAsAPInt(i);
1920 Known.Zero &= ~Elt;
1921 Known.One &= Elt;
1922 }
1923 return;
1924 }
1925
1926 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1927 // We know that CV must be a vector of integers. Take the intersection of
1928 // each element.
1929 Known.Zero.setAllBits(); Known.One.setAllBits();
1930 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1931 if (!DemandedElts[i])
1932 continue;
1933 Constant *Element = CV->getAggregateElement(i);
1934 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1935 if (!ElementCI) {
1936 Known.resetAll();
1937 return;
1938 }
1939 const APInt &Elt = ElementCI->getValue();
1940 Known.Zero &= ~Elt;
1941 Known.One &= Elt;
1942 }
1943 return;
1944 }
1945
1946 // Start out not knowing anything.
1947 Known.resetAll();
1948
1949 // We can't infer anything about undefs.
1950 if (isa<UndefValue>(V))
1951 return;
1952
1953 // There's no point in looking through other users of ConstantData for
1954 // assumptions. Confirm that we've handled them all.
1955 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1956
1957 // All recursive calls that increase depth must come after this.
1958 if (Depth == MaxAnalysisRecursionDepth)
1959 return;
1960
1961 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1962 // the bits of its aliasee.
1963 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1964 if (!GA->isInterposable())
1965 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1966 return;
1967 }
1968
1969 if (const Operator *I = dyn_cast<Operator>(V))
1970 computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
1971
1972 // Aligned pointers have trailing zeros - refine Known.Zero set
1973 if (isa<PointerType>(V->getType())) {
1974 Align Alignment = V->getPointerAlignment(Q.DL);
1975 Known.Zero.setLowBits(Log2(Alignment));
1976 }
1977
1978 // computeKnownBitsFromAssume strictly refines Known.
1979 // Therefore, we run them after computeKnownBitsFromOperator.
1980
1981 // Check whether a nearby assume intrinsic can determine some known bits.
1982 computeKnownBitsFromAssume(V, Known, Depth, Q);
1983
1984 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1985 }
1986
1987 /// Return true if the given value is known to have exactly one
1988 /// bit set when defined. For vectors return true if every element is known to
1989 /// be a power of two when defined. Supports values with integer or pointer
1990 /// types and vectors of integers.
1991 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1992 const Query &Q) {
1993 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1994
1995 // Attempt to match against constants.
1996 if (OrZero && match(V, m_Power2OrZero()))
1997 return true;
1998 if (match(V, m_Power2()))
1999 return true;
2000
2001 // 1 << X is clearly a power of two if the one is not shifted off the end. If
2002 // it is shifted off the end then the result is undefined.
2003 if (match(V, m_Shl(m_One(), m_Value())))
2004 return true;
2005
2006 // (signmask) >>l X is clearly a power of two if the one is not shifted off
2007 // the bottom. If it is shifted off the bottom then the result is undefined.
2008 if (match(V, m_LShr(m_SignMask(), m_Value())))
2009 return true;
2010
2011 // The remaining tests are all recursive, so bail out if we hit the limit.
2012 if (Depth++ == MaxAnalysisRecursionDepth)
2013 return false;
2014
2015 Value *X = nullptr, *Y = nullptr;
2016 // A shift left or a logical shift right of a power of two is a power of two
2017 // or zero.
2018 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
2019 match(V, m_LShr(m_Value(X), m_Value()))))
2020 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
2021
2022 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
2023 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
2024
2025 if (const SelectInst *SI = dyn_cast<SelectInst>(V))
2026 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
2027 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
2028
2029 // Peek through min/max.
2030 if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) {
2031 return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) &&
2032 isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q);
2033 }
2034
2035 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
2036 // A power of two and'd with anything is a power of two or zero.
2037 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
2038 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
2039 return true;
2040 // X & (-X) is always a power of two or zero.
2041 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
2042 return true;
2043 return false;
2044 }
2045
2046 // Adding a power-of-two or zero to the same power-of-two or zero yields
2047 // either the original power-of-two, a larger power-of-two or zero.
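// E.g. 4 + 4 == 8 and 4 + 0 == 4. When OrZero is not set, the nuw/nsw
// flags rule out the wrapping case (on i4, 8 + 8 would wrap to 0) by
// making it poison instead.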
2048 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2049 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
2050 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
2051 Q.IIQ.hasNoSignedWrap(VOBO)) {
2052 if (match(X, m_And(m_Specific(Y), m_Value())) ||
2053 match(X, m_And(m_Value(), m_Specific(Y))))
2054 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
2055 return true;
2056 if (match(Y, m_And(m_Specific(X), m_Value())) ||
2057 match(Y, m_And(m_Value(), m_Specific(X))))
2058 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
2059 return true;
2060
2061 unsigned BitWidth = V->getType()->getScalarSizeInBits();
2062 KnownBits LHSBits(BitWidth);
2063 computeKnownBits(X, LHSBits, Depth, Q);
2064
2065 KnownBits RHSBits(BitWidth);
2066 computeKnownBits(Y, RHSBits, Depth, Q);
2067 // If i8 V is a power of two or zero:
2068 // ZeroBits: 1 1 1 0 1 1 1 1
2069 // ~ZeroBits: 0 0 0 1 0 0 0 0
2070 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2071 // If OrZero isn't set, we cannot give back a zero result.
2072 // Make sure either the LHS or RHS has a bit set.
2073 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2074 return true;
2075 }
2076 }
2077
2078 // An exact divide or right shift can only shift off zero bits, so the result
2079 // is a power of two only if the first operand is a power of two and not
2080 // copying a sign bit (sdiv int_min, 2).
2081 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2082 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2083 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2084 Depth, Q);
2085 }
2086
2087 return false;
2088 }
2089
2090 /// Test whether a GEP's result is known to be non-null.
2091 ///
2092 /// Uses properties inherent in a GEP to try to determine whether it is known
2093 /// to be non-null.
2094 ///
2095 /// Currently this routine does not support vector GEPs.
2096 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2097 const Query &Q) {
2098 const Function *F = nullptr;
2099 if (const Instruction *I = dyn_cast<Instruction>(GEP))
2100 F = I->getFunction();
2101
2102 if (!GEP->isInBounds() ||
2103 NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2104 return false;
2105
2106 // FIXME: Support vector-GEPs.
2107 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2108
2109 // If the base pointer is non-null, we cannot walk to a null address with an
2110 // inbounds GEP in address space zero.
2111 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2112 return true;
2113
2114 // Walk the GEP operands and see if any operand introduces a non-zero offset.
2115 // If so, then the GEP cannot produce a null pointer, as doing so would
2116 // inherently violate the inbounds contract within address space zero.
2117 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2118 GTI != GTE; ++GTI) {
2119 // Struct types are easy -- they must always be indexed by a constant.
2120 if (StructType *STy = GTI.getStructTypeOrNull()) {
2121 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2122 unsigned ElementIdx = OpC->getZExtValue();
2123 const StructLayout *SL = Q.DL.getStructLayout(STy);
2124 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2125 if (ElementOffset > 0)
2126 return true;
2127 continue;
2128 }
2129
2130 // If we have a zero-sized type, the index doesn't matter. Keep looping.
2131 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
2132 continue;
2133
2134 // Fast path the constant operand case both for efficiency and so we don't
2135 // increment Depth when just zipping down an all-constant GEP.
2136 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2137 if (!OpC->isZero())
2138 return true;
2139 continue;
2140 }
2141
2142 // We post-increment Depth here because while isKnownNonZero increments it
2143 // as well, when we pop back up that increment won't persist. We don't want
2144 // to recurse 10k times just because we have 10k GEP operands. We don't
2145 // bail completely out because we want to handle constant GEPs regardless
2146 // of depth.
2147 if (Depth++ >= MaxAnalysisRecursionDepth)
2148 continue;
2149
2150 if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2151 return true;
2152 }
2153
2154 return false;
2155 }
2156
2157 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2158 const Instruction *CtxI,
2159 const DominatorTree *DT) {
2160 if (isa<Constant>(V))
2161 return false;
2162
2163 if (!CtxI || !DT)
2164 return false;
2165
2166 unsigned NumUsesExplored = 0;
2167 for (auto *U : V->users()) {
2168 // Avoid massive lists
2169 if (NumUsesExplored >= DomConditionsMaxUses)
2170 break;
2171 NumUsesExplored++;
2172
2173 // If the value is used as an argument to a call or invoke, then argument
2174 // attributes may provide an answer about null-ness.
2175 if (const auto *CB = dyn_cast<CallBase>(U))
2176 if (auto *CalledFunc = CB->getCalledFunction())
2177 for (const Argument &Arg : CalledFunc->args())
2178 if (CB->getArgOperand(Arg.getArgNo()) == V &&
2179 Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
2180 DT->dominates(CB, CtxI))
2181 return true;
2182
2183 // If the value is used as a load/store pointer, then it must be non-null.
2184 if (V == getLoadStorePointerOperand(U)) {
2185 const Instruction *I = cast<Instruction>(U);
2186 if (!NullPointerIsDefined(I->getFunction(),
2187 V->getType()->getPointerAddressSpace()) &&
2188 DT->dominates(I, CtxI))
2189 return true;
2190 }
2191
2192 // Consider only compare instructions uniquely controlling a branch
2193 Value *RHS;
2194 CmpInst::Predicate Pred;
2195 if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
2196 continue;
2197
2198 bool NonNullIfTrue;
2199 if (cmpExcludesZero(Pred, RHS))
2200 NonNullIfTrue = true;
2201 else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
2202 NonNullIfTrue = false;
2203 else
2204 continue;
2205
2206 SmallVector<const User *, 4> WorkList;
2207 SmallPtrSet<const User *, 4> Visited;
2208 for (auto *CmpU : U->users()) {
2209 assert(WorkList.empty() && "Should be!");
2210 if (Visited.insert(CmpU).second)
2211 WorkList.push_back(CmpU);
2212
2213 while (!WorkList.empty()) {
2214 auto *Curr = WorkList.pop_back_val();
2215
2216 // If a user is an AND, add all its users to the work list. We only
2217 // propagate the "pred != null" condition through AND because it is only
2218 // correct to assume that all conditions of the AND are met in the true branch.
2219 // TODO: Support similar logic of OR and EQ predicate?
2220 if (NonNullIfTrue)
2221 if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
2222 for (auto *CurrU : Curr->users())
2223 if (Visited.insert(CurrU).second)
2224 WorkList.push_back(CurrU);
2225 continue;
2226 }
2227
2228 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2229 assert(BI->isConditional() && "uses a comparison!");
2230
2231 BasicBlock *NonNullSuccessor =
2232 BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2233 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2234 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2235 return true;
2236 } else if (NonNullIfTrue && isGuard(Curr) &&
2237 DT->dominates(cast<Instruction>(Curr), CtxI)) {
2238 return true;
2239 }
2240 }
2241 }
2242 }
2243
2244 return false;
2245 }
2246
2247 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2248 /// ensure that the value it's attached to can never be equal to 'Value'?
2249 /// 'Value' must be given at the same bit width as the range's bounds.
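/// For example (illustrative), the range pair (i32 1, i32 256) describes
/// the half-open interval [1, 256) and therefore excludes the value 0.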
2250 static bool rangeMetadataExcludesValue(const MDNode *Ranges, const APInt &Value) {
2251 const unsigned NumRanges = Ranges->getNumOperands() / 2;
2252 assert(NumRanges >= 1);
2253 for (unsigned i = 0; i < NumRanges; ++i) {
2254 ConstantInt *Lower =
2255 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2256 ConstantInt *Upper =
2257 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2258 ConstantRange Range(Lower->getValue(), Upper->getValue());
2259 if (Range.contains(Value))
2260 return false;
2261 }
2262 return true;
2263 }
2264
2265 /// Try to detect a recurrence that monotonically increases/decreases from a
2266 /// non-zero starting value. These are common as induction variables.
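/// E.g. %i = phi i32 [ 1, %entry ], [ %i.next, %loop ] with
/// %i.next = add nuw i32 %i, 2 starts at a non-zero value and can never
/// wrap back to zero.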
2267 static bool isNonZeroRecurrence(const PHINode *PN) {
2268 BinaryOperator *BO = nullptr;
2269 Value *Start = nullptr, *Step = nullptr;
2270 const APInt *StartC, *StepC;
2271 if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
2272 !match(Start, m_APInt(StartC)) || StartC->isZero())
2273 return false;
2274
2275 switch (BO->getOpcode()) {
2276 case Instruction::Add:
2277 // Starting from non-zero and stepping away from zero can never wrap back
2278 // to zero.
2279 return BO->hasNoUnsignedWrap() ||
2280 (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) &&
2281 StartC->isNegative() == StepC->isNegative());
2282 case Instruction::Mul:
2283 return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
2284 match(Step, m_APInt(StepC)) && !StepC->isZero();
2285 case Instruction::Shl:
2286 return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap();
2287 case Instruction::AShr:
2288 case Instruction::LShr:
2289 return BO->isExact();
2290 default:
2291 return false;
2292 }
2293 }
2294
2295 /// Return true if the given value is known to be non-zero when defined. For
2296 /// vectors, return true if every demanded element is known to be non-zero when
2297 /// defined. For pointers, if the context instruction and dominator tree are
2298 /// specified, perform context-sensitive analysis and return true if the
2299 /// pointer couldn't possibly be null at the specified instruction.
2300 /// Supports values with integer or pointer type and vectors of integers.
2301 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2302 const Query &Q) {
2303 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2304 // vector
2305 if (isa<ScalableVectorType>(V->getType()))
2306 return false;
2307
2308 if (auto *C = dyn_cast<Constant>(V)) {
2309 if (C->isNullValue())
2310 return false;
2311 if (isa<ConstantInt>(C))
2312 // Must be non-zero due to null test above.
2313 return true;
2314
2315 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2316 // See the comment for IntToPtr/PtrToInt instructions below.
2317 if (CE->getOpcode() == Instruction::IntToPtr ||
2318 CE->getOpcode() == Instruction::PtrToInt)
2319 if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType())
2320 .getFixedSize() <=
2321 Q.DL.getTypeSizeInBits(CE->getType()).getFixedSize())
2322 return isKnownNonZero(CE->getOperand(0), Depth, Q);
2323 }
2324
2325 // For constant vectors, check that all elements are undefined or known
2326 // non-zero to determine that the whole vector is known non-zero.
2327 if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2328 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2329 if (!DemandedElts[i])
2330 continue;
2331 Constant *Elt = C->getAggregateElement(i);
2332 if (!Elt || Elt->isNullValue())
2333 return false;
2334 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2335 return false;
2336 }
2337 return true;
2338 }
2339
2340 // A global variable in address space 0 is non-null unless extern weak
2341 // or an absolute symbol reference. Other address spaces may have null as a
2342 // valid address for a global, so we can't assume anything.
2343 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2344 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2345 GV->getType()->getAddressSpace() == 0)
2346 return true;
2347 } else
2348 return false;
2349 }
2350
2351 if (auto *I = dyn_cast<Instruction>(V)) {
2352 if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2353 // If the possible ranges don't contain zero, then the value is
2354 // definitely non-zero.
2355 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2356 const APInt ZeroValue(Ty->getBitWidth(), 0);
2357 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2358 return true;
2359 }
2360 }
2361 }
2362
2363 if (isKnownNonZeroFromAssume(V, Q))
2364 return true;
2365
2366 // Some of the tests below are recursive, so bail out if we hit the limit.
2367 if (Depth++ >= MaxAnalysisRecursionDepth)
2368 return false;
2369
2370 // Check for pointer simplifications.
2371
2372 if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2373 // Alloca never returns null, malloc might.
2374 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2375 return true;
2376
2377 // A byval or inalloca argument is non-null unless null is a valid address
2378 // in its address space. A nonnull argument is assumed to never be null.
2379 if (const Argument *A = dyn_cast<Argument>(V)) {
2380 if (((A->hasPassPointeeByValueCopyAttr() &&
2381 !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2382 A->hasNonNullAttr()))
2383 return true;
2384 }
2385
2386 // A Load tagged with nonnull metadata is never null.
2387 if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2388 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2389 return true;
2390
2391 if (const auto *Call = dyn_cast<CallBase>(V)) {
2392 if (Call->isReturnNonNull())
2393 return true;
2394 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2395 return isKnownNonZero(RP, Depth, Q);
2396 }
2397 }
2398
2399 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2400 return true;
2401
2402 // Check for recursive pointer simplifications.
2403 if (V->getType()->isPointerTy()) {
2404 // Look through bitcast operations, GEPs, and int2ptr instructions as they
2405 // do not alter the value, or at least not the nullness property of the
2406 // value, e.g., int2ptr is allowed to zero/sign extend the value.
2407 //
2408 // Note that we have to take special care to avoid looking through
2409 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2410 // as casts that can alter the value, e.g., AddrSpaceCasts.
2411 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2412 return isGEPKnownNonNull(GEP, Depth, Q);
2413
2414 if (auto *BCO = dyn_cast<BitCastOperator>(V))
2415 return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2416
2417 if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2418 if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()).getFixedSize() <=
2419 Q.DL.getTypeSizeInBits(I2P->getDestTy()).getFixedSize())
2420 return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2421 }
2422
2423 // Similar to int2ptr above, we can look through ptr2int here if the cast
2424 // is a no-op or an extend and not a truncate.
2425 if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2426 if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()).getFixedSize() <=
2427 Q.DL.getTypeSizeInBits(P2I->getDestTy()).getFixedSize())
2428 return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2429
2430 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2431
2432 // X | Y != 0 if X != 0 or Y != 0.
2433 Value *X = nullptr, *Y = nullptr;
2434 if (match(V, m_Or(m_Value(X), m_Value(Y))))
2435 return isKnownNonZero(X, DemandedElts, Depth, Q) ||
2436 isKnownNonZero(Y, DemandedElts, Depth, Q);
2437
2438 // ext X != 0 if X != 0.
2439 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2440 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2441
2442 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
2443 // if the lowest bit is shifted off the end.
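// E.g. shl i8 1, 3 == 8: the set low bit of an odd X just moves up to bit
// Y, so the result stays non-zero for any in-range shift amount.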
2444 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2445 // shl nuw can't remove any non-zero bits.
2446 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2447 if (Q.IIQ.hasNoUnsignedWrap(BO))
2448 return isKnownNonZero(X, Depth, Q);
2449
2450 KnownBits Known(BitWidth);
2451 computeKnownBits(X, DemandedElts, Known, Depth, Q);
2452 if (Known.One[0])
2453 return true;
2454 }
2455 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
2456 // defined if the sign bit is shifted off the end.
2457 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2458 // shr exact can only shift out zero bits.
2459 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2460 if (BO->isExact())
2461 return isKnownNonZero(X, Depth, Q);
2462
2463 KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q);
2464 if (Known.isNegative())
2465 return true;
2466
2467 // If the shifter operand is a constant, and all of the bits shifted
2468 // out are known to be zero, and X is known non-zero then at least one
2469 // non-zero bit must remain.
2470 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2471 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2472 // Is there a known one in the portion not shifted out?
2473 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2474 return true;
2475 // Are all the bits to be shifted out known zero?
2476 if (Known.countMinTrailingZeros() >= ShiftVal)
2477 return isKnownNonZero(X, DemandedElts, Depth, Q);
2478 }
2479 }
2480 // div exact can only produce a zero if the dividend is zero.
2481 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2482 return isKnownNonZero(X, DemandedElts, Depth, Q);
2483 }
2484 // X + Y.
2485 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2486 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2487 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2488
2489 // If X and Y are both non-negative (as signed values) then their sum is not
2490 // zero unless both X and Y are zero.
2491 if (XKnown.isNonNegative() && YKnown.isNonNegative())
2492 if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
2493 isKnownNonZero(Y, DemandedElts, Depth, Q))
2494 return true;
2495
2496 // If X and Y are both negative (as signed values) then their sum is not
2497 // zero unless both X and Y equal INT_MIN.
2498 if (XKnown.isNegative() && YKnown.isNegative()) {
2499 APInt Mask = APInt::getSignedMaxValue(BitWidth);
2500 // The sign bit of X is set. If some other bit is set then X is not equal
2501 // to INT_MIN.
2502 if (XKnown.One.intersects(Mask))
2503 return true;
2504 // The sign bit of Y is set. If some other bit is set then Y is not equal
2505 // to INT_MIN.
2506 if (YKnown.One.intersects(Mask))
2507 return true;
2508 }
2509
2510 // The sum of a non-negative number and a power of two is not zero.
2511 if (XKnown.isNonNegative() &&
2512 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2513 return true;
2514 if (YKnown.isNonNegative() &&
2515 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2516 return true;
2517 }
2518 // X * Y.
2519 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2520 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2521 // If X and Y are non-zero then so is X * Y as long as the multiplication
2522 // does not overflow.
2523 if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2524 isKnownNonZero(X, DemandedElts, Depth, Q) &&
2525 isKnownNonZero(Y, DemandedElts, Depth, Q))
2526 return true;
2527 }
2528 // (C ? X : Y) != 0 if X != 0 and Y != 0.
2529 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2530 if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
2531 isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
2532 return true;
2533 }
2534 // PHI
2535 else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2536 if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN))
2537 return true;
2538
2539 // Check if all incoming values are non-zero using recursion.
2540 Query RecQ = Q;
2541 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2542 return llvm::all_of(PN->operands(), [&](const Use &U) {
2543 if (U.get() == PN)
2544 return true;
2545 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2546 return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
2547 });
2548 }
2549 // ExtractElement
2550 else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2551 const Value *Vec = EEI->getVectorOperand();
2552 const Value *Idx = EEI->getIndexOperand();
2553 auto *CIdx = dyn_cast<ConstantInt>(Idx);
2554 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2555 unsigned NumElts = VecTy->getNumElements();
2556 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
2557 if (CIdx && CIdx->getValue().ult(NumElts))
2558 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2559 return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2560 }
2561 }
2562 // Freeze
2563 else if (const FreezeInst *FI = dyn_cast<FreezeInst>(V)) {
2564 auto *Op = FI->getOperand(0);
2565 if (isKnownNonZero(Op, Depth, Q) &&
2566 isGuaranteedNotToBePoison(Op, Q.AC, Q.CxtI, Q.DT, Depth))
2567 return true;
2568 }
2569
2570 KnownBits Known(BitWidth);
2571 computeKnownBits(V, DemandedElts, Known, Depth, Q);
2572 return Known.One != 0;
2573 }
2574
2575 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
2576 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2577 // vector
2578 if (isa<ScalableVectorType>(V->getType()))
2579 return false;
2580
2581 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2582 APInt DemandedElts =
2583 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
2584 return isKnownNonZero(V, DemandedElts, Depth, Q);
2585 }
2586
2587 /// If the pair of operators are the same invertible function, return the
2588 /// operands of the function corresponding to each input. Otherwise,
2589 /// return None. An invertible function is one that is 1-to-1 and maps
2590 /// every input value to exactly one output value. This is equivalent to
2591 /// saying that Op1 and Op2 are equal exactly when the specified pair of
2592 /// operands are equal (except that Op1 and Op2 may be poison more often).
2593 static Optional<std::pair<Value*, Value*>>
2594 getInvertibleOperands(const Operator *Op1,
2595 const Operator *Op2) {
2596 if (Op1->getOpcode() != Op2->getOpcode())
2597 return None;
2598
2599 auto getOperands = [&](unsigned OpNum) -> auto {
2600 return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum));
2601 };
2602
2603 switch (Op1->getOpcode()) {
2604 default:
2605 break;
2606 case Instruction::Add:
2607 case Instruction::Sub:
2608 if (Op1->getOperand(0) == Op2->getOperand(0))
2609 return getOperands(1);
2610 if (Op1->getOperand(1) == Op2->getOperand(1))
2611 return getOperands(0);
2612 break;
2613 case Instruction::Mul: {
2614 // invertible if A * B == (A * B) mod 2^N where A and B are integers
2615 // and N is the bitwidth. The nsw case is non-obvious, but proven by
2616 // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
2617 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2618 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2619 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2620 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2621 break;
2622
2623 // Assume operand order has been canonicalized
2624 if (Op1->getOperand(1) == Op2->getOperand(1) &&
2625 isa<ConstantInt>(Op1->getOperand(1)) &&
2626 !cast<ConstantInt>(Op1->getOperand(1))->isZero())
2627 return getOperands(0);
2628 break;
2629 }
2630 case Instruction::Shl: {
2631 // Same as multiplies, with the difference that we don't need to check
2632 // for a non-zero multiply. Shifts always multiply by non-zero.
2633 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2634 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2635 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2636 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2637 break;
2638
2639 if (Op1->getOperand(1) == Op2->getOperand(1))
2640 return getOperands(0);
2641 break;
2642 }
2643 case Instruction::AShr:
2644 case Instruction::LShr: {
2645 auto *PEO1 = cast<PossiblyExactOperator>(Op1);
2646 auto *PEO2 = cast<PossiblyExactOperator>(Op2);
2647 if (!PEO1->isExact() || !PEO2->isExact())
2648 break;
2649
2650 if (Op1->getOperand(1) == Op2->getOperand(1))
2651 return getOperands(0);
2652 break;
2653 }
2654 case Instruction::SExt:
2655 case Instruction::ZExt:
2656 if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType())
2657 return getOperands(0);
2658 break;
2659 case Instruction::PHI: {
2660 const PHINode *PN1 = cast<PHINode>(Op1);
2661 const PHINode *PN2 = cast<PHINode>(Op2);
2662
2663 // If PN1 and PN2 are both recurrences, can we prove the entire recurrences
2664 // are a single invertible function of the start values? Note that repeated
2665 // application of an invertible function is also invertible
2666 BinaryOperator *BO1 = nullptr;
2667 Value *Start1 = nullptr, *Step1 = nullptr;
2668 BinaryOperator *BO2 = nullptr;
2669 Value *Start2 = nullptr, *Step2 = nullptr;
2670 if (PN1->getParent() != PN2->getParent() ||
2671 !matchSimpleRecurrence(PN1, BO1, Start1, Step1) ||
2672 !matchSimpleRecurrence(PN2, BO2, Start2, Step2))
2673 break;
2674
2675 auto Values = getInvertibleOperands(cast<Operator>(BO1),
2676 cast<Operator>(BO2));
2677 if (!Values)
2678 break;
2679
2680 // We have to be careful of mutually defined recurrences here. Ex:
2681 // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V
2682 // * X_i = Y_i = X_(i-1) OP Y_(i-1)
2683 // The invertibility of these is complicated, and not worth reasoning
2684 // about (yet?).
2685 if (Values->first != PN1 || Values->second != PN2)
2686 break;
2687
2688 return std::make_pair(Start1, Start2);
2689 }
2690 }
2691 return None;
2692 }
2693
2694 /// Return true if V2 == V1 + X, where X is known non-zero.
2695 static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
2696 const Query &Q) {
2697 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2698 if (!BO || BO->getOpcode() != Instruction::Add)
2699 return false;
2700 Value *Op = nullptr;
2701 if (V2 == BO->getOperand(0))
2702 Op = BO->getOperand(1);
2703 else if (V2 == BO->getOperand(1))
2704 Op = BO->getOperand(0);
2705 else
2706 return false;
2707 return isKnownNonZero(Op, Depth + 1, Q);
2708 }
2709
2710 /// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
2711 /// the multiplication is nuw or nsw.
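/// E.g. with nsw, X and X * 3 can only coincide when X == 0 (the other
/// algebraic solution, X == INT_MIN, would make the multiply overflow and
/// so be poison), hence a known non-zero X proves V1 != V2.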
2712 static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
2713 const Query &Q) {
2714 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2715 const APInt *C;
2716 return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
2717 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2718 !C->isZero() && !C->isOne() && isKnownNonZero(V1, Depth + 1, Q);
2719 }
2720 return false;
2721 }
2722
2723 /// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
2724 /// the shift is nuw or nsw.
2725 static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
2726 const Query &Q) {
2727 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2728 const APInt *C;
2729 return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
2730 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2731 !C->isZero() && isKnownNonZero(V1, Depth + 1, Q);
2732 }
2733 return false;
2734 }
2735
2736 static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
2737 unsigned Depth, const Query &Q) {
2738 // Check that the two PHIs are in the same block.
2739 if (PN1->getParent() != PN2->getParent())
2740 return false;
2741
2742 SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
2743 bool UsedFullRecursion = false;
2744 for (const BasicBlock *IncomBB : PN1->blocks()) {
2745 if (!VisitedBBs.insert(IncomBB).second)
2746 continue; // Don't reprocess blocks that we have dealt with already.
2747 const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
2748 const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
2749 const APInt *C1, *C2;
2750 if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
2751 continue;
2752
2753 // Only one pair of phi operands is allowed for full recursion.
2754 if (UsedFullRecursion)
2755 return false;
2756
2757 Query RecQ = Q;
2758 RecQ.CxtI = IncomBB->getTerminator();
2759 if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ))
2760 return false;
2761 UsedFullRecursion = true;
2762 }
2763 return true;
2764 }
2765
2766 /// Return true if it is known that V1 != V2.
2767 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
2768 const Query &Q) {
2769 if (V1 == V2)
2770 return false;
2771 if (V1->getType() != V2->getType())
2772 // We can't look through casts yet.
2773 return false;
2774
2775 if (Depth >= MaxAnalysisRecursionDepth)
2776 return false;
2777
2778 // See if we can recurse through (exactly one of) our operands. This
2779 // requires our operation be 1-to-1 and map every input value to exactly
2780 // one output value. Such an operation is invertible.
2781 auto *O1 = dyn_cast<Operator>(V1);
2782 auto *O2 = dyn_cast<Operator>(V2);
2783 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
2784 if (auto Values = getInvertibleOperands(O1, O2))
2785 return isKnownNonEqual(Values->first, Values->second, Depth + 1, Q);
2786
2787 if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
2788 const PHINode *PN2 = cast<PHINode>(V2);
2789 // FIXME: This is missing a generalization to handle the case where one is
2790 // a PHI and the other one isn't.
2791 if (isNonEqualPHIs(PN1, PN2, Depth, Q))
2792 return true;
2793 }
2794 }
2795
2796 if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
2797 return true;
2798
2799 if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q))
2800 return true;
2801
2802 if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q))
2803 return true;
2804
2805 if (V1->getType()->isIntOrIntVectorTy()) {
2806 // Are any known bits in V1 contradictory to known bits in V2? If V1
2807 // has a known zero where V2 has a known one, they must not be equal.
2808 KnownBits Known1 = computeKnownBits(V1, Depth, Q);
2809 KnownBits Known2 = computeKnownBits(V2, Depth, Q);
2810
2811 if (Known1.Zero.intersects(Known2.One) ||
2812 Known2.Zero.intersects(Known1.One))
2813 return true;
2814 }
2815 return false;
2816 }
2817
2818 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
2819 /// simplify operations downstream. Mask is known to be zero for bits that V
2820 /// cannot have.
2821 ///
2822 /// This function is defined on values with integer type, values with pointer
2823 /// type, and vectors of integers. In the case
2824 /// where V is a vector, the mask, known zero, and known one values are the
2825 /// same width as the vector element, and the bit is set only if it is true
2826 /// for all of the elements in the vector.
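/// For example, if V is known to be (X & 0x0F), then MaskedValueIsZero(V, 0xF0)
/// returns true: the high nibble of V is always zero.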
2827 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2828 const Query &Q) {
2829 KnownBits Known(Mask.getBitWidth());
2830 computeKnownBits(V, Known, Depth, Q);
2831 return Mask.isSubsetOf(Known.Zero);
2832 }
2833
2834 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2835 // Returns the input and lower/upper bounds.
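// For example, smax(smin(In, 127), -128) clamps In to the signed i8 range;
// this returns true with CLow == -128 and CHigh == 127.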
2836 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2837 const APInt *&CLow, const APInt *&CHigh) {
2838 assert(isa<Operator>(Select) &&
2839 cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2840 "Input should be a Select!");
2841
2842 const Value *LHS = nullptr, *RHS = nullptr;
2843 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2844 if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2845 return false;
2846
2847 if (!match(RHS, m_APInt(CLow)))
2848 return false;
2849
2850 const Value *LHS2 = nullptr, *RHS2 = nullptr;
2851 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2852 if (getInverseMinMaxFlavor(SPF) != SPF2)
2853 return false;
2854
2855 if (!match(RHS2, m_APInt(CHigh)))
2856 return false;
2857
2858 if (SPF == SPF_SMIN)
2859 std::swap(CLow, CHigh);
2860
2861 In = LHS2;
2862 return CLow->sle(*CHigh);
2863 }
2864
2865 /// For vector constants, loop over the elements and find the constant with the
2866 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2867 /// or if any element was not analyzed; otherwise, return the count for the
2868 /// element with the minimum number of sign bits.
2869 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2870 const APInt &DemandedElts,
2871 unsigned TyBits) {
2872 const auto *CV = dyn_cast<Constant>(V);
2873 if (!CV || !isa<FixedVectorType>(CV->getType()))
2874 return 0;
2875
2876 unsigned MinSignBits = TyBits;
2877 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
2878 for (unsigned i = 0; i != NumElts; ++i) {
2879 if (!DemandedElts[i])
2880 continue;
2881 // If we find a non-ConstantInt, bail out.
2882 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2883 if (!Elt)
2884 return 0;
2885
2886 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2887 }
2888
2889 return MinSignBits;
2890 }
2891
2892 static unsigned ComputeNumSignBitsImpl(const Value *V,
2893 const APInt &DemandedElts,
2894 unsigned Depth, const Query &Q);
2895
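/// Assertion-checking wrapper around ComputeNumSignBitsImpl: every value has
/// at least one sign bit (the sign bit itself), so the result must be >= 1.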
2896 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
2897 unsigned Depth, const Query &Q) {
2898 unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
2899 assert(Result > 0 && "At least one sign bit needs to be present!");
2900 return Result;
2901 }
2902
2903 /// Return the number of times the sign bit of the register is replicated into
2904 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2905 /// (itself), but other cases can give us information. For example, immediately
2906 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2907 /// other, so we return 3. For vectors, return the number of sign bits for the
2908 /// vector element with the minimum number of known sign bits of the demanded
2909 /// elements in the vector specified by DemandedElts.
2910 static unsigned ComputeNumSignBitsImpl(const Value *V,
2911 const APInt &DemandedElts,
2912 unsigned Depth, const Query &Q) {
2913 Type *Ty = V->getType();
2914
2915   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2916   // vector.
2917 if (isa<ScalableVectorType>(Ty))
2918 return 1;
2919
2920 #ifndef NDEBUG
2921 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2922
2923 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2924 assert(
2925 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2926 "DemandedElt width should equal the fixed vector number of elements");
2927 } else {
2928 assert(DemandedElts == APInt(1, 1) &&
2929 "DemandedElt width should be 1 for scalars");
2930 }
2931 #endif
2932
2933 // We return the minimum number of sign bits that are guaranteed to be present
2934 // in V, so for undef we have to conservatively return 1. We don't have the
2935 // same behavior for poison though -- that's a FIXME today.
2936
2937 Type *ScalarTy = Ty->getScalarType();
2938 unsigned TyBits = ScalarTy->isPointerTy() ?
2939 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
2940 Q.DL.getTypeSizeInBits(ScalarTy);
2941
2942 unsigned Tmp, Tmp2;
2943 unsigned FirstAnswer = 1;
2944
2945 // Note that ConstantInt is handled by the general computeKnownBits case
2946 // below.
2947
2948 if (Depth == MaxAnalysisRecursionDepth)
2949 return 1;
2950
2951 if (auto *U = dyn_cast<Operator>(V)) {
2952 switch (Operator::getOpcode(V)) {
2953 default: break;
2954 case Instruction::SExt:
2955 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2956 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2957
2958 case Instruction::SDiv: {
2959 const APInt *Denominator;
2960 // sdiv X, C -> adds log(C) sign bits.
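      // For example, if X is known to have 4 sign bits in an i16 and C == 16
      // (logBase2 == 4), then (sdiv X, 16) has at least min(16, 4 + 4) == 8
      // sign bits.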
2961 if (match(U->getOperand(1), m_APInt(Denominator))) {
2962
2963 // Ignore non-positive denominator.
2964 if (!Denominator->isStrictlyPositive())
2965 break;
2966
2967 // Calculate the incoming numerator bits.
2968 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2969
2970 // Add floor(log(C)) bits to the numerator bits.
2971 return std::min(TyBits, NumBits + Denominator->logBase2());
2972 }
2973 break;
2974 }
2975
2976 case Instruction::SRem: {
2977 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2978
2979 const APInt *Denominator;
2980 // srem X, C -> we know that the result is within [-C+1,C) when C is a
2981       // positive constant. This lets us put a lower bound on the number of sign
2982 // bits.
2983 if (match(U->getOperand(1), m_APInt(Denominator))) {
2984
2985 // Ignore non-positive denominator.
2986 if (Denominator->isStrictlyPositive()) {
2987 // Calculate the leading sign bit constraints by examining the
2988 // denominator. Given that the denominator is positive, there are two
2989 // cases:
2990 //
2991 // 1. The numerator is positive. The result range is [0,C) and
2992 // [0,C) u< (1 << ceilLogBase2(C)).
2993 //
2994 // 2. The numerator is negative. Then the result range is (-C,0] and
2995 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2996 //
2997 // Thus a lower bound on the number of sign bits is `TyBits -
2998 // ceilLogBase2(C)`.
2999
3000 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
3001 Tmp = std::max(Tmp, ResBits);
3002 }
3003 }
3004 return Tmp;
3005 }
3006
3007 case Instruction::AShr: {
3008 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3009 // ashr X, C -> adds C sign bits. Vectors too.
3010 const APInt *ShAmt;
3011 if (match(U->getOperand(1), m_APInt(ShAmt))) {
3012 if (ShAmt->uge(TyBits))
3013 break; // Bad shift.
3014 unsigned ShAmtLimited = ShAmt->getZExtValue();
3015 Tmp += ShAmtLimited;
3016 if (Tmp > TyBits) Tmp = TyBits;
3017 }
3018 return Tmp;
3019 }
3020 case Instruction::Shl: {
3021 const APInt *ShAmt;
3022 if (match(U->getOperand(1), m_APInt(ShAmt))) {
3023 // shl destroys sign bits.
3024 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3025 if (ShAmt->uge(TyBits) || // Bad shift.
3026 ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
3027 Tmp2 = ShAmt->getZExtValue();
3028 return Tmp - Tmp2;
3029 }
3030 break;
3031 }
3032 case Instruction::And:
3033 case Instruction::Or:
3034 case Instruction::Xor: // NOT is handled here.
3035       // Logical binary ops preserve the number of sign bits at worst.
3036 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3037 if (Tmp != 1) {
3038 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3039 FirstAnswer = std::min(Tmp, Tmp2);
3040 // We computed what we know about the sign bits as our first
3041 // answer. Now proceed to the generic code that uses
3042 // computeKnownBits, and pick whichever answer is better.
3043 }
3044 break;
3045
3046 case Instruction::Select: {
3047 // If we have a clamp pattern, we know that the number of sign bits will
3048 // be the minimum of the clamp min/max range.
3049 const Value *X;
3050 const APInt *CLow, *CHigh;
3051 if (isSignedMinMaxClamp(U, X, CLow, CHigh))
3052 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3053
3054 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3055 if (Tmp == 1) break;
3056 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
3057 return std::min(Tmp, Tmp2);
3058 }
3059
3060 case Instruction::Add:
3061 // Add can have at most one carry bit. Thus we know that the output
3062 // is, at worst, one more bit than the inputs.
3063 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3064 if (Tmp == 1) break;
3065
3066 // Special case decrementing a value (ADD X, -1):
3067 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
3068 if (CRHS->isAllOnesValue()) {
3069 KnownBits Known(TyBits);
3070 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
3071
3072 // If the input is known to be 0 or 1, the output is 0/-1, which is
3073 // all sign bits set.
3074 if ((Known.Zero | 1).isAllOnes())
3075 return TyBits;
3076
3077 // If we are subtracting one from a positive number, there is no carry
3078 // out of the result.
3079 if (Known.isNonNegative())
3080 return Tmp;
3081 }
3082
3083 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3084 if (Tmp2 == 1) break;
3085 return std::min(Tmp, Tmp2) - 1;
3086
3087 case Instruction::Sub:
3088 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3089 if (Tmp2 == 1) break;
3090
3091 // Handle NEG.
3092 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
3093 if (CLHS->isNullValue()) {
3094 KnownBits Known(TyBits);
3095 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
3096 // If the input is known to be 0 or 1, the output is 0/-1, which is
3097 // all sign bits set.
3098 if ((Known.Zero | 1).isAllOnes())
3099 return TyBits;
3100
3101 // If the input is known to be positive (the sign bit is known clear),
3102 // the output of the NEG has the same number of sign bits as the
3103 // input.
3104 if (Known.isNonNegative())
3105 return Tmp2;
3106
3107 // Otherwise, we treat this like a SUB.
3108 }
3109
3110 // Sub can have at most one carry bit. Thus we know that the output
3111 // is, at worst, one more bit than the inputs.
3112 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3113 if (Tmp == 1) break;
3114 return std::min(Tmp, Tmp2) - 1;
3115
3116 case Instruction::Mul: {
3117 // The output of the Mul can be at most twice the valid bits in the
3118 // inputs.
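      // For example, in an i16, operands with 10 and 12 sign bits carry at
      // most 7 and 5 valid bits; the product then needs at most 12 valid bits,
      // leaving at least 16 - 12 + 1 == 5 sign bits.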
3119 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3120 if (SignBitsOp0 == 1) break;
3121 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3122 if (SignBitsOp1 == 1) break;
3123 unsigned OutValidBits =
3124 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
3125 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
3126 }
3127
3128 case Instruction::PHI: {
3129 const PHINode *PN = cast<PHINode>(U);
3130 unsigned NumIncomingValues = PN->getNumIncomingValues();
3131 // Don't analyze large in-degree PHIs.
3132 if (NumIncomingValues > 4) break;
3133 // Unreachable blocks may have zero-operand PHI nodes.
3134 if (NumIncomingValues == 0) break;
3135
3136 // Take the minimum of all incoming values. This can't infinitely loop
3137 // because of our depth threshold.
3138 Query RecQ = Q;
3139 Tmp = TyBits;
3140 for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
3141 if (Tmp == 1) return Tmp;
3142 RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
3143 Tmp = std::min(
3144 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
3145 }
3146 return Tmp;
3147 }
3148
3149 case Instruction::Trunc:
3150 // FIXME: it's tricky to do anything useful for this, but it is an
3151 // important case for targets like X86.
3152 break;
3153
3154 case Instruction::ExtractElement:
3155 // Look through extract element. At the moment we keep this simple and
3156 // skip tracking the specific element. But at least we might find
3157     // information valid for all elements of the vector (for example if the
3158     // vector is sign extended, shifted, etc.).
3159 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3160
3161 case Instruction::ShuffleVector: {
3162 // Collect the minimum number of sign bits that are shared by every vector
3163 // element referenced by the shuffle.
3164 auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
3165 if (!Shuf) {
3166 // FIXME: Add support for shufflevector constant expressions.
3167 return 1;
3168 }
3169 APInt DemandedLHS, DemandedRHS;
3170 // For undef elements, we don't know anything about the common state of
3171 // the shuffle result.
3172 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
3173 return 1;
3174 Tmp = std::numeric_limits<unsigned>::max();
3175 if (!!DemandedLHS) {
3176 const Value *LHS = Shuf->getOperand(0);
3177 Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
3178 }
3179 // If we don't know anything, early out and try computeKnownBits
3180 // fall-back.
3181 if (Tmp == 1)
3182 break;
3183 if (!!DemandedRHS) {
3184 const Value *RHS = Shuf->getOperand(1);
3185 Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
3186 Tmp = std::min(Tmp, Tmp2);
3187 }
3188 // If we don't know anything, early out and try computeKnownBits
3189 // fall-back.
3190 if (Tmp == 1)
3191 break;
3192 assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
3193 return Tmp;
3194 }
3195 case Instruction::Call: {
3196 if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
3197 switch (II->getIntrinsicID()) {
3198 default: break;
3199 case Intrinsic::abs:
3200 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3201 if (Tmp == 1) break;
3202
3203 // Absolute value reduces number of sign bits by at most 1.
3204 return Tmp - 1;
3205 }
3206 }
3207 }
3208 }
3209 }
3210
3211 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3212 // use this information.
3213
3214 // If we can examine all elements of a vector constant successfully, we're
3215 // done (we can't do any better than that). If not, keep trying.
3216 if (unsigned VecSignBits =
3217 computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3218 return VecSignBits;
3219
3220 KnownBits Known(TyBits);
3221 computeKnownBits(V, DemandedElts, Known, Depth, Q);
3222
3223 // If we know that the sign bit is either zero or one, determine the number of
3224 // identical bits in the top of the input value.
3225 return std::max(FirstAnswer, Known.countMinSignBits());
3226 }
3227
3228 /// This function computes the integer multiple of Base that equals V.
3229 /// If successful, it returns true and returns the multiple in
3230 /// Multiple. If unsuccessful, it returns false. It looks
3231 /// through SExt instructions only if LookThroughSExt is true.
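/// For example, given V == (shl i32 %x, 2) and Base == 4, this returns true
/// and sets Multiple to %x, since V == 4 * %x.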
3232 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
3233 bool LookThroughSExt, unsigned Depth) {
3234 assert(V && "No Value?");
3235 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3236   assert(V->getType()->isIntegerTy() && "Not integer type!");
3237
3238 Type *T = V->getType();
3239
3240 ConstantInt *CI = dyn_cast<ConstantInt>(V);
3241
3242 if (Base == 0)
3243 return false;
3244
3245 if (Base == 1) {
3246 Multiple = V;
3247 return true;
3248 }
3249
3250 ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
3251 Constant *BaseVal = ConstantInt::get(T, Base);
3252 if (CO && CO == BaseVal) {
3253 // Multiple is 1.
3254 Multiple = ConstantInt::get(T, 1);
3255 return true;
3256 }
3257
3258 if (CI && CI->getZExtValue() % Base == 0) {
3259 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
3260 return true;
3261 }
3262
3263 if (Depth == MaxAnalysisRecursionDepth) return false;
3264
3265 Operator *I = dyn_cast<Operator>(V);
3266 if (!I) return false;
3267
3268 switch (I->getOpcode()) {
3269 default: break;
3270 case Instruction::SExt:
3271 if (!LookThroughSExt) return false;
3272 // otherwise fall through to ZExt
3273 LLVM_FALLTHROUGH;
3274 case Instruction::ZExt:
3275 return ComputeMultiple(I->getOperand(0), Base, Multiple,
3276 LookThroughSExt, Depth+1);
3277 case Instruction::Shl:
3278 case Instruction::Mul: {
3279 Value *Op0 = I->getOperand(0);
3280 Value *Op1 = I->getOperand(1);
3281
3282 if (I->getOpcode() == Instruction::Shl) {
3283 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
3284 if (!Op1CI) return false;
3285 // Turn Op0 << Op1 into Op0 * 2^Op1
3286 APInt Op1Int = Op1CI->getValue();
3287 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
3288 APInt API(Op1Int.getBitWidth(), 0);
3289 API.setBit(BitToSet);
3290 Op1 = ConstantInt::get(V->getContext(), API);
3291 }
3292
3293 Value *Mul0 = nullptr;
3294 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
3295 if (Constant *Op1C = dyn_cast<Constant>(Op1))
3296 if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
3297 if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3298 MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3299 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
3300 if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3301 MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3302 MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
3303
3304 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
3305 Multiple = ConstantExpr::getMul(MulC, Op1C);
3306 return true;
3307 }
3308
3309 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
3310 if (Mul0CI->getValue() == 1) {
3311 // V == Base * Op1, so return Op1
3312 Multiple = Op1;
3313 return true;
3314 }
3315 }
3316
3317 Value *Mul1 = nullptr;
3318 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
3319 if (Constant *Op0C = dyn_cast<Constant>(Op0))
3320 if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
3321 if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3322 MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3323 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
3324 if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3325 MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3326 MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
3327
3328 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
3329 Multiple = ConstantExpr::getMul(MulC, Op0C);
3330 return true;
3331 }
3332
3333 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
3334 if (Mul1CI->getValue() == 1) {
3335 // V == Base * Op0, so return Op0
3336 Multiple = Op0;
3337 return true;
3338 }
3339 }
3340 }
3341 }
3342
3343 // We could not determine if V is a multiple of Base.
3344 return false;
3345 }
3346
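/// Map a call to a recognized library function (e.g. sinf) onto the
/// corresponding LLVM intrinsic (e.g. Intrinsic::sin), or return
/// Intrinsic::not_intrinsic if no such mapping applies.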
3347 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3348 const TargetLibraryInfo *TLI) {
3349 const Function *F = CB.getCalledFunction();
3350 if (!F)
3351 return Intrinsic::not_intrinsic;
3352
3353 if (F->isIntrinsic())
3354 return F->getIntrinsicID();
3355
3356 // We are going to infer semantics of a library function based on mapping it
3357 // to an LLVM intrinsic. Check that the library function is available from
3358   // this call site and in this environment.
3359 LibFunc Func;
3360 if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3361 !CB.onlyReadsMemory())
3362 return Intrinsic::not_intrinsic;
3363
3364 switch (Func) {
3365 default:
3366 break;
3367 case LibFunc_sin:
3368 case LibFunc_sinf:
3369 case LibFunc_sinl:
3370 return Intrinsic::sin;
3371 case LibFunc_cos:
3372 case LibFunc_cosf:
3373 case LibFunc_cosl:
3374 return Intrinsic::cos;
3375 case LibFunc_exp:
3376 case LibFunc_expf:
3377 case LibFunc_expl:
3378 return Intrinsic::exp;
3379 case LibFunc_exp2:
3380 case LibFunc_exp2f:
3381 case LibFunc_exp2l:
3382 return Intrinsic::exp2;
3383 case LibFunc_log:
3384 case LibFunc_logf:
3385 case LibFunc_logl:
3386 return Intrinsic::log;
3387 case LibFunc_log10:
3388 case LibFunc_log10f:
3389 case LibFunc_log10l:
3390 return Intrinsic::log10;
3391 case LibFunc_log2:
3392 case LibFunc_log2f:
3393 case LibFunc_log2l:
3394 return Intrinsic::log2;
3395 case LibFunc_fabs:
3396 case LibFunc_fabsf:
3397 case LibFunc_fabsl:
3398 return Intrinsic::fabs;
3399 case LibFunc_fmin:
3400 case LibFunc_fminf:
3401 case LibFunc_fminl:
3402 return Intrinsic::minnum;
3403 case LibFunc_fmax:
3404 case LibFunc_fmaxf:
3405 case LibFunc_fmaxl:
3406 return Intrinsic::maxnum;
3407 case LibFunc_copysign:
3408 case LibFunc_copysignf:
3409 case LibFunc_copysignl:
3410 return Intrinsic::copysign;
3411 case LibFunc_floor:
3412 case LibFunc_floorf:
3413 case LibFunc_floorl:
3414 return Intrinsic::floor;
3415 case LibFunc_ceil:
3416 case LibFunc_ceilf:
3417 case LibFunc_ceill:
3418 return Intrinsic::ceil;
3419 case LibFunc_trunc:
3420 case LibFunc_truncf:
3421 case LibFunc_truncl:
3422 return Intrinsic::trunc;
3423 case LibFunc_rint:
3424 case LibFunc_rintf:
3425 case LibFunc_rintl:
3426 return Intrinsic::rint;
3427 case LibFunc_nearbyint:
3428 case LibFunc_nearbyintf:
3429 case LibFunc_nearbyintl:
3430 return Intrinsic::nearbyint;
3431 case LibFunc_round:
3432 case LibFunc_roundf:
3433 case LibFunc_roundl:
3434 return Intrinsic::round;
3435 case LibFunc_roundeven:
3436 case LibFunc_roundevenf:
3437 case LibFunc_roundevenl:
3438 return Intrinsic::roundeven;
3439 case LibFunc_pow:
3440 case LibFunc_powf:
3441 case LibFunc_powl:
3442 return Intrinsic::pow;
3443 case LibFunc_sqrt:
3444 case LibFunc_sqrtf:
3445 case LibFunc_sqrtl:
3446 return Intrinsic::sqrt;
3447 }
3448
3449 return Intrinsic::not_intrinsic;
3450 }
3451
3452 /// Return true if we can prove that the specified FP value is never equal to
3453 /// -0.0.
3454 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3455 /// that a value is not -0.0. It only guarantees that -0.0 may be treated
3456 /// the same as +0.0 in floating-point ops.
3457 ///
3458 /// NOTE: this function will need to be revisited when we support non-default
3459 /// rounding modes!
3460 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3461 unsigned Depth) {
3462 if (auto *CFP = dyn_cast<ConstantFP>(V))
3463 return !CFP->getValueAPF().isNegZero();
3464
3465 if (Depth == MaxAnalysisRecursionDepth)
3466 return false;
3467
3468 auto *Op = dyn_cast<Operator>(V);
3469 if (!Op)
3470 return false;
3471
3472 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3473 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3474 return true;
3475
3476 // sitofp and uitofp turn into +0.0 for zero.
3477 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3478 return true;
3479
3480 if (auto *Call = dyn_cast<CallInst>(Op)) {
3481 Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3482 switch (IID) {
3483 default:
3484 break;
3485 // sqrt(-0.0) = -0.0, no other negative results are possible.
3486 case Intrinsic::sqrt:
3487 case Intrinsic::canonicalize:
3488 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3489 // fabs(x) != -0.0
3490 case Intrinsic::fabs:
3491 return true;
3492 }
3493 }
3494
3495 return false;
3496 }
3497
3498 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3499 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
3500 /// bit despite comparing equal.
3501 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3502 const TargetLibraryInfo *TLI,
3503 bool SignBitOnly,
3504 unsigned Depth) {
3505 // TODO: This function does not do the right thing when SignBitOnly is true
3506 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3507 // which flips the sign bits of NaNs. See
3508 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3509
3510 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3511 return !CFP->getValueAPF().isNegative() ||
3512 (!SignBitOnly && CFP->getValueAPF().isZero());
3513 }
3514
3515 // Handle vector of constants.
3516 if (auto *CV = dyn_cast<Constant>(V)) {
3517 if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3518 unsigned NumElts = CVFVTy->getNumElements();
3519 for (unsigned i = 0; i != NumElts; ++i) {
3520 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3521 if (!CFP)
3522 return false;
3523 if (CFP->getValueAPF().isNegative() &&
3524 (SignBitOnly || !CFP->getValueAPF().isZero()))
3525 return false;
3526 }
3527
3528 // All non-negative ConstantFPs.
3529 return true;
3530 }
3531 }
3532
3533 if (Depth == MaxAnalysisRecursionDepth)
3534 return false;
3535
3536 const Operator *I = dyn_cast<Operator>(V);
3537 if (!I)
3538 return false;
3539
3540 switch (I->getOpcode()) {
3541 default:
3542 break;
3543 // Unsigned integers are always nonnegative.
3544 case Instruction::UIToFP:
3545 return true;
3546 case Instruction::FMul:
3547 case Instruction::FDiv:
3548 // X * X is always non-negative or a NaN.
3549 // X / X is always exactly 1.0 or a NaN.
3550 if (I->getOperand(0) == I->getOperand(1) &&
3551 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3552 return true;
3553
3554 LLVM_FALLTHROUGH;
3555 case Instruction::FAdd:
3556 case Instruction::FRem:
3557 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3558 Depth + 1) &&
3559 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3560 Depth + 1);
3561 case Instruction::Select:
3562 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3563 Depth + 1) &&
3564 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3565 Depth + 1);
3566 case Instruction::FPExt:
3567 case Instruction::FPTrunc:
3568 // Widening/narrowing never change sign.
3569 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3570 Depth + 1);
3571 case Instruction::ExtractElement:
3572 // Look through extract element. At the moment we keep this simple and skip
3573 // tracking the specific element. But at least we might find information
3574 // valid for all elements of the vector.
3575 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3576 Depth + 1);
3577 case Instruction::Call:
3578 const auto *CI = cast<CallInst>(I);
3579 Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3580 switch (IID) {
3581 default:
3582 break;
3583 case Intrinsic::maxnum: {
3584 Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3585 auto isPositiveNum = [&](Value *V) {
3586 if (SignBitOnly) {
3587 // With SignBitOnly, this is tricky because the result of
3588 // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3589 // a constant strictly greater than 0.0.
3590 const APFloat *C;
3591 return match(V, m_APFloat(C)) &&
3592 *C > APFloat::getZero(C->getSemantics());
3593 }
3594
3595 // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3596 // maxnum can't be ordered-less-than-zero.
3597 return isKnownNeverNaN(V, TLI) &&
3598 cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3599 };
3600
3601 // TODO: This could be improved. We could also check that neither operand
3602 // has its sign bit set (and at least 1 is not-NAN?).
3603 return isPositiveNum(V0) || isPositiveNum(V1);
3604 }
3605
3606 case Intrinsic::maximum:
3607 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3608 Depth + 1) ||
3609 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3610 Depth + 1);
3611 case Intrinsic::minnum:
3612 case Intrinsic::minimum:
3613 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3614 Depth + 1) &&
3615 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3616 Depth + 1);
3617 case Intrinsic::exp:
3618 case Intrinsic::exp2:
3619 case Intrinsic::fabs:
3620 return true;
3621
3622 case Intrinsic::sqrt:
3623 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
3624 if (!SignBitOnly)
3625 return true;
3626 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3627 CannotBeNegativeZero(CI->getOperand(0), TLI));
3628
3629 case Intrinsic::powi:
3630 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3631 // powi(x,n) is non-negative if n is even.
3632 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3633 return true;
3634 }
3635 // TODO: This is not correct. Given that exp is an integer, here are the
3636 // ways that pow can return a negative value:
3637 //
3638 // pow(x, exp) --> negative if exp is odd and x is negative.
3639 // pow(-0, exp) --> -inf if exp is negative odd.
3640 // pow(-0, exp) --> -0 if exp is positive odd.
3641 // pow(-inf, exp) --> -0 if exp is negative odd.
3642 // pow(-inf, exp) --> -inf if exp is positive odd.
3643 //
3644 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3645 // but we must return false if x == -0. Unfortunately we do not currently
3646 // have a way of expressing this constraint. See details in
3647 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3648 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3649 Depth + 1);
3650
3651 case Intrinsic::fma:
3652 case Intrinsic::fmuladd:
3653 // x*x+y is non-negative if y is non-negative.
3654 return I->getOperand(0) == I->getOperand(1) &&
3655 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3656 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3657 Depth + 1);
3658 }
3659 break;
3660 }
3661 return false;
3662 }
3663
3664 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3665 const TargetLibraryInfo *TLI) {
3666 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3667 }
3668
3669 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3670 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3671 }
3672
3673 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3674 unsigned Depth) {
3675 assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3676
3677 // If we're told that infinities won't happen, assume they won't.
3678 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3679 if (FPMathOp->hasNoInfs())
3680 return true;
3681
3682 // Handle scalar constants.
3683 if (auto *CFP = dyn_cast<ConstantFP>(V))
3684 return !CFP->isInfinity();
3685
3686 if (Depth == MaxAnalysisRecursionDepth)
3687 return false;
3688
3689 if (auto *Inst = dyn_cast<Instruction>(V)) {
3690 switch (Inst->getOpcode()) {
3691 case Instruction::Select: {
3692 return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3693 isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3694 }
3695 case Instruction::SIToFP:
3696 case Instruction::UIToFP: {
3697 // Get width of largest magnitude integer (remove a bit if signed).
3698 // This still works for a signed minimum value because the largest FP
3699 // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
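      // For example, for (sitofp i32 %x to float), IntSize is 31 and the
      // largest finite float has exponent 127, so the result is always finite.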
3700 int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits();
3701 if (Inst->getOpcode() == Instruction::SIToFP)
3702 --IntSize;
3703
3704 // If the exponent of the largest finite FP value can hold the largest
3705 // integer, the result of the cast must be finite.
3706 Type *FPTy = Inst->getType()->getScalarType();
3707 return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize;
3708 }
3709 default:
3710 break;
3711 }
3712 }
3713
3714   // Try to handle fixed width vector constants
3715 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3716 if (VFVTy && isa<Constant>(V)) {
3717 // For vectors, verify that each element is not infinity.
3718 unsigned NumElts = VFVTy->getNumElements();
3719 for (unsigned i = 0; i != NumElts; ++i) {
3720 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3721 if (!Elt)
3722 return false;
3723 if (isa<UndefValue>(Elt))
3724 continue;
3725 auto *CElt = dyn_cast<ConstantFP>(Elt);
3726 if (!CElt || CElt->isInfinity())
3727 return false;
3728 }
3729 // All elements were confirmed non-infinity or undefined.
3730 return true;
3731 }
3732
3733   // Was not able to prove that V never contains infinity
3734 return false;
3735 }
3736
3737 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3738 unsigned Depth) {
3739 assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3740
3741 // If we're told that NaNs won't happen, assume they won't.
3742 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3743 if (FPMathOp->hasNoNaNs())
3744 return true;
3745
3746 // Handle scalar constants.
3747 if (auto *CFP = dyn_cast<ConstantFP>(V))
3748 return !CFP->isNaN();
3749
3750 if (Depth == MaxAnalysisRecursionDepth)
3751 return false;
3752
3753 if (auto *Inst = dyn_cast<Instruction>(V)) {
3754 switch (Inst->getOpcode()) {
3755 case Instruction::FAdd:
3756 case Instruction::FSub:
3757 // Adding positive and negative infinity produces NaN.
3758 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3759 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3760 (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3761 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3762
3763 case Instruction::FMul:
3764 // Zero multiplied with infinity produces NaN.
3765 // FIXME: If neither side can be zero fmul never produces NaN.
3766 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3767 isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3768 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3769 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3770
3771 case Instruction::FDiv:
3772 case Instruction::FRem:
3773 // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3774 return false;
3775
3776 case Instruction::Select: {
3777 return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3778 isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3779 }
3780 case Instruction::SIToFP:
3781 case Instruction::UIToFP:
3782 return true;
3783 case Instruction::FPTrunc:
3784 case Instruction::FPExt:
3785 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3786 default:
3787 break;
3788 }
3789 }
3790
3791 if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3792 switch (II->getIntrinsicID()) {
3793 case Intrinsic::canonicalize:
3794 case Intrinsic::fabs:
3795 case Intrinsic::copysign:
3796 case Intrinsic::exp:
3797 case Intrinsic::exp2:
3798 case Intrinsic::floor:
3799 case Intrinsic::ceil:
3800 case Intrinsic::trunc:
3801 case Intrinsic::rint:
3802 case Intrinsic::nearbyint:
3803 case Intrinsic::round:
3804 case Intrinsic::roundeven:
3805 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3806 case Intrinsic::sqrt:
3807 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3808 CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3809 case Intrinsic::minnum:
3810 case Intrinsic::maxnum:
3811 // If either operand is not NaN, the result is not NaN.
3812 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3813 isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3814 default:
3815 return false;
3816 }
3817 }
3818
3819 // Try to handle fixed width vector constants
3820 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3821 if (VFVTy && isa<Constant>(V)) {
3822 // For vectors, verify that each element is not NaN.
3823 unsigned NumElts = VFVTy->getNumElements();
3824 for (unsigned i = 0; i != NumElts; ++i) {
3825 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3826 if (!Elt)
3827 return false;
3828 if (isa<UndefValue>(Elt))
3829 continue;
3830 auto *CElt = dyn_cast<ConstantFP>(Elt);
3831 if (!CElt || CElt->isNaN())
3832 return false;
3833 }
3834 // All elements were confirmed not-NaN or undefined.
3835 return true;
3836 }
3837
3838 // Was not able to prove that V never contains NaN
3839 return false;
3840 }
3841
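/// If the value can be materialized by repeating a single byte in memory,
/// return an i8 value holding that byte (i8 undef if any byte value works);
/// otherwise return null.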
3842 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3843
3844 // All byte-wide stores are splatable, even of arbitrary variables.
3845 if (V->getType()->isIntegerTy(8))
3846 return V;
3847
3848 LLVMContext &Ctx = V->getContext();
3849
3850   // Undefs don't care.
3851 auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3852 if (isa<UndefValue>(V))
3853 return UndefInt8;
3854
3855 // Return Undef for zero-sized type.
3856 if (!DL.getTypeStoreSize(V->getType()).isNonZero())
3857 return UndefInt8;
3858
3859 Constant *C = dyn_cast<Constant>(V);
3860 if (!C) {
3861 // Conceptually, we could handle things like:
3862 // %a = zext i8 %X to i16
3863 // %b = shl i16 %a, 8
3864 // %c = or i16 %a, %b
3865 // but until there is an example that actually needs this, it doesn't seem
3866 // worth worrying about.
3867 return nullptr;
3868 }
3869
3870   // Handle 'null' ConstantAggregateZero etc.
3871 if (C->isNullValue())
3872 return Constant::getNullValue(Type::getInt8Ty(Ctx));
3873
3874 // Constant floating-point values can be handled as integer values if the
3875 // corresponding integer value is "byteable". An important case is 0.0.
3876 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3877 Type *Ty = nullptr;
3878 if (CFP->getType()->isHalfTy())
3879 Ty = Type::getInt16Ty(Ctx);
3880 else if (CFP->getType()->isFloatTy())
3881 Ty = Type::getInt32Ty(Ctx);
3882 else if (CFP->getType()->isDoubleTy())
3883 Ty = Type::getInt64Ty(Ctx);
3884 // Don't handle long double formats, which have strange constraints.
3885 return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3886 : nullptr;
3887 }
3888
3889   // We can handle constant integers that are a multiple of 8 bits wide.
3890 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3891 if (CI->getBitWidth() % 8 == 0) {
3892 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3893 if (!CI->getValue().isSplat(8))
3894 return nullptr;
3895 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3896 }
3897 }
3898
3899 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3900 if (CE->getOpcode() == Instruction::IntToPtr) {
3901 if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
3902 unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
3903 return isBytewiseValue(
3904 ConstantExpr::getIntegerCast(CE->getOperand(0),
3905 Type::getIntNTy(Ctx, BitWidth), false),
3906 DL);
3907 }
3908 }
3909 }
3910
3911 auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3912 if (LHS == RHS)
3913 return LHS;
3914 if (!LHS || !RHS)
3915 return nullptr;
3916 if (LHS == UndefInt8)
3917 return RHS;
3918 if (RHS == UndefInt8)
3919 return LHS;
3920 return nullptr;
3921 };
3922
3923 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3924 Value *Val = UndefInt8;
3925 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3926 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3927 return nullptr;
3928 return Val;
3929 }
3930
3931 if (isa<ConstantAggregate>(C)) {
3932 Value *Val = UndefInt8;
3933 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3934 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3935 return nullptr;
3936 return Val;
3937 }
3938
3939 // Don't try to handle the handful of other constants.
3940 return nullptr;
3941 }
3942
3943 // This is the recursive version of BuildSubAggregate. It takes a few different
3944 // arguments. Idxs is the index within the nested struct From that we are
3945 // looking at now (which is of type IndexedType). IdxSkip is the number of
3946 // indices from Idxs that should be left out when inserting into the resulting
3947 // struct. To is the result struct built so far, new insertvalue instructions
3948 // build on that.
3949 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3950 SmallVectorImpl<unsigned> &Idxs,
3951 unsigned IdxSkip,
3952 Instruction *InsertBefore) {
3953 StructType *STy = dyn_cast<StructType>(IndexedType);
3954 if (STy) {
3955 // Save the original To argument so we can modify it
3956 Value *OrigTo = To;
3957 // General case, the type indexed by Idxs is a struct
3958 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3959 // Process each struct element recursively
3960 Idxs.push_back(i);
3961 Value *PrevTo = To;
3962 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3963 InsertBefore);
3964 Idxs.pop_back();
3965 if (!To) {
3966 // Couldn't find any inserted value for this index? Cleanup
3967 while (PrevTo != OrigTo) {
3968 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3969 PrevTo = Del->getAggregateOperand();
3970 Del->eraseFromParent();
3971 }
3972 // Stop processing elements
3973 break;
3974 }
3975 }
3976 // If we successfully found a value for each of our subaggregates
3977 if (To)
3978 return To;
3979 }
3980   // Base case, the type indexed by Idxs is not a struct, or not all of
3981 // the struct's elements had a value that was inserted directly. In the latter
3982 // case, perhaps we can't determine each of the subelements individually, but
3983 // we might be able to find the complete struct somewhere.
3984
3985 // Find the value that is at that particular spot
3986 Value *V = FindInsertedValue(From, Idxs);
3987
3988 if (!V)
3989 return nullptr;
3990
3991 // Insert the value in the new (sub) aggregate
3992 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3993 "tmp", InsertBefore);
3994 }
3995
3996 // This helper takes a nested struct and extracts a part of it (which is again a
3997 // struct) into a new value. For example, given the struct:
3998 // { a, { b, { c, d }, e } }
3999 // and the indices "1, 1" this returns
4000 // { c, d }.
4001 //
4002 // It does this by inserting an insertvalue for each element in the resulting
4003 // struct, as opposed to just inserting a single struct. This will only work if
4004 // each of the elements of the substruct are known (i.e., inserted into From by an
4005 // insertvalue instruction somewhere).
4006 //
4007 // All inserted insertvalue instructions are inserted before InsertBefore
4008 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
4009 Instruction *InsertBefore) {
4010 assert(InsertBefore && "Must have someplace to insert!");
4011 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
4012 idx_range);
4013 Value *To = UndefValue::get(IndexedType);
4014 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
4015 unsigned IdxSkip = Idxs.size();
4016
4017 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
4018 }
4019
4020 /// Given an aggregate and a sequence of indices, see if the scalar value
4021 /// indexed is already around as a register, for example if it was inserted
4022 /// directly into the aggregate.
4023 ///
4024 /// If InsertBefore is not null, this function will duplicate (modified)
4025 /// insertvalues when a part of a nested struct is extracted.
4026 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
4027 Instruction *InsertBefore) {
4028 // Nothing to index? Just return V then (this is useful at the end of our
4029 // recursion).
4030 if (idx_range.empty())
4031 return V;
4032 // We have indices, so V should have an indexable type.
4033 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
4034 "Not looking at a struct or array?");
4035 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
4036 "Invalid indices for type?");
4037
4038 if (Constant *C = dyn_cast<Constant>(V)) {
4039 C = C->getAggregateElement(idx_range[0]);
4040 if (!C) return nullptr;
4041 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
4042 }
4043
4044 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
4045 // Loop the indices for the insertvalue instruction in parallel with the
4046 // requested indices
4047 const unsigned *req_idx = idx_range.begin();
4048 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
4049 i != e; ++i, ++req_idx) {
4050 if (req_idx == idx_range.end()) {
4051 // We can't handle this without inserting insertvalues
4052 if (!InsertBefore)
4053 return nullptr;
4054
4055 // The requested index identifies a part of a nested aggregate. Handle
4056 // this specially. For example,
4057 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
4058 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
4059 // %C = extractvalue {i32, { i32, i32 } } %B, 1
4060 // This can be changed into
4061 // %A = insertvalue {i32, i32 } undef, i32 10, 0
4062 // %C = insertvalue {i32, i32 } %A, i32 11, 1
4063 // which allows the unused 0,0 element from the nested struct to be
4064 // removed.
4065 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
4066 InsertBefore);
4067 }
4068
4069     // This insertvalue inserts something other than what we are looking for.
4070 // See if the (aggregate) value inserted into has the value we are
4071 // looking for, then.
4072 if (*req_idx != *i)
4073 return FindInsertedValue(I->getAggregateOperand(), idx_range,
4074 InsertBefore);
4075 }
4076 // If we end up here, the indices of the insertvalue match with those
4077 // requested (though possibly only partially). Now we recursively look at
4078 // the inserted value, passing any remaining indices.
4079 return FindInsertedValue(I->getInsertedValueOperand(),
4080 makeArrayRef(req_idx, idx_range.end()),
4081 InsertBefore);
4082 }
4083
4084 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
4085 // If we're extracting a value from an aggregate that was extracted from
4086 // something else, we can extract from that something else directly instead.
4087 // However, we will need to chain I's indices with the requested indices.
4088
4089 // Calculate the number of indices required
4090 unsigned size = I->getNumIndices() + idx_range.size();
4091 // Allocate some space to put the new indices in
4092 SmallVector<unsigned, 5> Idxs;
4093 Idxs.reserve(size);
4094 // Add indices from the extract value instruction
4095 Idxs.append(I->idx_begin(), I->idx_end());
4096
4097 // Add requested indices
4098 Idxs.append(idx_range.begin(), idx_range.end());
4099
4100 assert(Idxs.size() == size
4101 && "Number of indices added not correct?");
4102
4103 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
4104 }
4105 // Otherwise, we don't know (such as, extracting from a function return value
4106 // or load instruction)
4107 return nullptr;
4108 }
4109
4110 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
4111 unsigned CharSize) {
4112 // Make sure the GEP has exactly three arguments.
4113 if (GEP->getNumOperands() != 3)
4114 return false;
4115
4116   // Make sure the index-ee is a pointer to array of \p CharSize integers.
4118 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
4119 if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
4120 return false;
4121
4122 // Check to make sure that the first operand of the GEP is an integer and
4123 // has value 0 so that we are sure we're indexing into the initializer.
4124 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
4125 if (!FirstIdx || !FirstIdx->isZero())
4126 return false;
4127
4128 return true;
4129 }
4130
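/// Look through pointer casts and constant GEP offsets to the constant
/// initializer backing \p V, and describe it in \p Slice. \p ElementSize is
/// the expected element width in bits; \p Offset is a starting element index.
/// Returns false unless V is based on a constant global with a definitive
/// initializer.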
4131 bool llvm::getConstantDataArrayInfo(const Value *V,
4132 ConstantDataArraySlice &Slice,
4133 unsigned ElementSize, uint64_t Offset) {
4134 assert(V);
4135
4136 // Look through bitcast instructions and geps.
4137 V = V->stripPointerCasts();
4138
4139 // If the value is a GEP instruction or constant expression, treat it as an
4140 // offset.
4141 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
4142 // The GEP operator should be based on a pointer to string constant, and is
4143 // indexing into the string constant.
4144 if (!isGEPBasedOnPointerToString(GEP, ElementSize))
4145 return false;
4146
4147 // If the second index isn't a ConstantInt, then this is a variable index
4148 // into the array. If this occurs, we can't say anything meaningful about
4149 // the string.
4150 uint64_t StartIdx = 0;
4151 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
4152 StartIdx = CI->getZExtValue();
4153 else
4154 return false;
4155 return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
4156 StartIdx + Offset);
4157 }
4158
4159   // The GEP operator, whether a constant expression or an instruction, must
4160   // reference a global variable that is a constant and is initialized. The
4161   // referenced constant initializer is the array that we'll use for optimization.
4162 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
4163 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
4164 return false;
4165
4166 const ConstantDataArray *Array;
4167 ArrayType *ArrayTy;
4168 if (GV->getInitializer()->isNullValue()) {
4169 Type *GVTy = GV->getValueType();
4170     if ((ArrayTy = dyn_cast<ArrayType>(GVTy))) {
4171 // A zeroinitializer for the array; there is no ConstantDataArray.
4172 Array = nullptr;
4173 } else {
4174 const DataLayout &DL = GV->getParent()->getDataLayout();
4175 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
4176 uint64_t Length = SizeInBytes / (ElementSize / 8);
4177 if (Length <= Offset)
4178 return false;
4179
4180 Slice.Array = nullptr;
4181 Slice.Offset = 0;
4182 Slice.Length = Length - Offset;
4183 return true;
4184 }
4185 } else {
4186 // This must be a ConstantDataArray.
4187 Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
4188 if (!Array)
4189 return false;
4190 ArrayTy = Array->getType();
4191 }
4192 if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
4193 return false;
4194
4195 uint64_t NumElts = ArrayTy->getArrayNumElements();
4196 if (Offset > NumElts)
4197 return false;
4198
4199 Slice.Array = Array;
4200 Slice.Offset = Offset;
4201 Slice.Length = NumElts - Offset;
4202 return true;
4203 }
4204
4205 /// This function extracts the constant string data pointed to by V. If
4206 /// successful, it returns true and returns the string in Str. If
4207 /// unsuccessful, it returns false.
4208 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
4209 uint64_t Offset, bool TrimAtNul) {
4210 ConstantDataArraySlice Slice;
4211 if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
4212 return false;
4213
4214 if (Slice.Array == nullptr) {
4215 if (TrimAtNul) {
4216 Str = StringRef();
4217 return true;
4218 }
4219 if (Slice.Length == 1) {
4220 Str = StringRef("", 1);
4221 return true;
4222 }
4223 // We cannot instantiate a StringRef as we do not have an appropriate string
4224 // of 0s at hand.
4225 return false;
4226 }
4227
4228 // Start out with the entire array in the StringRef.
4229 Str = Slice.Array->getAsString();
4230 // Skip over 'offset' bytes.
4231 Str = Str.substr(Slice.Offset);
4232
4233 if (TrimAtNul) {
4234     // Trim off the \0 and anything after it. If the array is not nul
4235     // terminated, we just return the whole remainder of the array. The client
4236     // may know some other way that the string is length-bound.
4237 Str = Str.substr(0, Str.find('\0'));
4238 }
4239 return true;
4240 }
4241
4242 // These next two are very similar to the above, but also look through PHI
4243 // nodes.
4244 // TODO: See if we can integrate these two together.
4245
4246 /// If we can compute the length of the string pointed to by
4247 /// the specified pointer, return 'len+1'. If we can't, return 0.
4248 static uint64_t GetStringLengthH(const Value *V,
4249 SmallPtrSetImpl<const PHINode*> &PHIs,
4250 unsigned CharSize) {
4251 // Look through noop bitcast instructions.
4252 V = V->stripPointerCasts();
4253
4254 // If this is a PHI node, there are two cases: either we have already seen it
4255 // or we haven't.
4256 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
4257 if (!PHIs.insert(PN).second)
4258 return ~0ULL; // already in the set.
4259
4260 // If it was new, see if all the input strings are the same length.
4261 uint64_t LenSoFar = ~0ULL;
4262 for (Value *IncValue : PN->incoming_values()) {
4263 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
4264 if (Len == 0) return 0; // Unknown length -> unknown.
4265
4266 if (Len == ~0ULL) continue;
4267
4268 if (Len != LenSoFar && LenSoFar != ~0ULL)
4269 return 0; // Disagree -> unknown.
4270 LenSoFar = Len;
4271 }
4272
4273 // Success, all agree.
4274 return LenSoFar;
4275 }
4276
4277 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
4278 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4279 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4280 if (Len1 == 0) return 0;
4281 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4282 if (Len2 == 0) return 0;
4283 if (Len1 == ~0ULL) return Len2;
4284 if (Len2 == ~0ULL) return Len1;
4285 if (Len1 != Len2) return 0;
4286 return Len1;
4287 }
4288
4289 // Otherwise, see if we can read the string.
4290 ConstantDataArraySlice Slice;
4291 if (!getConstantDataArrayInfo(V, Slice, CharSize))
4292 return 0;
4293
4294 if (Slice.Array == nullptr)
4295 return 1;
4296
4297 // Search for nul characters
4298 unsigned NullIndex = 0;
4299 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4300 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4301 break;
4302 }
4303
4304 return NullIndex + 1;
4305 }
4306
4307 /// If we can compute the length of the string pointed to by
4308 /// the specified pointer, return 'len+1'. If we can't, return 0.
4309 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4310 if (!V->getType()->isPointerTy())
4311 return 0;
4312
4313 SmallPtrSet<const PHINode*, 32> PHIs;
4314 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4315 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
4316 // an empty string as a length.
4317 return Len == ~0ULL ? 1 : Len;
4318 }
4319
4320 const Value *
4321 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4322 bool MustPreserveNullness) {
4323 assert(Call &&
4324 "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4325 if (const Value *RV = Call->getReturnedArgOperand())
4326 return RV;
4327   // This can be used only as an aliasing property.
4328 if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4329 Call, MustPreserveNullness))
4330 return Call->getArgOperand(0);
4331 return nullptr;
4332 }
4333
4334 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4335 const CallBase *Call, bool MustPreserveNullness) {
4336 switch (Call->getIntrinsicID()) {
4337 case Intrinsic::launder_invariant_group:
4338 case Intrinsic::strip_invariant_group:
4339 case Intrinsic::aarch64_irg:
4340 case Intrinsic::aarch64_tagp:
4341 return true;
4342 case Intrinsic::ptrmask:
4343 return !MustPreserveNullness;
4344 default:
4345 return false;
4346 }
4347 }
4348
4349 /// \p PN defines a loop-variant pointer to an object. Check if the
4350 /// previous iteration of the loop was referring to the same object as \p PN.
4351 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4352 const LoopInfo *LI) {
4353 // Find the loop-defined value.
4354 Loop *L = LI->getLoopFor(PN->getParent());
4355 if (PN->getNumIncomingValues() != 2)
4356 return true;
4357
4358 // Find the value from previous iteration.
4359 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4360 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4361 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4362 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4363 return true;
4364
4365 // If a new pointer is loaded in the loop, the pointer references a different
4366 // object in every iteration. E.g.:
4367 // for (i)
4368 // int *p = a[i];
4369 // ...
4370 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4371 if (!L->isLoopInvariant(Load->getPointerOperand()))
4372 return false;
4373 return true;
4374 }
4375
4376 const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
4377 if (!V->getType()->isPointerTy())
4378 return V;
4379 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4380 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
4381 V = GEP->getPointerOperand();
4382 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4383 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4384 V = cast<Operator>(V)->getOperand(0);
4385 if (!V->getType()->isPointerTy())
4386 return V;
4387 } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
4388 if (GA->isInterposable())
4389 return V;
4390 V = GA->getAliasee();
4391 } else {
4392 if (auto *PHI = dyn_cast<PHINode>(V)) {
4393 // Look through single-arg phi nodes created by LCSSA.
4394 if (PHI->getNumIncomingValues() == 1) {
4395 V = PHI->getIncomingValue(0);
4396 continue;
4397 }
4398 } else if (auto *Call = dyn_cast<CallBase>(V)) {
4399         // CaptureTracking can know about special capturing properties of some
4400         // intrinsics like launder.invariant.group that can't be expressed with
4401         // attributes, such as returning a pointer that aliases the argument.
4402         // Because some analyses may assume that a nocapture'd pointer is never
4403         // returned from a special intrinsic (the function would otherwise have
4404         // to be marked with the 'returned' attribute), it is crucial to use
4405         // this helper so that we stay in sync with CaptureTracking. Skipping
4406         // it may cause subtle miscompilations where two aliasing pointers are
4407         // assumed not to alias.
4408 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
4409 V = RP;
4410 continue;
4411 }
4412 }
4413
4414 return V;
4415 }
4416 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
4417 }
4418 return V;
4419 }
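// Example (illustrative sketch): walking a GEP/bitcast chain back to its
// base. 'Ptr' is a hypothetical pointer operand of a memory access.
//
//   const Value *Base = getUnderlyingObject(Ptr); // header-default MaxLookup
//   if (isa<AllocaInst>(Base))
//     ...; // every lookup step provably stayed within one stack object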
4420
4421 void llvm::getUnderlyingObjects(const Value *V,
4422 SmallVectorImpl<const Value *> &Objects,
4423 LoopInfo *LI, unsigned MaxLookup) {
4424 SmallPtrSet<const Value *, 4> Visited;
4425 SmallVector<const Value *, 4> Worklist;
4426 Worklist.push_back(V);
4427 do {
4428 const Value *P = Worklist.pop_back_val();
4429 P = getUnderlyingObject(P, MaxLookup);
4430
4431 if (!Visited.insert(P).second)
4432 continue;
4433
4434 if (auto *SI = dyn_cast<SelectInst>(P)) {
4435 Worklist.push_back(SI->getTrueValue());
4436 Worklist.push_back(SI->getFalseValue());
4437 continue;
4438 }
4439
4440 if (auto *PN = dyn_cast<PHINode>(P)) {
4441 // If this PHI changes the underlying object in every iteration of the
4442 // loop, don't look through it. Consider:
4443 // int **A;
4444 // for (i) {
4445 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
4446 // Curr = A[i];
4447 // *Prev, *Curr;
4448     //   }
4449 // Prev is tracking Curr one iteration behind so they refer to different
4450 // underlying objects.
4451 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
4452 isSameUnderlyingObjectInLoop(PN, LI))
4453 append_range(Worklist, PN->incoming_values());
4454 continue;
4455 }
4456
4457 Objects.push_back(P);
4458 } while (!Worklist.empty());
4459 }
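// Example (illustrative sketch): unlike the scalar query above, this walks
// through selects and (loop-safe) phis and can report several candidates.
// 'P' is a hypothetical pointer defined as: %p = select i1 %c, i8* %a, i8* %b
//
//   SmallVector<const Value *, 4> Objs;
//   getUnderlyingObjects(P, Objs); // Objs holds the base objects of %a and %b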
4460
4461 /// This is the function that does the work of looking through basic
4462 /// ptrtoint+arithmetic+inttoptr sequences.
4463 static const Value *getUnderlyingObjectFromInt(const Value *V) {
4464 do {
4465 if (const Operator *U = dyn_cast<Operator>(V)) {
4466 // If we find a ptrtoint, we can transfer control back to the
4467 // regular getUnderlyingObjectFromInt.
4468 if (U->getOpcode() == Instruction::PtrToInt)
4469 return U->getOperand(0);
4470 // If we find an add of a constant, a multiplied value, or a phi, it's
4471 // likely that the other operand will lead us to the base
4472 // object. We don't have to worry about the case where the
4473 // object address is somehow being computed by the multiply,
4474 // because our callers only care when the result is an
4475 // identifiable object.
4476 if (U->getOpcode() != Instruction::Add ||
4477 (!isa<ConstantInt>(U->getOperand(1)) &&
4478 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
4479 !isa<PHINode>(U->getOperand(1))))
4480 return V;
4481 V = U->getOperand(0);
4482 } else {
4483 return V;
4484 }
4485 assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
4486 } while (true);
4487 }
4488
4489 /// This is a wrapper around getUnderlyingObjects and adds support for basic
4490 /// ptrtoint+arithmetic+inttoptr sequences.
4491 /// It returns false if an unidentifiable object is found by getUnderlyingObjects.
4492 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
4493 SmallVectorImpl<Value *> &Objects) {
4494 SmallPtrSet<const Value *, 16> Visited;
4495 SmallVector<const Value *, 4> Working(1, V);
4496 do {
4497 V = Working.pop_back_val();
4498
4499 SmallVector<const Value *, 4> Objs;
4500 getUnderlyingObjects(V, Objs);
4501
4502 for (const Value *V : Objs) {
4503 if (!Visited.insert(V).second)
4504 continue;
4505 if (Operator::getOpcode(V) == Instruction::IntToPtr) {
4506 const Value *O =
4507 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
4508 if (O->getType()->isPointerTy()) {
4509 Working.push_back(O);
4510 continue;
4511 }
4512 }
4513 // If getUnderlyingObjects fails to find an identifiable object,
4514 // getUnderlyingObjectsForCodeGen also fails for safety.
4515 if (!isIdentifiedObject(V)) {
4516 Objects.clear();
4517 return false;
4518 }
4519 Objects.push_back(const_cast<Value *>(V));
4520 }
4521 } while (!Working.empty());
4522 return true;
4523 }
4524
4525 AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
4526 AllocaInst *Result = nullptr;
4527 SmallPtrSet<Value *, 4> Visited;
4528 SmallVector<Value *, 4> Worklist;
4529
4530 auto AddWork = [&](Value *V) {
4531 if (Visited.insert(V).second)
4532 Worklist.push_back(V);
4533 };
4534
4535 AddWork(V);
4536 do {
4537 V = Worklist.pop_back_val();
4538 assert(Visited.count(V));
4539
4540 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
4541 if (Result && Result != AI)
4542 return nullptr;
4543 Result = AI;
4544 } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
4545 AddWork(CI->getOperand(0));
4546 } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
4547 for (Value *IncValue : PN->incoming_values())
4548 AddWork(IncValue);
4549 } else if (auto *SI = dyn_cast<SelectInst>(V)) {
4550 AddWork(SI->getTrueValue());
4551 AddWork(SI->getFalseValue());
4552 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
4553 if (OffsetZero && !GEP->hasAllZeroIndices())
4554 return nullptr;
4555 AddWork(GEP->getPointerOperand());
4556 } else if (CallBase *CB = dyn_cast<CallBase>(V)) {
4557 Value *Returned = CB->getReturnedArgOperand();
4558 if (Returned)
4559 AddWork(Returned);
4560 else
4561 return nullptr;
4562 } else {
4563 return nullptr;
4564 }
4565 } while (!Worklist.empty());
4566
4567 return Result;
4568 }
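// Example (illustrative sketch): tracing a lifetime intrinsic's pointer
// argument back to a unique alloca. 'II' is a hypothetical lifetime.start
// call, whose operand 1 is the pointer argument.
//
//   if (AllocaInst *AI = findAllocaForValue(II->getArgOperand(1)))
//     ...; // every path (casts, phis, selects, all-zero GEPs) reaches AI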
4569
4570 static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4571 const Value *V, bool AllowLifetime, bool AllowDroppable) {
4572 for (const User *U : V->users()) {
4573 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4574 if (!II)
4575 return false;
4576
4577 if (AllowLifetime && II->isLifetimeStartOrEnd())
4578 continue;
4579
4580 if (AllowDroppable && II->isDroppable())
4581 continue;
4582
4583 return false;
4584 }
4585 return true;
4586 }
4587
4588 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
4589 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4590 V, /* AllowLifetime */ true, /* AllowDroppable */ false);
4591 }
4592 bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
4593 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4594 V, /* AllowLifetime */ true, /* AllowDroppable */ true);
4595 }
4596
4597 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
4598 if (!LI.isUnordered())
4599 return true;
4600 const Function &F = *LI.getFunction();
4601 // Speculative load may create a race that did not exist in the source.
4602 return F.hasFnAttribute(Attribute::SanitizeThread) ||
4603 // Speculative load may load data from dirty regions.
4604 F.hasFnAttribute(Attribute::SanitizeAddress) ||
4605 F.hasFnAttribute(Attribute::SanitizeHWAddress);
4606 }
4607
4608
4609 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
4610 const Instruction *CtxI,
4611 const DominatorTree *DT,
4612 const TargetLibraryInfo *TLI) {
4613 const Operator *Inst = dyn_cast<Operator>(V);
4614 if (!Inst)
4615 return false;
4616
4617 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
4618 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
4619 if (C->canTrap())
4620 return false;
4621
4622 switch (Inst->getOpcode()) {
4623 default:
4624 return true;
4625 case Instruction::UDiv:
4626 case Instruction::URem: {
4627 // x / y is undefined if y == 0.
4628 const APInt *V;
4629 if (match(Inst->getOperand(1), m_APInt(V)))
4630 return *V != 0;
4631 return false;
4632 }
4633 case Instruction::SDiv:
4634 case Instruction::SRem: {
4635     // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
4636 const APInt *Numerator, *Denominator;
4637 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
4638 return false;
4639 // We cannot hoist this division if the denominator is 0.
4640 if (*Denominator == 0)
4641 return false;
4642 // It's safe to hoist if the denominator is not 0 or -1.
4643 if (!Denominator->isAllOnes())
4644 return true;
4645 // At this point we know that the denominator is -1. It is safe to hoist as
4646 // long we know that the numerator is not INT_MIN.
4647 if (match(Inst->getOperand(0), m_APInt(Numerator)))
4648 return !Numerator->isMinSignedValue();
4649 // The numerator *might* be MinSignedValue.
4650 return false;
4651 }
4652 case Instruction::Load: {
4653 const LoadInst *LI = cast<LoadInst>(Inst);
4654 if (mustSuppressSpeculation(*LI))
4655 return false;
4656 const DataLayout &DL = LI->getModule()->getDataLayout();
4657 return isDereferenceableAndAlignedPointer(
4658 LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
4659 DL, CtxI, DT, TLI);
4660 }
4661 case Instruction::Call: {
4662 auto *CI = cast<const CallInst>(Inst);
4663 const Function *Callee = CI->getCalledFunction();
4664
4665 // The called function could have undefined behavior or side-effects, even
4666 // if marked readnone nounwind.
4667 return Callee && Callee->isSpeculatable();
4668 }
4669 case Instruction::VAArg:
4670 case Instruction::Alloca:
4671 case Instruction::Invoke:
4672 case Instruction::CallBr:
4673 case Instruction::PHI:
4674 case Instruction::Store:
4675 case Instruction::Ret:
4676 case Instruction::Br:
4677 case Instruction::IndirectBr:
4678 case Instruction::Switch:
4679 case Instruction::Unreachable:
4680 case Instruction::Fence:
4681 case Instruction::AtomicRMW:
4682 case Instruction::AtomicCmpXchg:
4683 case Instruction::LandingPad:
4684 case Instruction::Resume:
4685 case Instruction::CatchSwitch:
4686 case Instruction::CatchPad:
4687 case Instruction::CatchRet:
4688 case Instruction::CleanupPad:
4689 case Instruction::CleanupRet:
4690 return false; // Misc instructions which have effects
4691 }
4692 }
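// Example (illustrative sketch): a hoisting transform could consult the
// predicate above before moving an instruction out of its guarded block.
// 'I' is a hypothetical instruction in an if-body, 'InsertPt' the target.
//
//   if (isSafeToSpeculativelyExecute(&I, InsertPt, &DT))
//     I.moveBefore(InsertPt); // e.g. a udiv whose divisor is a non-zero const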
4693
4694 bool llvm::mayBeMemoryDependent(const Instruction &I) {
4695 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
4696 }
4697
4698 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4699 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4700 switch (OR) {
4701 case ConstantRange::OverflowResult::MayOverflow:
4702 return OverflowResult::MayOverflow;
4703 case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4704 return OverflowResult::AlwaysOverflowsLow;
4705 case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4706 return OverflowResult::AlwaysOverflowsHigh;
4707 case ConstantRange::OverflowResult::NeverOverflows:
4708 return OverflowResult::NeverOverflows;
4709 }
4710 llvm_unreachable("Unknown OverflowResult");
4711 }
4712
4713 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
4714 static ConstantRange computeConstantRangeIncludingKnownBits(
4715 const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4716 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4717 OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4718 KnownBits Known = computeKnownBits(
4719 V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4720 ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4721 ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4722 ConstantRange::PreferredRangeType RangeType =
4723 ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4724 return CR1.intersectWith(CR2, RangeType);
4725 }
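// Worked example (illustrative): for an i8 value whose known bits say the
// top bit is clear (giving the range [0, 128)) and whose computeConstantRange
// result is [10, 50), the intersection [10, 50) is returned; whichever
// source is tighter wins under the preferred range type.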
4726
4727 OverflowResult llvm::computeOverflowForUnsignedMul(
4728 const Value *LHS, const Value *RHS, const DataLayout &DL,
4729 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4730 bool UseInstrInfo) {
4731 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4732 nullptr, UseInstrInfo);
4733 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4734 nullptr, UseInstrInfo);
4735 ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4736 ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4737 return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4738 }
4739
4740 OverflowResult
4741 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4742 const DataLayout &DL, AssumptionCache *AC,
4743 const Instruction *CxtI,
4744 const DominatorTree *DT, bool UseInstrInfo) {
4745 // Multiplying n * m significant bits yields a result of n + m significant
4746 // bits. If the total number of significant bits does not exceed the
4747 // result bit width (minus 1), there is no overflow.
4748 // This means if we have enough leading sign bits in the operands
4749 // we can guarantee that the result does not overflow.
4750 // Ref: "Hacker's Delight" by Henry Warren
4751 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4752
4753 // Note that underestimating the number of sign bits gives a more
4754 // conservative answer.
4755 unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4756 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4757
4758 // First handle the easy case: if we have enough sign bits there's
4759 // definitely no overflow.
4760 if (SignBits > BitWidth + 1)
4761 return OverflowResult::NeverOverflows;
4762
4763 // There are two ambiguous cases where there can be no overflow:
4764 // SignBits == BitWidth + 1 and
4765 // SignBits == BitWidth
4766 // The second case is difficult to check, therefore we only handle the
4767 // first case.
4768 if (SignBits == BitWidth + 1) {
4769 // It overflows only when both arguments are negative and the true
4770 // product is exactly the minimum negative number.
4771 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4772 // For simplicity we just check if at least one side is not negative.
4773 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4774 nullptr, UseInstrInfo);
4775 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4776 nullptr, UseInstrInfo);
4777 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4778 return OverflowResult::NeverOverflows;
4779 }
4780 return OverflowResult::MayOverflow;
4781 }
4782
4783 OverflowResult llvm::computeOverflowForUnsignedAdd(
4784 const Value *LHS, const Value *RHS, const DataLayout &DL,
4785 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4786 bool UseInstrInfo) {
4787 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4788 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4789 nullptr, UseInstrInfo);
4790 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4791 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4792 nullptr, UseInstrInfo);
4793 return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4794 }
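// Example (illustrative sketch): an InstCombine-style caller can use the
// result to strengthen an existing add with the nuw flag. 'BO', 'AC', and
// 'DT' are hypothetical handles available in such a pass.
//
//   if (computeOverflowForUnsignedAdd(BO->getOperand(0), BO->getOperand(1),
//                                     DL, &AC, BO, &DT) ==
//       OverflowResult::NeverOverflows)
//     BO->setHasNoUnsignedWrap(true);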
4795
4796 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4797 const Value *RHS,
4798 const AddOperator *Add,
4799 const DataLayout &DL,
4800 AssumptionCache *AC,
4801 const Instruction *CxtI,
4802 const DominatorTree *DT) {
4803 if (Add && Add->hasNoSignedWrap()) {
4804 return OverflowResult::NeverOverflows;
4805 }
4806
4807 // If LHS and RHS each have at least two sign bits, the addition will look
4808 // like
4809 //
4810 // XX..... +
4811 // YY.....
4812 //
4813 // If the carry into the most significant position is 0, X and Y can't both
4814 // be 1 and therefore the carry out of the addition is also 0.
4815 //
4816 // If the carry into the most significant position is 1, X and Y can't both
4817 // be 0 and therefore the carry out of the addition is also 1.
4818 //
4819 // Since the carry into the most significant position is always equal to
4820 // the carry out of the addition, there is no signed overflow.
4821 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4822 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4823 return OverflowResult::NeverOverflows;
4824
4825 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4826 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4827 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4828 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4829 OverflowResult OR =
4830 mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4831 if (OR != OverflowResult::MayOverflow)
4832 return OR;
4833
4834   // The remaining code needs Add to be available. Return early if it is not.
4835 if (!Add)
4836 return OverflowResult::MayOverflow;
4837
4838 // If the sign of Add is the same as at least one of the operands, this add
4839 // CANNOT overflow. If this can be determined from the known bits of the
4840 // operands the above signedAddMayOverflow() check will have already done so.
4841 // The only other way to improve on the known bits is from an assumption, so
4842 // call computeKnownBitsFromAssume() directly.
4843 bool LHSOrRHSKnownNonNegative =
4844 (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4845 bool LHSOrRHSKnownNegative =
4846 (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4847 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4848 KnownBits AddKnown(LHSRange.getBitWidth());
4849 computeKnownBitsFromAssume(
4850 Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4851 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4852 (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4853 return OverflowResult::NeverOverflows;
4854 }
4855
4856 return OverflowResult::MayOverflow;
4857 }
4858
4859 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4860 const Value *RHS,
4861 const DataLayout &DL,
4862 AssumptionCache *AC,
4863 const Instruction *CxtI,
4864 const DominatorTree *DT) {
4865 // Checking for conditions implied by dominating conditions may be expensive.
4866 // Limit it to usub_with_overflow calls for now.
4867 if (match(CxtI,
4868 m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
4869 if (auto C =
4870 isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
4871 if (*C)
4872 return OverflowResult::NeverOverflows;
4873 return OverflowResult::AlwaysOverflowsLow;
4874 }
4875 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4876 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4877 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4878 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4879 return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4880 }
4881
4882 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4883 const Value *RHS,
4884 const DataLayout &DL,
4885 AssumptionCache *AC,
4886 const Instruction *CxtI,
4887 const DominatorTree *DT) {
4888 // If LHS and RHS each have at least two sign bits, the subtraction
4889 // cannot overflow.
4890 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4891 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4892 return OverflowResult::NeverOverflows;
4893
4894 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4895 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4896 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4897 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4898 return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4899 }
4900
4901 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4902 const DominatorTree &DT) {
4903 SmallVector<const BranchInst *, 2> GuardingBranches;
4904 SmallVector<const ExtractValueInst *, 2> Results;
4905
4906 for (const User *U : WO->users()) {
4907 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4908 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4909
4910 if (EVI->getIndices()[0] == 0)
4911 Results.push_back(EVI);
4912 else {
4913 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4914
4915 for (const auto *U : EVI->users())
4916 if (const auto *B = dyn_cast<BranchInst>(U)) {
4917 assert(B->isConditional() && "How else is it using an i1?");
4918 GuardingBranches.push_back(B);
4919 }
4920 }
4921 } else {
4922 // We are using the aggregate directly in a way we don't want to analyze
4923 // here (storing it to a global, say).
4924 return false;
4925 }
4926 }
4927
4928 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4929 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4930 if (!NoWrapEdge.isSingleEdge())
4931 return false;
4932
4933 // Check if all users of the add are provably no-wrap.
4934 for (const auto *Result : Results) {
4935       // If the extractvalue itself is not executed on overflow, then we don't
4936 // need to check each use separately, since domination is transitive.
4937 if (DT.dominates(NoWrapEdge, Result->getParent()))
4938 continue;
4939
4940 for (auto &RU : Result->uses())
4941 if (!DT.dominates(NoWrapEdge, RU))
4942 return false;
4943 }
4944
4945 return true;
4946 };
4947
4948 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4949 }
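// Example (illustrative): the IR shape recognized above. The math result is
// only used where the overflow bit is known to be false:
//
//   %s  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %ov = extractvalue { i32, i1 } %s, 1
//   br i1 %ov, label %trap, label %cont
//   cont:                                  ; the no-wrap edge
//     %v = extractvalue { i32, i1 } %s, 0  ; behaves like 'add nsw %a, %b'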
4950
4951 static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly) {
4952 // See whether I has flags that may create poison
4953 if (const auto *OvOp = dyn_cast<OverflowingBinaryOperator>(Op)) {
4954 if (OvOp->hasNoSignedWrap() || OvOp->hasNoUnsignedWrap())
4955 return true;
4956 }
4957 if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(Op))
4958 if (ExactOp->isExact())
4959 return true;
4960 if (const auto *FP = dyn_cast<FPMathOperator>(Op)) {
4961 auto FMF = FP->getFastMathFlags();
4962 if (FMF.noNaNs() || FMF.noInfs())
4963 return true;
4964 }
4965
4966 unsigned Opcode = Op->getOpcode();
4967
4968 // Check whether opcode is a poison/undef-generating operation
4969 switch (Opcode) {
4970 case Instruction::Shl:
4971 case Instruction::AShr:
4972 case Instruction::LShr: {
4973     // Shifts return poison if the shift amount is at least the bitwidth.
4974 if (auto *C = dyn_cast<Constant>(Op->getOperand(1))) {
4975 SmallVector<Constant *, 4> ShiftAmounts;
4976 if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
4977 unsigned NumElts = FVTy->getNumElements();
4978 for (unsigned i = 0; i < NumElts; ++i)
4979 ShiftAmounts.push_back(C->getAggregateElement(i));
4980 } else if (isa<ScalableVectorType>(C->getType()))
4981 return true; // Can't tell, just return true to be safe
4982 else
4983 ShiftAmounts.push_back(C);
4984
4985 bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) {
4986 auto *CI = dyn_cast_or_null<ConstantInt>(C);
4987 return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
4988 });
4989 return !Safe;
4990 }
4991 return true;
4992 }
4993 case Instruction::FPToSI:
4994 case Instruction::FPToUI:
4995 // fptosi/ui yields poison if the resulting value does not fit in the
4996 // destination type.
4997 return true;
4998 case Instruction::Call:
4999 if (auto *II = dyn_cast<IntrinsicInst>(Op)) {
5000 switch (II->getIntrinsicID()) {
5001 // TODO: Add more intrinsics.
5002 case Intrinsic::ctpop:
5003 case Intrinsic::sadd_with_overflow:
5004 case Intrinsic::ssub_with_overflow:
5005 case Intrinsic::smul_with_overflow:
5006 case Intrinsic::uadd_with_overflow:
5007 case Intrinsic::usub_with_overflow:
5008 case Intrinsic::umul_with_overflow:
5009 return false;
5010 }
5011 }
5012 LLVM_FALLTHROUGH;
5013 case Instruction::CallBr:
5014 case Instruction::Invoke: {
5015 const auto *CB = cast<CallBase>(Op);
5016 return !CB->hasRetAttr(Attribute::NoUndef);
5017 }
5018 case Instruction::InsertElement:
5019 case Instruction::ExtractElement: {
5020     // If the index is out of bounds for the vector, the result is poison.
5021 auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
5022 unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
5023 auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
5024 if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()))
5025 return true;
5026 return false;
5027 }
5028 case Instruction::ShuffleVector: {
5029 // shufflevector may return undef.
5030 if (PoisonOnly)
5031 return false;
5032 ArrayRef<int> Mask = isa<ConstantExpr>(Op)
5033 ? cast<ConstantExpr>(Op)->getShuffleMask()
5034 : cast<ShuffleVectorInst>(Op)->getShuffleMask();
5035 return is_contained(Mask, UndefMaskElem);
5036 }
5037 case Instruction::FNeg:
5038 case Instruction::PHI:
5039 case Instruction::Select:
5040 case Instruction::URem:
5041 case Instruction::SRem:
5042 case Instruction::ExtractValue:
5043 case Instruction::InsertValue:
5044 case Instruction::Freeze:
5045 case Instruction::ICmp:
5046 case Instruction::FCmp:
5047 return false;
5048 case Instruction::GetElementPtr: {
5049 const auto *GEP = cast<GEPOperator>(Op);
5050 return GEP->isInBounds();
5051 }
5052 default: {
5053 const auto *CE = dyn_cast<ConstantExpr>(Op);
5054 if (isa<CastInst>(Op) || (CE && CE->isCast()))
5055 return false;
5056 else if (Instruction::isBinaryOp(Opcode))
5057 return false;
5058 // Be conservative and return true.
5059 return true;
5060 }
5061 }
5062 }
5063
5064 bool llvm::canCreateUndefOrPoison(const Operator *Op) {
5065 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false);
5066 }
5067
5068 bool llvm::canCreatePoison(const Operator *Op) {
5069 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true);
5070 }
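// Example (illustrative sketch): a transform that hoists an 'add nsw' past a
// guarding branch must strip the poison-generating flags first, because the
// flags' promise may not hold on the new path.
//
//   if (canCreatePoison(cast<Operator>(I)))
//     I->dropPoisonGeneratingFlags(); // clears nsw/nuw/exact/inbounds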
5071
5072 static bool directlyImpliesPoison(const Value *ValAssumedPoison,
5073 const Value *V, unsigned Depth) {
5074 if (ValAssumedPoison == V)
5075 return true;
5076
5077 const unsigned MaxDepth = 2;
5078 if (Depth >= MaxDepth)
5079 return false;
5080
5081 if (const auto *I = dyn_cast<Instruction>(V)) {
5082 if (propagatesPoison(cast<Operator>(I)))
5083 return any_of(I->operands(), [=](const Value *Op) {
5084 return directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
5085 });
5086
5087 // 'select ValAssumedPoison, _, _' is poison.
5088 if (const auto *SI = dyn_cast<SelectInst>(I))
5089 return directlyImpliesPoison(ValAssumedPoison, SI->getCondition(),
5090 Depth + 1);
5091 // V = extractvalue V0, idx
5092 // V2 = extractvalue V0, idx2
5093 // V0's elements are all poison or not. (e.g., add_with_overflow)
5094 const WithOverflowInst *II;
5095 if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
5096 (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) ||
5097 llvm::is_contained(II->args(), ValAssumedPoison)))
5098 return true;
5099 }
5100 return false;
5101 }
5102
5103 static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
5104 unsigned Depth) {
5105 if (isGuaranteedNotToBeUndefOrPoison(ValAssumedPoison))
5106 return true;
5107
5108 if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
5109 return true;
5110
5111 const unsigned MaxDepth = 2;
5112 if (Depth >= MaxDepth)
5113 return false;
5114
5115 const auto *I = dyn_cast<Instruction>(ValAssumedPoison);
5116 if (I && !canCreatePoison(cast<Operator>(I))) {
5117 return all_of(I->operands(), [=](const Value *Op) {
5118 return impliesPoison(Op, V, Depth + 1);
5119 });
5120 }
5121 return false;
5122 }
5123
5124 bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
5125 return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
5126 }
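// Example (illustrative): this query justifies turning a poison-blocking
// select into plain logic. Folding 'select i1 %c, i1 %x, i1 false' into
// 'and i1 %c, %x' is only sound if %x being poison implies %c was already
// poison, i.e. impliesPoison(%x, %c); otherwise the 'and' could leak poison
// that the select would have blocked.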
5127
5128 static bool programUndefinedIfUndefOrPoison(const Value *V,
5129 bool PoisonOnly);
5130
5131 static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
5132 AssumptionCache *AC,
5133 const Instruction *CtxI,
5134 const DominatorTree *DT,
5135 unsigned Depth, bool PoisonOnly) {
5136 if (Depth >= MaxAnalysisRecursionDepth)
5137 return false;
5138
5139 if (isa<MetadataAsValue>(V))
5140 return false;
5141
5142 if (const auto *A = dyn_cast<Argument>(V)) {
5143 if (A->hasAttribute(Attribute::NoUndef))
5144 return true;
5145 }
5146
5147 if (auto *C = dyn_cast<Constant>(V)) {
5148 if (isa<UndefValue>(C))
5149 return PoisonOnly && !isa<PoisonValue>(C);
5150
5151 if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
5152 isa<ConstantPointerNull>(C) || isa<Function>(C))
5153 return true;
5154
5155 if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
5156 return (PoisonOnly ? !C->containsPoisonElement()
5157 : !C->containsUndefOrPoisonElement()) &&
5158 !C->containsConstantExpression();
5159 }
5160
5161 // Strip cast operations from a pointer value.
5162 // Note that stripPointerCastsSameRepresentation can strip off getelementptr
5163 // inbounds with zero offset. To guarantee that the result isn't poison, the
5164   // stripped pointer is checked: it has to point into an allocated object or
5165   // be null, which ensures that `inbounds` getelementptrs with a zero offset
5166   // could not have produced poison.
5167   // It can also strip off addrspacecasts that do not change the bit
5168   // representation; we believe such an addrspacecast is equivalent to a no-op.
5169 auto *StrippedV = V->stripPointerCastsSameRepresentation();
5170 if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
5171 isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
5172 return true;
5173
5174 auto OpCheck = [&](const Value *V) {
5175 return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
5176 PoisonOnly);
5177 };
5178
5179 if (auto *Opr = dyn_cast<Operator>(V)) {
5180 // If the value is a freeze instruction, then it can never
5181 // be undef or poison.
5182 if (isa<FreezeInst>(V))
5183 return true;
5184
5185 if (const auto *CB = dyn_cast<CallBase>(V)) {
5186 if (CB->hasRetAttr(Attribute::NoUndef))
5187 return true;
5188 }
5189
5190 if (const auto *PN = dyn_cast<PHINode>(V)) {
5191 unsigned Num = PN->getNumIncomingValues();
5192 bool IsWellDefined = true;
5193 for (unsigned i = 0; i < Num; ++i) {
5194 auto *TI = PN->getIncomingBlock(i)->getTerminator();
5195 if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
5196 DT, Depth + 1, PoisonOnly)) {
5197 IsWellDefined = false;
5198 break;
5199 }
5200 }
5201 if (IsWellDefined)
5202 return true;
5203 } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
5204 return true;
5205 }
5206
5207 if (auto *I = dyn_cast<LoadInst>(V))
5208 if (I->getMetadata(LLVMContext::MD_noundef))
5209 return true;
5210
5211 if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
5212 return true;
5213
5214 // CxtI may be null or a cloned instruction.
5215 if (!CtxI || !CtxI->getParent() || !DT)
5216 return false;
5217
5218 auto *DNode = DT->getNode(CtxI->getParent());
5219 if (!DNode)
5220 // Unreachable block
5221 return false;
5222
5223 // If V is used as a branch condition before reaching CtxI, V cannot be
5224 // undef or poison.
5225 // br V, BB1, BB2
5226 // BB1:
5227 // CtxI ; V cannot be undef or poison here
5228 auto *Dominator = DNode->getIDom();
5229 while (Dominator) {
5230 auto *TI = Dominator->getBlock()->getTerminator();
5231
5232 Value *Cond = nullptr;
5233 if (auto BI = dyn_cast<BranchInst>(TI)) {
5234 if (BI->isConditional())
5235 Cond = BI->getCondition();
5236 } else if (auto SI = dyn_cast<SwitchInst>(TI)) {
5237 Cond = SI->getCondition();
5238 }
5239
5240 if (Cond) {
5241 if (Cond == V)
5242 return true;
5243 else if (PoisonOnly && isa<Operator>(Cond)) {
5244 // For poison, we can analyze further
5245 auto *Opr = cast<Operator>(Cond);
5246 if (propagatesPoison(Opr) && is_contained(Opr->operand_values(), V))
5247 return true;
5248 }
5249 }
5250
5251 Dominator = Dominator->getIDom();
5252 }
5253
5254 SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NoUndef};
5255 if (getKnowledgeValidInContext(V, AttrKinds, CtxI, DT, AC))
5256 return true;
5257
5258 return false;
5259 }
5260
5261 bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
5262 const Instruction *CtxI,
5263 const DominatorTree *DT,
5264 unsigned Depth) {
5265 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false);
5266 }
5267
5268 bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
5269 const Instruction *CtxI,
5270 const DominatorTree *DT, unsigned Depth) {
5271 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true);
5272 }
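// Example (illustrative sketch): a peephole can drop a freeze whose operand
// is provably well defined, since freezing such a value is a no-op.
//
//   if (auto *FI = dyn_cast<FreezeInst>(&I))
//     if (isGuaranteedNotToBeUndefOrPoison(FI->getOperand(0), &AC, FI, &DT))
//       FI->replaceAllUsesWith(FI->getOperand(0));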
5273
5274 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
5275 const DataLayout &DL,
5276 AssumptionCache *AC,
5277 const Instruction *CxtI,
5278 const DominatorTree *DT) {
5279 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
5280 Add, DL, AC, CxtI, DT);
5281 }
5282
5283 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
5284 const Value *RHS,
5285 const DataLayout &DL,
5286 AssumptionCache *AC,
5287 const Instruction *CxtI,
5288 const DominatorTree *DT) {
5289 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
5290 }
5291
5292 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
5293 // Note: An atomic operation isn't guaranteed to return in a reasonable amount
5294 // of time because it's possible for another thread to interfere with it for an
5295 // arbitrary length of time, but programs aren't allowed to rely on that.
5296
5297 // If there is no successor, then execution can't transfer to it.
5298 if (isa<ReturnInst>(I))
5299 return false;
5300 if (isa<UnreachableInst>(I))
5301 return false;
5302
5303 // Note: Do not add new checks here; instead, change Instruction::mayThrow or
5304 // Instruction::willReturn.
5305 //
5306 // FIXME: Move this check into Instruction::willReturn.
5307 if (isa<CatchPadInst>(I)) {
5308 switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) {
5309 default:
5310 // A catchpad may invoke exception object constructors and such, which
5311 // in some languages can be arbitrary code, so be conservative by default.
5312 return false;
5313 case EHPersonality::CoreCLR:
5314 // For CoreCLR, it just involves a type test.
5315 return true;
5316 }
5317 }
5318
5319 // An instruction that returns without throwing must transfer control flow
5320 // to a successor.
5321 return !I->mayThrow() && I->willReturn();
5322 }
5323
5324 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
5325 // TODO: This is slightly conservative for invoke instruction since exiting
5326 // via an exception *is* normal control for them.
5327 for (const Instruction &I : *BB)
5328 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5329 return false;
5330 return true;
5331 }
5332
5333 bool llvm::isGuaranteedToTransferExecutionToSuccessor(
5334 BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
5335 unsigned ScanLimit) {
5336 return isGuaranteedToTransferExecutionToSuccessor(make_range(Begin, End),
5337 ScanLimit);
5338 }
5339
5340 bool llvm::isGuaranteedToTransferExecutionToSuccessor(
5341 iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit) {
5342 assert(ScanLimit && "scan limit must be non-zero");
5343 for (const Instruction &I : Range) {
5344 if (isa<DbgInfoIntrinsic>(I))
5345 continue;
5346 if (--ScanLimit == 0)
5347 return false;
5348 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5349 return false;
5350 }
5351 return true;
5352 }
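// Example (illustrative sketch): proving that control reaching 'Earlier'
// must also reach 'Later' in the same block, with a scan bound to keep
// compile time in check.
//
//   bool Reaches = isGuaranteedToTransferExecutionToSuccessor(
//       Earlier->getIterator(), Later->getIterator(), /*ScanLimit=*/32);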
5353
5354 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
5355 const Loop *L) {
5356 // The loop header is guaranteed to be executed for every iteration.
5357 //
5358 // FIXME: Relax this constraint to cover all basic blocks that are
5359 // guaranteed to be executed at every iteration.
5360 if (I->getParent() != L->getHeader()) return false;
5361
5362 for (const Instruction &LI : *L->getHeader()) {
5363 if (&LI == I) return true;
5364 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
5365 }
5366 llvm_unreachable("Instruction not contained in its own parent basic block.");
5367 }
5368
5369 bool llvm::propagatesPoison(const Operator *I) {
5370 switch (I->getOpcode()) {
5371 case Instruction::Freeze:
5372 case Instruction::Select:
5373 case Instruction::PHI:
5374 case Instruction::Invoke:
5375 return false;
5376 case Instruction::Call:
5377 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
5378 switch (II->getIntrinsicID()) {
5379 // TODO: Add more intrinsics.
5380 case Intrinsic::sadd_with_overflow:
5381 case Intrinsic::ssub_with_overflow:
5382 case Intrinsic::smul_with_overflow:
5383 case Intrinsic::uadd_with_overflow:
5384 case Intrinsic::usub_with_overflow:
5385 case Intrinsic::umul_with_overflow:
5386         // If an input is a vector containing a poison element, the
5387         // corresponding lanes of both output vectors (the calculated
5388         // results and the overflow bits) are poison.
5389 return true;
5390 case Intrinsic::ctpop:
5391 return true;
5392 }
5393 }
5394 return false;
5395 case Instruction::ICmp:
5396 case Instruction::FCmp:
5397 case Instruction::GetElementPtr:
5398 return true;
5399 default:
5400 if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
5401 return true;
5402
5403 // Be conservative and return false.
5404 return false;
5405 }
5406 }
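// Example (illustrative): how poison flows through the ops classified above.
//
//   %x = add i32 %p, 1           ; poison if %p is poison (binops propagate)
//   %c = icmp sgt i32 %x, 0      ; poison if %x is poison (icmp propagates)
//   %s = select i1 %c, i32 %x, i32 0 ; not necessarily poison: select blocks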
5407
5408 void llvm::getGuaranteedWellDefinedOps(
5409 const Instruction *I, SmallPtrSetImpl<const Value *> &Operands) {
5410 switch (I->getOpcode()) {
5411 case Instruction::Store:
5412 Operands.insert(cast<StoreInst>(I)->getPointerOperand());
5413 break;
5414
5415 case Instruction::Load:
5416 Operands.insert(cast<LoadInst>(I)->getPointerOperand());
5417 break;
5418
5419   // Since the dereferenceable attribute implies noundef, atomic operations
5420   // also implicitly have noundef pointer operands.
5421 case Instruction::AtomicCmpXchg:
5422 Operands.insert(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
5423 break;
5424
5425 case Instruction::AtomicRMW:
5426 Operands.insert(cast<AtomicRMWInst>(I)->getPointerOperand());
5427 break;
5428
5429 case Instruction::Call:
5430 case Instruction::Invoke: {
5431 const CallBase *CB = cast<CallBase>(I);
5432 if (CB->isIndirectCall())
5433 Operands.insert(CB->getCalledOperand());
5434 for (unsigned i = 0; i < CB->arg_size(); ++i) {
5435 if (CB->paramHasAttr(i, Attribute::NoUndef) ||
5436 CB->paramHasAttr(i, Attribute::Dereferenceable))
5437 Operands.insert(CB->getArgOperand(i));
5438 }
5439 break;
5440 }
5441 case Instruction::Ret:
5442 if (I->getFunction()->hasRetAttribute(Attribute::NoUndef))
5443 Operands.insert(I->getOperand(0));
5444 break;
5445 default:
5446 break;
5447 }
5448 }
5449
5450 void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
5451 SmallPtrSetImpl<const Value *> &Operands) {
5452 getGuaranteedWellDefinedOps(I, Operands);
5453 switch (I->getOpcode()) {
5454 // Divisors of these operations are allowed to be partially undef.
5455 case Instruction::UDiv:
5456 case Instruction::SDiv:
5457 case Instruction::URem:
5458 case Instruction::SRem:
5459 Operands.insert(I->getOperand(1));
5460 break;
5461
5462 default:
5463 break;
5464 }
5465 }
5466
5467 bool llvm::mustTriggerUB(const Instruction *I,
5468 const SmallSet<const Value *, 16>& KnownPoison) {
5469 SmallPtrSet<const Value *, 4> NonPoisonOps;
5470 getGuaranteedNonPoisonOps(I, NonPoisonOps);
5471
5472 for (const auto *V : NonPoisonOps)
5473 if (KnownPoison.count(V))
5474 return true;
5475
5476 return false;
5477 }
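// Example (illustrative sketch): a typical driver for this predicate. Once a
// value assumed to be poison reaches an operand that must be non-poison (a
// divisor, a stored-to pointer, ...), executing that instruction is UB.
//
//   SmallSet<const Value *, 16> KnownPoison;
//   KnownPoison.insert(MaybePoison); // hypothetical value assumed poison
//   for (const Instruction &I : *BB)
//     if (mustTriggerUB(&I, KnownPoison))
//       ...; // program is undefined whenever MaybePoison is in fact poison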
5478
5479 static bool programUndefinedIfUndefOrPoison(const Value *V,
5480 bool PoisonOnly) {
5481 // We currently only look for uses of values within the same basic
5482 // block, as that makes it easier to guarantee that the uses will be
5483 // executed given that Inst is executed.
5484 //
5485 // FIXME: Expand this to consider uses beyond the same basic block. To do
5486 // this, look out for the distinction between post-dominance and strong
5487 // post-dominance.
5488 const BasicBlock *BB = nullptr;
5489 BasicBlock::const_iterator Begin;
5490 if (const auto *Inst = dyn_cast<Instruction>(V)) {
5491 BB = Inst->getParent();
5492 Begin = Inst->getIterator();
5493 Begin++;
5494 } else if (const auto *Arg = dyn_cast<Argument>(V)) {
5495 BB = &Arg->getParent()->getEntryBlock();
5496 Begin = BB->begin();
5497 } else {
5498 return false;
5499 }
5500
5501 // Limit number of instructions we look at, to avoid scanning through large
5502 // blocks. The current limit is chosen arbitrarily.
5503 unsigned ScanLimit = 32;
5504 BasicBlock::const_iterator End = BB->end();
5505
5506 if (!PoisonOnly) {
5507 // Since undef does not propagate eagerly, be conservative & just check
5508 // whether a value is directly passed to an instruction that must take
5509 // well-defined operands.
5510
5511 for (auto &I : make_range(Begin, End)) {
5512 if (isa<DbgInfoIntrinsic>(I))
5513 continue;
5514 if (--ScanLimit == 0)
5515 break;
5516
5517 SmallPtrSet<const Value *, 4> WellDefinedOps;
5518 getGuaranteedWellDefinedOps(&I, WellDefinedOps);
5519 if (WellDefinedOps.contains(V))
5520 return true;
5521
5522 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5523 break;
5524 }
5525 return false;
5526 }
5527
5528   // Set of values that we have proved will yield poison if V
5529   // does.
5530 SmallSet<const Value *, 16> YieldsPoison;
5531 SmallSet<const BasicBlock *, 4> Visited;
5532
5533 YieldsPoison.insert(V);
5534 auto Propagate = [&](const User *User) {
5535 if (propagatesPoison(cast<Operator>(User)))
5536 YieldsPoison.insert(User);
5537 };
5538 for_each(V->users(), Propagate);
5539 Visited.insert(BB);
5540
5541 while (true) {
5542 for (auto &I : make_range(Begin, End)) {
5543 if (isa<DbgInfoIntrinsic>(I))
5544 continue;
5545 if (--ScanLimit == 0)
5546 return false;
5547 if (mustTriggerUB(&I, YieldsPoison))
5548 return true;
5549 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5550 return false;
5551
5552 // Mark poison that propagates from I through uses of I.
5553 if (YieldsPoison.count(&I))
5554 for_each(I.users(), Propagate);
5555 }
5556
5557 BB = BB->getSingleSuccessor();
5558 if (!BB || !Visited.insert(BB).second)
5559 break;
5560
5561 Begin = BB->getFirstNonPHI()->getIterator();
5562 End = BB->end();
5563 }
5564 return false;
5565 }
5566
5567 bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
5568 return ::programUndefinedIfUndefOrPoison(Inst, false);
5569 }
5570
5571 bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
5572 return ::programUndefinedIfUndefOrPoison(Inst, true);
5573 }
5574
5575 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
5576 if (FMF.noNaNs())
5577 return true;
5578
5579 if (auto *C = dyn_cast<ConstantFP>(V))
5580 return !C->isNaN();
5581
5582 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5583 if (!C->getElementType()->isFloatingPointTy())
5584 return false;
5585 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5586 if (C->getElementAsAPFloat(I).isNaN())
5587 return false;
5588 }
5589 return true;
5590 }
5591
5592 if (isa<ConstantAggregateZero>(V))
5593 return true;
5594
5595 return false;
5596 }
5597
5598 static bool isKnownNonZero(const Value *V) {
5599 if (auto *C = dyn_cast<ConstantFP>(V))
5600 return !C->isZero();
5601
5602 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5603 if (!C->getElementType()->isFloatingPointTy())
5604 return false;
5605 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5606 if (C->getElementAsAPFloat(I).isZero())
5607 return false;
5608 }
5609 return true;
5610 }
5611
5612 return false;
5613 }
5614
5615 /// Match the clamp pattern for float types, ignoring NaNs and signed zeros.
5616 /// Given a non-min/max outer cmp/select from the clamp pattern, this
5617 /// function recognizes whether it can be substituted by a "canonical" min/max
5618 /// pattern.
5619 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
5620 Value *CmpLHS, Value *CmpRHS,
5621 Value *TrueVal, Value *FalseVal,
5622 Value *&LHS, Value *&RHS) {
5623 // Try to match
5624 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
5625 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
5626 // and return description of the outer Max/Min.
5627
5628 // First, check if select has inverse order:
5629 if (CmpRHS == FalseVal) {
5630 std::swap(TrueVal, FalseVal);
5631 Pred = CmpInst::getInversePredicate(Pred);
5632 }
5633
5634 // Assume success now. If there's no match, callers should not use these anyway.
5635 LHS = TrueVal;
5636 RHS = FalseVal;
5637
5638 const APFloat *FC1;
5639 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
5640 return {SPF_UNKNOWN, SPNB_NA, false};
5641
5642 const APFloat *FC2;
5643 switch (Pred) {
5644 case CmpInst::FCMP_OLT:
5645 case CmpInst::FCMP_OLE:
5646 case CmpInst::FCMP_ULT:
5647 case CmpInst::FCMP_ULE:
5648 if (match(FalseVal,
5649 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
5650 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5651 *FC1 < *FC2)
5652 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
5653 break;
5654 case CmpInst::FCMP_OGT:
5655 case CmpInst::FCMP_OGE:
5656 case CmpInst::FCMP_UGT:
5657 case CmpInst::FCMP_UGE:
5658 if (match(FalseVal,
5659 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
5660 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5661 *FC1 > *FC2)
5662 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
5663 break;
5664 default:
5665 break;
5666 }
5667
5668 return {SPF_UNKNOWN, SPNB_NA, false};
5669 }
5670
5671 /// Recognize variations of:
5672 /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
5673 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
5674 Value *CmpLHS, Value *CmpRHS,
5675 Value *TrueVal, Value *FalseVal) {
5676 // Swap the select operands and predicate to match the patterns below.
5677 if (CmpRHS != TrueVal) {
5678 Pred = ICmpInst::getSwappedPredicate(Pred);
5679 std::swap(TrueVal, FalseVal);
5680 }
5681 const APInt *C1;
5682 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
5683 const APInt *C2;
5684 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
5685 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5686 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
5687 return {SPF_SMAX, SPNB_NA, false};
5688
5689 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
5690 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5691 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
5692 return {SPF_SMIN, SPNB_NA, false};
5693
5694 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
5695 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5696 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
5697 return {SPF_UMAX, SPNB_NA, false};
5698
5699 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
5700 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5701 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
5702 return {SPF_UMIN, SPNB_NA, false};
5703 }
5704 return {SPF_UNKNOWN, SPNB_NA, false};
5705 }
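// Example (illustrative): the IR shape for clamping %x into [10, 100] that
// the matcher above recognizes as SMAX(SMIN(%x, 100), 10):
//
//   %min = select (icmp slt i32 %x, 100), i32 %x, i32 100 ; SMIN(%x, 100)
//   %r   = select (icmp slt i32 %x, 10), i32 10, i32 %min ; SMAX(10, %min)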
5706
5707 /// Recognize variations of:
5708 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
5709 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
5710 Value *CmpLHS, Value *CmpRHS,
5711 Value *TVal, Value *FVal,
5712 unsigned Depth) {
5713 // TODO: Allow FP min/max with nnan/nsz.
5714 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
5715
5716 Value *A = nullptr, *B = nullptr;
5717 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
5718 if (!SelectPatternResult::isMinOrMax(L.Flavor))
5719 return {SPF_UNKNOWN, SPNB_NA, false};
5720
5721 Value *C = nullptr, *D = nullptr;
5722 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
5723 if (L.Flavor != R.Flavor)
5724 return {SPF_UNKNOWN, SPNB_NA, false};
5725
5726 // We have something like: x Pred y ? min(a, b) : min(c, d).
5727 // Try to match the compare to the min/max operations of the select operands.
5728 // First, make sure we have the right compare predicate.
5729 switch (L.Flavor) {
5730 case SPF_SMIN:
5731 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
5732 Pred = ICmpInst::getSwappedPredicate(Pred);
5733 std::swap(CmpLHS, CmpRHS);
5734 }
5735 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
5736 break;
5737 return {SPF_UNKNOWN, SPNB_NA, false};
5738 case SPF_SMAX:
5739 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
5740 Pred = ICmpInst::getSwappedPredicate(Pred);
5741 std::swap(CmpLHS, CmpRHS);
5742 }
5743 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
5744 break;
5745 return {SPF_UNKNOWN, SPNB_NA, false};
5746 case SPF_UMIN:
5747 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
5748 Pred = ICmpInst::getSwappedPredicate(Pred);
5749 std::swap(CmpLHS, CmpRHS);
5750 }
5751 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
5752 break;
5753 return {SPF_UNKNOWN, SPNB_NA, false};
5754 case SPF_UMAX:
5755 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
5756 Pred = ICmpInst::getSwappedPredicate(Pred);
5757 std::swap(CmpLHS, CmpRHS);
5758 }
5759 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
5760 break;
5761 return {SPF_UNKNOWN, SPNB_NA, false};
5762 default:
5763 return {SPF_UNKNOWN, SPNB_NA, false};
5764 }
5765
5766 // If there is a common operand in the already matched min/max and the other
5767 // min/max operands match the compare operands (either directly or inverted),
5768 // then this is min/max of the same flavor.
5769
5770 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5771 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5772 if (D == B) {
5773 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5774 match(A, m_Not(m_Specific(CmpRHS)))))
5775 return {L.Flavor, SPNB_NA, false};
5776 }
5777 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5778 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5779 if (C == B) {
5780 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5781 match(A, m_Not(m_Specific(CmpRHS)))))
5782 return {L.Flavor, SPNB_NA, false};
5783 }
5784 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5785 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5786 if (D == A) {
5787 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5788 match(B, m_Not(m_Specific(CmpRHS)))))
5789 return {L.Flavor, SPNB_NA, false};
5790 }
5791 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5792 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5793 if (C == A) {
5794 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5795 match(B, m_Not(m_Specific(CmpRHS)))))
5796 return {L.Flavor, SPNB_NA, false};
5797 }
5798
5799 return {SPF_UNKNOWN, SPNB_NA, false};
5800 }
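
// For illustration, the matcher above recognizes IR shaped like this (all
// value names are hypothetical):
//
//   %c1 = icmp slt i32 %a, %b
//   %m1 = select i1 %c1, i32 %a, i32 %b    ; smin(a, b)
//   %c2 = icmp slt i32 %c, %b
//   %m2 = select i1 %c2, i32 %c, i32 %b    ; smin(c, b)
//   %c3 = icmp slt i32 %a, %c
//   %r  = select i1 %c3, i32 %m1, i32 %m2  ; smin(smin(a, b), smin(c, b))
//
// Here A = %a, B = %b, C = %c, D = %b, so the D == B case fires with
// CmpLHS == A and CmpRHS == C, and the result is a single SPF_SMIN flavor.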
5801
5802 /// If the input value is the result of a 'not' op, constant integer, or vector
5803 /// splat of a constant integer, return the bitwise-not source value.
5804 /// TODO: This could be extended to handle non-splat vector integer constants.
5805 static Value *getNotValue(Value *V) {
5806 Value *NotV;
5807 if (match(V, m_Not(m_Value(NotV))))
5808 return NotV;
5809
5810 const APInt *C;
5811 if (match(V, m_APInt(C)))
5812 return ConstantInt::get(V->getType(), ~(*C));
5813
5814 return nullptr;
5815 }
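
// A few worked examples for the helper above (purely illustrative):
//   getNotValue(xor i32 %x, -1) --> %x       (matches the m_Not pattern)
//   getNotValue(i32 42)         --> i32 -43  (~42 as a constant)
//   getNotValue(i32 %x)         --> nullptr  (neither a 'not' nor a constant)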
5816
5817 /// Match non-obvious integer minimum and maximum sequences.
5818 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
5819 Value *CmpLHS, Value *CmpRHS,
5820 Value *TrueVal, Value *FalseVal,
5821 Value *&LHS, Value *&RHS,
5822 unsigned Depth) {
5823 // Assume success. If there's no match, callers should not use these anyway.
5824 LHS = TrueVal;
5825 RHS = FalseVal;
5826
5827 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
5828 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5829 return SPR;
5830
5831 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
5832 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5833 return SPR;
5834
5835 // Look through 'not' ops to find disguised min/max.
5836 // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
5837 // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
5838 if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
5839 switch (Pred) {
5840 case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
5841 case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
5842 case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
5843 case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
5844 default: break;
5845 }
5846 }
5847
5848 // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
5849 // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
5850 if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
5851 switch (Pred) {
5852 case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
5853 case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
5854 case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
5855 case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
5856 default: break;
5857 }
5858 }
5859
5860 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
5861 return {SPF_UNKNOWN, SPNB_NA, false};
5862
5863 // Z = X -nsw Y
5864 // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
5865 // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
5866 if (match(TrueVal, m_Zero()) &&
5867 match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5868 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
5869
5870 // Z = X -nsw Y
5871 // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
5872 // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
5873 if (match(FalseVal, m_Zero()) &&
5874 match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5875 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
5876
5877 const APInt *C1;
5878 if (!match(CmpRHS, m_APInt(C1)))
5879 return {SPF_UNKNOWN, SPNB_NA, false};
5880
5881 // An unsigned min/max can be written with a signed compare.
5882 const APInt *C2;
5883 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
5884 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
5885 // Is the sign bit set?
5886 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
5887 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
5888 if (Pred == CmpInst::ICMP_SLT && C1->isZero() && C2->isMaxSignedValue())
5889 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5890
5891 // Is the sign bit clear?
5892 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
5893 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
5894 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnes() && C2->isMinSignedValue())
5895 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5896 }
5897
5898 return {SPF_UNKNOWN, SPNB_NA, false};
5899 }
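
// As an example of the nsw-sub clauses above, IR of the following shape
// (hypothetical names) folds to a min flavor:
//
//   %z   = sub nsw i32 %x, %y
//   %cmp = icmp sgt i32 %x, %y
//   %r   = select i1 %cmp, i32 0, i32 %z   ; --> SPF_SMIN over {0, %z}
//
// because x >s y is equivalent to (x - y) >s 0 when the subtraction cannot
// wrap.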
5900
5901 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
5902 assert(X && Y && "Invalid operand");
5903
5904 // X = sub (0, Y) || X = sub nsw (0, Y)
5905 if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
5906 (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
5907 return true;
5908
5909 // Y = sub (0, X) || Y = sub nsw (0, X)
5910 if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
5911 (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
5912 return true;
5913
5914 // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
5915 Value *A, *B;
5916 return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
5917 match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
5918 (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
5919 match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
5920 }
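
// Usage sketch for isKnownNegation (hypothetical values):
//   %n = sub nsw i32 0, %x
//   isKnownNegation(%n, %x)                   --> true
//   isKnownNegation(%n, %x, /*NeedNSW=*/true) --> true, since the sub is nsw
// For the %a - %b vs. %b - %a form, both subs must carry nsw when NeedNSW is
// set.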
5921
5922 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
5923 FastMathFlags FMF,
5924 Value *CmpLHS, Value *CmpRHS,
5925 Value *TrueVal, Value *FalseVal,
5926 Value *&LHS, Value *&RHS,
5927 unsigned Depth) {
5928 if (CmpInst::isFPPredicate(Pred)) {
5929 // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
5930 // 0.0 operand, set the compare's 0.0 operands to that same value for the
5931 // purpose of identifying min/max. Disregard vector constants with undefined
5932     // elements because those cannot be back-propagated for analysis.
5933 Value *OutputZeroVal = nullptr;
5934 if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
5935 !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
5936 OutputZeroVal = TrueVal;
5937 else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
5938 !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
5939 OutputZeroVal = FalseVal;
5940
5941 if (OutputZeroVal) {
5942 if (match(CmpLHS, m_AnyZeroFP()))
5943 CmpLHS = OutputZeroVal;
5944 if (match(CmpRHS, m_AnyZeroFP()))
5945 CmpRHS = OutputZeroVal;
5946 }
5947 }
5948
5949 LHS = CmpLHS;
5950 RHS = CmpRHS;
5951
5952   // Handling of signed zero may be inconsistent between implementations:
5953   //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
5954   //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
5955   // Therefore, we behave conservatively and only proceed if at least one of
5956   // the operands is known not to be zero or if we don't care about signed zero.
5957 switch (Pred) {
5958 default: break;
5959 // FIXME: Include OGT/OLT/UGT/ULT.
5960 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
5961 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
5962 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5963 !isKnownNonZero(CmpRHS))
5964 return {SPF_UNKNOWN, SPNB_NA, false};
5965 }
5966
5967 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
5968 bool Ordered = false;
5969
5970 // When given one NaN and one non-NaN input:
5971 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
5972 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
5973 // ordered comparison fails), which could be NaN or non-NaN.
5974   // So here we discover exactly what NaN behavior is required/accepted.
5975 if (CmpInst::isFPPredicate(Pred)) {
5976 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
5977 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
5978
5979 if (LHSSafe && RHSSafe) {
5980 // Both operands are known non-NaN.
5981 NaNBehavior = SPNB_RETURNS_ANY;
5982 } else if (CmpInst::isOrdered(Pred)) {
5983 // An ordered comparison will return false when given a NaN, so it
5984 // returns the RHS.
5985 Ordered = true;
5986 if (LHSSafe)
5987 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
5988 NaNBehavior = SPNB_RETURNS_NAN;
5989 else if (RHSSafe)
5990 NaNBehavior = SPNB_RETURNS_OTHER;
5991 else
5992 // Completely unsafe.
5993 return {SPF_UNKNOWN, SPNB_NA, false};
5994 } else {
5995 Ordered = false;
5996 // An unordered comparison will return true when given a NaN, so it
5997 // returns the LHS.
5998 if (LHSSafe)
5999 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
6000 NaNBehavior = SPNB_RETURNS_OTHER;
6001 else if (RHSSafe)
6002 NaNBehavior = SPNB_RETURNS_NAN;
6003 else
6004 // Completely unsafe.
6005 return {SPF_UNKNOWN, SPNB_NA, false};
6006 }
6007 }
6008
6009 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
6010 std::swap(CmpLHS, CmpRHS);
6011 Pred = CmpInst::getSwappedPredicate(Pred);
6012 if (NaNBehavior == SPNB_RETURNS_NAN)
6013 NaNBehavior = SPNB_RETURNS_OTHER;
6014 else if (NaNBehavior == SPNB_RETURNS_OTHER)
6015 NaNBehavior = SPNB_RETURNS_NAN;
6016 Ordered = !Ordered;
6017 }
6018
6019 // ([if]cmp X, Y) ? X : Y
6020 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
6021 switch (Pred) {
6022 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
6023 case ICmpInst::ICMP_UGT:
6024 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
6025 case ICmpInst::ICMP_SGT:
6026 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
6027 case ICmpInst::ICMP_ULT:
6028 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
6029 case ICmpInst::ICMP_SLT:
6030 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
6031 case FCmpInst::FCMP_UGT:
6032 case FCmpInst::FCMP_UGE:
6033 case FCmpInst::FCMP_OGT:
6034 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
6035 case FCmpInst::FCMP_ULT:
6036 case FCmpInst::FCMP_ULE:
6037 case FCmpInst::FCMP_OLT:
6038 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
6039 }
6040 }
6041
6042 if (isKnownNegation(TrueVal, FalseVal)) {
6043 // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
6044 // match against either LHS or sext(LHS).
6045 auto MaybeSExtCmpLHS =
6046 m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
6047 auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
6048 auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
6049 if (match(TrueVal, MaybeSExtCmpLHS)) {
6050 // Set the return values. If the compare uses the negated value (-X >s 0),
6051 // swap the return values because the negated value is always 'RHS'.
6052 LHS = TrueVal;
6053 RHS = FalseVal;
6054 if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
6055 std::swap(LHS, RHS);
6056
6057 // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
6058 // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
6059 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
6060 return {SPF_ABS, SPNB_NA, false};
6061
6062 // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
6063 if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
6064 return {SPF_ABS, SPNB_NA, false};
6065
6066 // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
6067 // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
6068 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
6069 return {SPF_NABS, SPNB_NA, false};
6070 }
6071 else if (match(FalseVal, MaybeSExtCmpLHS)) {
6072 // Set the return values. If the compare uses the negated value (-X >s 0),
6073 // swap the return values because the negated value is always 'RHS'.
6074 LHS = FalseVal;
6075 RHS = TrueVal;
6076 if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
6077 std::swap(LHS, RHS);
6078
6079 // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
6080 // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
6081 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
6082 return {SPF_NABS, SPNB_NA, false};
6083
6084 // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
6085 // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
6086 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
6087 return {SPF_ABS, SPNB_NA, false};
6088 }
6089 }
6090
6091 if (CmpInst::isIntPredicate(Pred))
6092 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
6093
6094   // According to IEEE 754-2008 5.3.1, minNum(0.0, -0.0) and similar
6095   // may return either -0.0 or 0.0, so an fcmp/select pair has stricter
6096   // semantics than minNum. Be conservative in such cases.
6097 if (NaNBehavior != SPNB_RETURNS_ANY ||
6098 (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
6099 !isKnownNonZero(CmpRHS)))
6100 return {SPF_UNKNOWN, SPNB_NA, false};
6101
6102 return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
6103 }
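
// A concrete instance of the abs matching above (hypothetical names):
//
//   %cmp = icmp sgt i32 %x, -1
//   %neg = sub i32 0, %x
//   %sel = select i1 %cmp, i32 %x, i32 %neg
//
// isKnownNegation(%x, %neg) holds, TrueVal matches CmpLHS, and CmpRHS (-1) is
// in ZeroOrAllOnes, so the result is SPF_ABS with LHS = %x and RHS = %neg.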
6104
6105 /// Helps to match a select pattern in case of a type mismatch.
6106 ///
6107 /// The function handles the case when the types of the true and false values
6108 /// of a select instruction differ from the type of the cmp instruction's
6109 /// operands because of a cast instruction. It checks whether it is legal to
6110 /// move the cast operation after the "select". If so, it returns the new
6111 /// second value of the "select" (with the assumption that the cast is moved):
6112 /// 1. As operand of cast instruction when both values of "select" are same cast
6113 /// instructions.
6114 /// 2. As restored constant (by applying reverse cast operation) when the first
6115 /// value of the "select" is a cast operation and the second value is a
6116 /// constant.
6117 /// NOTE: We return only the new second value because the first value could be
6118 /// accessed as operand of cast instruction.
6119 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
6120 Instruction::CastOps *CastOp) {
6121 auto *Cast1 = dyn_cast<CastInst>(V1);
6122 if (!Cast1)
6123 return nullptr;
6124
6125 *CastOp = Cast1->getOpcode();
6126 Type *SrcTy = Cast1->getSrcTy();
6127 if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
6128 // If V1 and V2 are both the same cast from the same type, look through V1.
6129 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
6130 return Cast2->getOperand(0);
6131 return nullptr;
6132 }
6133
6134 auto *C = dyn_cast<Constant>(V2);
6135 if (!C)
6136 return nullptr;
6137
6138 Constant *CastedTo = nullptr;
6139 switch (*CastOp) {
6140 case Instruction::ZExt:
6141 if (CmpI->isUnsigned())
6142 CastedTo = ConstantExpr::getTrunc(C, SrcTy);
6143 break;
6144 case Instruction::SExt:
6145 if (CmpI->isSigned())
6146 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
6147 break;
6148 case Instruction::Trunc:
6149 Constant *CmpConst;
6150 if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
6151 CmpConst->getType() == SrcTy) {
6152 // Here we have the following case:
6153 //
6154 // %cond = cmp iN %x, CmpConst
6155 // %tr = trunc iN %x to iK
6156       // %narrowsel = select i1 %cond, iK %tr, iK C
6157 //
6158 // We can always move trunc after select operation:
6159 //
6160 // %cond = cmp iN %x, CmpConst
6161 // %widesel = select i1 %cond, iN %x, iN CmpConst
6162 // %tr = trunc iN %widesel to iK
6163 //
6164       // Note that C could be extended in any way because we don't care about
6165       // upper bits after truncation. It can't be an abs pattern, because that
6166       // would look like:
6167       //
6168       // select i1 %cond, x, -x.
6169       //
6170       // So only a min/max pattern can be matched. Such a match requires the
6171       // widened C to equal CmpConst; that is why we set the widened C to
6172       // CmpConst and check the condition trunc(CmpConst) == C below.
6173 CastedTo = CmpConst;
6174 } else {
6175 CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
6176 }
6177 break;
6178 case Instruction::FPTrunc:
6179 CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
6180 break;
6181 case Instruction::FPExt:
6182 CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
6183 break;
6184 case Instruction::FPToUI:
6185 CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
6186 break;
6187 case Instruction::FPToSI:
6188 CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
6189 break;
6190 case Instruction::UIToFP:
6191 CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
6192 break;
6193 case Instruction::SIToFP:
6194 CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
6195 break;
6196 default:
6197 break;
6198 }
6199
6200 if (!CastedTo)
6201 return nullptr;
6202
6203 // Make sure the cast doesn't lose any information.
6204 Constant *CastedBack =
6205 ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
6206 if (CastedBack != C)
6207 return nullptr;
6208
6209 return CastedTo;
6210 }
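
// Worked example for the zext case above (hypothetical names):
//
//   %w   = zext i8 %x to i32
//   %cmp = icmp ult i32 %w, 42
//   %sel = select i1 %cmp, i32 %w, i32 42
//
// lookThroughCast(%cmp, %w, i32 42, &Op) truncates 42 to i8, zero-extends it
// back to verify that no bits were lost, and returns i8 42, so the select can
// be analyzed in the narrow type.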
6211
6212 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
6213 Instruction::CastOps *CastOp,
6214 unsigned Depth) {
6215 if (Depth >= MaxAnalysisRecursionDepth)
6216 return {SPF_UNKNOWN, SPNB_NA, false};
6217
6218 SelectInst *SI = dyn_cast<SelectInst>(V);
6219 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
6220
6221 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
6222 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
6223
6224 Value *TrueVal = SI->getTrueValue();
6225 Value *FalseVal = SI->getFalseValue();
6226
6227 return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
6228 CastOp, Depth);
6229 }
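
// Minimal caller-side sketch (illustrative only; 'Sel' is a hypothetical
// select instruction):
//
//   Value *LHS, *RHS;
//   SelectPatternResult R = matchSelectPattern(Sel, LHS, RHS);
//   if (SelectPatternResult::isMinOrMax(R.Flavor)) {
//     // Sel behaves as R.Flavor applied to LHS and RHS.
//   }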
6230
6231 SelectPatternResult llvm::matchDecomposedSelectPattern(
6232 CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
6233 Instruction::CastOps *CastOp, unsigned Depth) {
6234 CmpInst::Predicate Pred = CmpI->getPredicate();
6235 Value *CmpLHS = CmpI->getOperand(0);
6236 Value *CmpRHS = CmpI->getOperand(1);
6237 FastMathFlags FMF;
6238 if (isa<FPMathOperator>(CmpI))
6239 FMF = CmpI->getFastMathFlags();
6240
6241 // Bail out early.
6242 if (CmpI->isEquality())
6243 return {SPF_UNKNOWN, SPNB_NA, false};
6244
6245 // Deal with type mismatches.
6246 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
6247 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
6248 // If this is a potential fmin/fmax with a cast to integer, then ignore
6249 // -0.0 because there is no corresponding integer value.
6250 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
6251 FMF.setNoSignedZeros();
6252 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
6253 cast<CastInst>(TrueVal)->getOperand(0), C,
6254 LHS, RHS, Depth);
6255 }
6256 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
6257 // If this is a potential fmin/fmax with a cast to integer, then ignore
6258 // -0.0 because there is no corresponding integer value.
6259 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
6260 FMF.setNoSignedZeros();
6261 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
6262 C, cast<CastInst>(FalseVal)->getOperand(0),
6263 LHS, RHS, Depth);
6264 }
6265 }
6266 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
6267 LHS, RHS, Depth);
6268 }
6269
6270 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
6271 if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
6272 if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
6273 if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
6274 if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
6275 if (SPF == SPF_FMINNUM)
6276 return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
6277 if (SPF == SPF_FMAXNUM)
6278 return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
6279 llvm_unreachable("unhandled!");
6280 }
6281
6282 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
6283 if (SPF == SPF_SMIN) return SPF_SMAX;
6284 if (SPF == SPF_UMIN) return SPF_UMAX;
6285 if (SPF == SPF_SMAX) return SPF_SMIN;
6286 if (SPF == SPF_UMAX) return SPF_UMIN;
6287 llvm_unreachable("unhandled!");
6288 }
6289
6290 Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) {
6291 switch (MinMaxID) {
6292 case Intrinsic::smax: return Intrinsic::smin;
6293 case Intrinsic::smin: return Intrinsic::smax;
6294 case Intrinsic::umax: return Intrinsic::umin;
6295 case Intrinsic::umin: return Intrinsic::umax;
6296 default: llvm_unreachable("Unexpected intrinsic");
6297 }
6298 }
6299
6300 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
6301 return getMinMaxPred(getInverseMinMaxFlavor(SPF));
6302 }
6303
6304 APInt llvm::getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth) {
6305 switch (SPF) {
6306 case SPF_SMAX: return APInt::getSignedMaxValue(BitWidth);
6307 case SPF_SMIN: return APInt::getSignedMinValue(BitWidth);
6308 case SPF_UMAX: return APInt::getMaxValue(BitWidth);
6309 case SPF_UMIN: return APInt::getMinValue(BitWidth);
6310 default: llvm_unreachable("Unexpected flavor");
6311 }
6312 }
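
// E.g. getMinMaxLimit(SPF_UMAX, 8) is 255 and getMinMaxLimit(SPF_SMIN, 8) is
// -128: each flavor's absorbing value, i.e. the operand that always wins.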
6313
6314 std::pair<Intrinsic::ID, bool>
6315 llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {
6316 // Check if VL contains select instructions that can be folded into a min/max
6317 // vector intrinsic and return the intrinsic if it is possible.
6318 // TODO: Support floating point min/max.
6319 bool AllCmpSingleUse = true;
6320 SelectPatternResult SelectPattern;
6321 SelectPattern.Flavor = SPF_UNKNOWN;
6322 if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
6323 Value *LHS, *RHS;
6324 auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
6325 if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) ||
6326 CurrentPattern.Flavor == SPF_FMINNUM ||
6327 CurrentPattern.Flavor == SPF_FMAXNUM ||
6328 !I->getType()->isIntOrIntVectorTy())
6329 return false;
6330 if (SelectPattern.Flavor != SPF_UNKNOWN &&
6331 SelectPattern.Flavor != CurrentPattern.Flavor)
6332 return false;
6333 SelectPattern = CurrentPattern;
6334 AllCmpSingleUse &=
6335 match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value()));
6336 return true;
6337 })) {
6338 switch (SelectPattern.Flavor) {
6339 case SPF_SMIN:
6340 return {Intrinsic::smin, AllCmpSingleUse};
6341 case SPF_UMIN:
6342 return {Intrinsic::umin, AllCmpSingleUse};
6343 case SPF_SMAX:
6344 return {Intrinsic::smax, AllCmpSingleUse};
6345 case SPF_UMAX:
6346 return {Intrinsic::umax, AllCmpSingleUse};
6347 default:
6348 llvm_unreachable("unexpected select pattern flavor");
6349 }
6350 }
6351 return {Intrinsic::not_intrinsic, false};
6352 }
6353
6354 bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
6355 Value *&Start, Value *&Step) {
6356 // Handle the case of a simple two-predecessor recurrence PHI.
6357 // There's a lot more that could theoretically be done here, but
6358 // this is sufficient to catch some interesting cases.
6359 if (P->getNumIncomingValues() != 2)
6360 return false;
6361
6362 for (unsigned i = 0; i != 2; ++i) {
6363 Value *L = P->getIncomingValue(i);
6364 Value *R = P->getIncomingValue(!i);
6365 Operator *LU = dyn_cast<Operator>(L);
6366 if (!LU)
6367 continue;
6368 unsigned Opcode = LU->getOpcode();
6369
6370 switch (Opcode) {
6371 default:
6372 continue;
6373 // TODO: Expand list -- xor, div, gep, uaddo, etc..
6374 case Instruction::LShr:
6375 case Instruction::AShr:
6376 case Instruction::Shl:
6377 case Instruction::Add:
6378 case Instruction::Sub:
6379 case Instruction::And:
6380 case Instruction::Or:
6381 case Instruction::Mul: {
6382 Value *LL = LU->getOperand(0);
6383 Value *LR = LU->getOperand(1);
6384 // Find a recurrence.
6385 if (LL == P)
6386 L = LR;
6387 else if (LR == P)
6388 L = LL;
6389 else
6390 continue; // Check for recurrence with L and R flipped.
6391
6392 break; // Match!
6393 }
6394 };
6395
6396     // We have matched a recurrence of the form:
6397     //   %iv = phi [R, %entry], [%iv.next, %backedge]
6398     //   %iv.next = binop %iv, L
6399     // OR
6400     //   %iv = phi [R, %entry], [%iv.next, %backedge]
6401     //   %iv.next = binop L, %iv
6402 BO = cast<BinaryOperator>(LU);
6403 Start = R;
6404 Step = L;
6405 return true;
6406 }
6407 return false;
6408 }
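
// The shape being matched, spelled out as IR (hypothetical names):
//
//   loop:
//     %iv      = phi i32 [ %init, %entry ], [ %iv.next, %loop ]
//     %iv.next = add i32 %iv, %step
//
// matchSimpleRecurrence(%iv, BO, Start, Step) then yields BO = %iv.next,
// Start = %init, and Step = %step.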
6409
6410 bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
6411 Value *&Start, Value *&Step) {
6412 BinaryOperator *BO = nullptr;
6413 P = dyn_cast<PHINode>(I->getOperand(0));
6414 if (!P)
6415 P = dyn_cast<PHINode>(I->getOperand(1));
6416 return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
6417 }
6418
6419 /// Return true if "icmp Pred LHS RHS" is always true.
6420 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
6421 const Value *RHS, const DataLayout &DL,
6422 unsigned Depth) {
6423 assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
6424 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
6425 return true;
6426
6427 switch (Pred) {
6428 default:
6429 return false;
6430
6431 case CmpInst::ICMP_SLE: {
6432 const APInt *C;
6433
6434 // LHS s<= LHS +_{nsw} C if C >= 0
6435 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
6436 return !C->isNegative();
6437 return false;
6438 }
6439
6440 case CmpInst::ICMP_ULE: {
6441 const APInt *C;
6442
6443 // LHS u<= LHS +_{nuw} C for any C
6444 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
6445 return true;
6446
6447 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
6448 auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
6449 const Value *&X,
6450 const APInt *&CA, const APInt *&CB) {
6451 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
6452 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
6453 return true;
6454
6455 // If X & C == 0 then (X | C) == X +_{nuw} C
6456 if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
6457 match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
6458 KnownBits Known(CA->getBitWidth());
6459 computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
6460 /*CxtI*/ nullptr, /*DT*/ nullptr);
6461 if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
6462 return true;
6463 }
6464
6465 return false;
6466 };
6467
6468 const Value *X;
6469 const APInt *CLHS, *CRHS;
6470 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
6471 return CLHS->ule(*CRHS);
6472
6473 return false;
6474 }
6475 }
6476 }
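
// For instance, 'X u<= (add nuw X, C)' always holds. For the or-as-add case:
// if %x is known to have its low four bits clear (say %x = shl i8 %y, 4),
// then '(%x | 3) u<= (%x | 12)' also holds, because both ors behave as nuw
// adds and 3 u<= 12.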
6477
6478 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
6479 /// ALHS ARHS" is true. Otherwise, return None.
6480 static Optional<bool>
6481 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
6482 const Value *ARHS, const Value *BLHS, const Value *BRHS,
6483 const DataLayout &DL, unsigned Depth) {
6484 switch (Pred) {
6485 default:
6486 return None;
6487
6488 case CmpInst::ICMP_SLT:
6489 case CmpInst::ICMP_SLE:
6490 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
6491 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
6492 return true;
6493 return None;
6494
6495 case CmpInst::ICMP_ULT:
6496 case CmpInst::ICMP_ULE:
6497 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
6498 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
6499 return true;
6500 return None;
6501 }
6502 }
6503
6504 /// Return true if the operands of the two compares match. IsSwappedOps is true
6505 /// when the operands match, but are swapped.
6506 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
6507 const Value *BLHS, const Value *BRHS,
6508 bool &IsSwappedOps) {
6509
6510 bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
6511 IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
6512 return IsMatchingOps || IsSwappedOps;
6513 }
6514
6515 /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
6516 /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
6517 /// Otherwise, return None if we can't infer anything.
6518 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
6519 CmpInst::Predicate BPred,
6520 bool AreSwappedOps) {
6521 // Canonicalize the predicate as if the operands were not commuted.
6522 if (AreSwappedOps)
6523 BPred = ICmpInst::getSwappedPredicate(BPred);
6524
6525 if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
6526 return true;
6527 if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
6528 return false;
6529
6530 return None;
6531 }
6532
6533 /// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
6534 /// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
6535 /// Otherwise, return None if we can't infer anything.
6536 static Optional<bool>
6537 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
6538 const ConstantInt *C1,
6539 CmpInst::Predicate BPred,
6540 const ConstantInt *C2) {
6541 ConstantRange DomCR =
6542 ConstantRange::makeExactICmpRegion(APred, C1->getValue());
6543 ConstantRange CR = ConstantRange::makeExactICmpRegion(BPred, C2->getValue());
6544 ConstantRange Intersection = DomCR.intersectWith(CR);
6545 ConstantRange Difference = DomCR.difference(CR);
6546 if (Intersection.isEmptySet())
6547 return false;
6548 if (Difference.isEmptySet())
6549 return true;
6550 return None;
6551 }
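
// For instance, (X u< 8) implies (X u< 16) is true: the region satisfying
// 'u< 8' is [0, 8), which lies entirely inside [0, 16), so Difference is
// empty. Conversely, (X u< 8) implies (X u> 20) is false, because the two
// regions do not intersect at all.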
6552
6553 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
6554 /// false. Otherwise, return None if we can't infer anything.
6555 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
6556 CmpInst::Predicate BPred,
6557 const Value *BLHS, const Value *BRHS,
6558 const DataLayout &DL, bool LHSIsTrue,
6559 unsigned Depth) {
6560 Value *ALHS = LHS->getOperand(0);
6561 Value *ARHS = LHS->getOperand(1);
6562
6563 // The rest of the logic assumes the LHS condition is true. If that's not the
6564 // case, invert the predicate to make it so.
6565 CmpInst::Predicate APred =
6566 LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
6567
6568 // Can we infer anything when the two compares have matching operands?
6569 bool AreSwappedOps;
6570 if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
6571 if (Optional<bool> Implication = isImpliedCondMatchingOperands(
6572 APred, BPred, AreSwappedOps))
6573 return Implication;
6574 // No amount of additional analysis will infer the second condition, so
6575 // early exit.
6576 return None;
6577 }
6578
6579 // Can we infer anything when the LHS operands match and the RHS operands are
6580 // constants (not necessarily matching)?
6581 if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
6582 if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
6583 APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
6584 return Implication;
6585 // No amount of additional analysis will infer the second condition, so
6586 // early exit.
6587 return None;
6588 }
6589
6590 if (APred == BPred)
6591 return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
6592 return None;
6593 }
6594
6595 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
6596 /// false. Otherwise, return None if we can't infer anything. We expect the
6597 /// RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select' instruction.
6598 static Optional<bool>
6599 isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
6600 const Value *RHSOp0, const Value *RHSOp1,
6601 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6602 // The LHS must be an 'or', 'and', or a 'select' instruction.
6603 assert((LHS->getOpcode() == Instruction::And ||
6604 LHS->getOpcode() == Instruction::Or ||
6605 LHS->getOpcode() == Instruction::Select) &&
6606 "Expected LHS to be 'and', 'or', or 'select'.");
6607
6608 assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
6609
6610 // If the result of an 'or' is false, then we know both legs of the 'or' are
6611 // false. Similarly, if the result of an 'and' is true, then we know both
6612 // legs of the 'and' are true.
6613 const Value *ALHS, *ARHS;
6614 if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
6615 (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
6616     // FIXME: Make this non-recursive.
6617 if (Optional<bool> Implication = isImpliedCondition(
6618 ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6619 return Implication;
6620 if (Optional<bool> Implication = isImpliedCondition(
6621 ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6622 return Implication;
6623 return None;
6624 }
6625 return None;
6626 }
6627
6628 Optional<bool>
6629 llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
6630 const Value *RHSOp0, const Value *RHSOp1,
6631 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6632 // Bail out when we hit the limit.
6633 if (Depth == MaxAnalysisRecursionDepth)
6634 return None;
6635
6636 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
6637 // example.
6638 if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
6639 return None;
6640
6641 Type *OpTy = LHS->getType();
6642 assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
6643
6644   // FIXME: Extend the code below to handle vectors.
6645 if (OpTy->isVectorTy())
6646 return None;
6647
6648 assert(OpTy->isIntegerTy(1) && "implied by above");
6649
6650   // LHS is an icmp; the RHS is already given in decomposed icmp form.
6651 const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
6652 if (LHSCmp)
6653 return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6654 Depth);
6655
6656   // The LHS should be an 'or', 'and', or a 'select' instruction. We expect
6657   // the RHS to be an icmp.
6658   // FIXME: Add support for and/or/select on the RHS.
6659 if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
6660 if ((LHSI->getOpcode() == Instruction::And ||
6661 LHSI->getOpcode() == Instruction::Or ||
6662 LHSI->getOpcode() == Instruction::Select))
6663 return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6664 Depth);
6665 }
6666 return None;
6667 }
6668
6669 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
6670 const DataLayout &DL, bool LHSIsTrue,
6671 unsigned Depth) {
6672 // LHS ==> RHS by definition
6673 if (LHS == RHS)
6674 return LHSIsTrue;
6675
6676 const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
6677 if (RHSCmp)
6678 return isImpliedCondition(LHS, RHSCmp->getPredicate(),
6679 RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
6680 LHSIsTrue, Depth);
6681 return None;
6682 }
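
// Putting the pieces together, a typical query looks like this (hypothetical
// values C1 and C2):
//
//   // %c1 = icmp sgt i32 %x, 10   ; branch condition known to be true
//   // %c2 = icmp sgt i32 %x, 5
//   Optional<bool> Implied = isImpliedCondition(C1, C2, DL, /*LHSIsTrue=*/true);
//
// Here Implied is true, since x >s 10 guarantees x >s 5; None would mean no
// implication could be established either way.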
6683
6684 // Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
6685 // condition dominating ContextI, or nullptr if no condition is found.
6686 static std::pair<Value *, bool>
6687 getDomPredecessorCondition(const Instruction *ContextI) {
6688 if (!ContextI || !ContextI->getParent())
6689 return {nullptr, false};
6690
6691 // TODO: This is a poor/cheap way to determine dominance. Should we use a
6692   // dominator tree (e.g., from a SimplifyQuery) instead?
6693 const BasicBlock *ContextBB = ContextI->getParent();
6694 const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
6695 if (!PredBB)
6696 return {nullptr, false};
6697
6698 // We need a conditional branch in the predecessor.
6699 Value *PredCond;
6700 BasicBlock *TrueBB, *FalseBB;
6701 if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
6702 return {nullptr, false};
6703
6704 // The branch should get simplified. Don't bother simplifying this condition.
6705 if (TrueBB == FalseBB)
6706 return {nullptr, false};
6707
6708 assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
6709 "Predecessor block does not point to successor?");
6710
6711 // Is this condition implied by the predecessor condition?
6712 return {PredCond, TrueBB == ContextBB};
6713 }
6714
6715 Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
6716 const Instruction *ContextI,
6717 const DataLayout &DL) {
6718 assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
6719 auto PredCond = getDomPredecessorCondition(ContextI);
6720 if (PredCond.first)
6721 return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
6722 return None;
6723 }
6724
6725 Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
6726 const Value *LHS, const Value *RHS,
6727 const Instruction *ContextI,
6728 const DataLayout &DL) {
6729 auto PredCond = getDomPredecessorCondition(ContextI);
6730 if (PredCond.first)
6731 return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
6732 PredCond.second);
6733 return None;
6734 }
6735
6736 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
6737 APInt &Upper, const InstrInfoQuery &IIQ) {
6738 unsigned Width = Lower.getBitWidth();
6739 const APInt *C;
6740 switch (BO.getOpcode()) {
6741 case Instruction::Add:
6742 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
6743 // FIXME: If we have both nuw and nsw, we should reduce the range further.
6744 if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6745 // 'add nuw x, C' produces [C, UINT_MAX].
6746 Lower = *C;
6747 } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6748 if (C->isNegative()) {
6749 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
6750 Lower = APInt::getSignedMinValue(Width);
6751 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6752 } else {
6753 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
6754 Lower = APInt::getSignedMinValue(Width) + *C;
6755 Upper = APInt::getSignedMaxValue(Width) + 1;
6756 }
6757 }
6758 }
6759 break;
6760
6761 case Instruction::And:
6762 if (match(BO.getOperand(1), m_APInt(C)))
6763 // 'and x, C' produces [0, C].
6764 Upper = *C + 1;
6765 break;
6766
6767 case Instruction::Or:
6768 if (match(BO.getOperand(1), m_APInt(C)))
6769 // 'or x, C' produces [C, UINT_MAX].
6770 Lower = *C;
6771 break;
6772
6773 case Instruction::AShr:
6774 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6775 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
6776 Lower = APInt::getSignedMinValue(Width).ashr(*C);
6777 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
6778 } else if (match(BO.getOperand(0), m_APInt(C))) {
6779 unsigned ShiftAmount = Width - 1;
6780 if (!C->isZero() && IIQ.isExact(&BO))
6781 ShiftAmount = C->countTrailingZeros();
6782 if (C->isNegative()) {
6783 // 'ashr C, x' produces [C, C >> (Width-1)]
6784 Lower = *C;
6785 Upper = C->ashr(ShiftAmount) + 1;
6786 } else {
6787 // 'ashr C, x' produces [C >> (Width-1), C]
6788 Lower = C->ashr(ShiftAmount);
6789 Upper = *C + 1;
6790 }
6791 }
6792 break;
6793
6794 case Instruction::LShr:
6795 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6796 // 'lshr x, C' produces [0, UINT_MAX >> C].
6797 Upper = APInt::getAllOnes(Width).lshr(*C) + 1;
6798 } else if (match(BO.getOperand(0), m_APInt(C))) {
6799 // 'lshr C, x' produces [C >> (Width-1), C].
6800 unsigned ShiftAmount = Width - 1;
6801 if (!C->isZero() && IIQ.isExact(&BO))
6802 ShiftAmount = C->countTrailingZeros();
6803 Lower = C->lshr(ShiftAmount);
6804 Upper = *C + 1;
6805 }
6806 break;
6807
6808 case Instruction::Shl:
6809 if (match(BO.getOperand(0), m_APInt(C))) {
6810 if (IIQ.hasNoUnsignedWrap(&BO)) {
6811 // 'shl nuw C, x' produces [C, C << CLZ(C)]
6812 Lower = *C;
6813 Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
6814 } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
6815 if (C->isNegative()) {
6816 // 'shl nsw C, x' produces [C << CLO(C)-1, C]
6817 unsigned ShiftAmount = C->countLeadingOnes() - 1;
6818 Lower = C->shl(ShiftAmount);
6819 Upper = *C + 1;
6820 } else {
6821 // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
6822 unsigned ShiftAmount = C->countLeadingZeros() - 1;
6823 Lower = *C;
6824 Upper = C->shl(ShiftAmount) + 1;
6825 }
6826 }
6827 }
6828 break;
6829
6830 case Instruction::SDiv:
6831 if (match(BO.getOperand(1), m_APInt(C))) {
6832 APInt IntMin = APInt::getSignedMinValue(Width);
6833 APInt IntMax = APInt::getSignedMaxValue(Width);
6834 if (C->isAllOnes()) {
6835         // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX].
6837 Lower = IntMin + 1;
6838 Upper = IntMax + 1;
6839 } else if (C->countLeadingZeros() < Width - 1) {
6840 // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
6841 // where C != -1 and C != 0 and C != 1
6842 Lower = IntMin.sdiv(*C);
6843 Upper = IntMax.sdiv(*C);
6844 if (Lower.sgt(Upper))
6845 std::swap(Lower, Upper);
6846 Upper = Upper + 1;
6847 assert(Upper != Lower && "Upper part of range has wrapped!");
6848 }
6849 } else if (match(BO.getOperand(0), m_APInt(C))) {
6850 if (C->isMinSignedValue()) {
6851 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
6852 Lower = *C;
6853 Upper = Lower.lshr(1) + 1;
6854 } else {
6855 // 'sdiv C, x' produces [-|C|, |C|].
6856 Upper = C->abs() + 1;
6857 Lower = (-Upper) + 1;
6858 }
6859 }
6860 break;
6861
6862 case Instruction::UDiv:
6863 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
6864 // 'udiv x, C' produces [0, UINT_MAX / C].
6865 Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
6866 } else if (match(BO.getOperand(0), m_APInt(C))) {
6867 // 'udiv C, x' produces [0, C].
6868 Upper = *C + 1;
6869 }
6870 break;
6871
6872 case Instruction::SRem:
6873 if (match(BO.getOperand(1), m_APInt(C))) {
6874 // 'srem x, C' produces (-|C|, |C|).
6875 Upper = C->abs();
6876 Lower = (-Upper) + 1;
6877 }
6878 break;
6879
6880 case Instruction::URem:
6881 if (match(BO.getOperand(1), m_APInt(C)))
6882 // 'urem x, C' produces [0, C).
6883 Upper = *C;
6884 break;
6885
6886 default:
6887 break;
6888 }
6889 }
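
// Sample ranges produced above, using the half-open [Lower, Upper) convention
// that the caller turns into a ConstantRange:
//   'and  i8 %x, 7'  --> Lower = 0,  Upper = 8   (values 0..7)
//   'or   i8 %x, 16' --> Lower = 16, Upper = 0   (wraps; values 16..255)
//   'urem i8 %x, 10' --> Lower = 0,  Upper = 10  (values 0..9)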
6890
6891 static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
6892 APInt &Upper) {
6893 unsigned Width = Lower.getBitWidth();
6894 const APInt *C;
6895 switch (II.getIntrinsicID()) {
6896 case Intrinsic::ctpop:
6897 case Intrinsic::ctlz:
6898 case Intrinsic::cttz:
6899 // Maximum of set/clear bits is the bit width.
6900 assert(Lower == 0 && "Expected lower bound to be zero");
6901 Upper = Width + 1;
6902 break;
6903 case Intrinsic::uadd_sat:
6904 // uadd.sat(x, C) produces [C, UINT_MAX].
6905 if (match(II.getOperand(0), m_APInt(C)) ||
6906 match(II.getOperand(1), m_APInt(C)))
6907 Lower = *C;
6908 break;
6909 case Intrinsic::sadd_sat:
6910 if (match(II.getOperand(0), m_APInt(C)) ||
6911 match(II.getOperand(1), m_APInt(C))) {
6912 if (C->isNegative()) {
6913 // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
6914 Lower = APInt::getSignedMinValue(Width);
6915 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6916 } else {
6917 // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
6918 Lower = APInt::getSignedMinValue(Width) + *C;
6919 Upper = APInt::getSignedMaxValue(Width) + 1;
6920 }
6921 }
6922 break;
6923 case Intrinsic::usub_sat:
6924 // usub.sat(C, x) produces [0, C].
6925 if (match(II.getOperand(0), m_APInt(C)))
6926 Upper = *C + 1;
6927 // usub.sat(x, C) produces [0, UINT_MAX - C].
6928 else if (match(II.getOperand(1), m_APInt(C)))
6929 Upper = APInt::getMaxValue(Width) - *C + 1;
6930 break;
6931 case Intrinsic::ssub_sat:
6932 if (match(II.getOperand(0), m_APInt(C))) {
6933 if (C->isNegative()) {
6934 // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
6935 Lower = APInt::getSignedMinValue(Width);
6936 Upper = *C - APInt::getSignedMinValue(Width) + 1;
6937 } else {
6938 // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
6939 Lower = *C - APInt::getSignedMaxValue(Width);
6940 Upper = APInt::getSignedMaxValue(Width) + 1;
6941 }
6942 } else if (match(II.getOperand(1), m_APInt(C))) {
6943 if (C->isNegative()) {
6944 // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
6945 Lower = APInt::getSignedMinValue(Width) - *C;
6946 Upper = APInt::getSignedMaxValue(Width) + 1;
6947 } else {
6948 // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
6949 Lower = APInt::getSignedMinValue(Width);
6950 Upper = APInt::getSignedMaxValue(Width) - *C + 1;
6951 }
6952 }
6953 break;
6954 case Intrinsic::umin:
6955 case Intrinsic::umax:
6956 case Intrinsic::smin:
6957 case Intrinsic::smax:
6958 if (!match(II.getOperand(0), m_APInt(C)) &&
6959 !match(II.getOperand(1), m_APInt(C)))
6960 break;
6961
6962 switch (II.getIntrinsicID()) {
6963 case Intrinsic::umin:
6964 Upper = *C + 1;
6965 break;
6966 case Intrinsic::umax:
6967 Lower = *C;
6968 break;
6969 case Intrinsic::smin:
6970 Lower = APInt::getSignedMinValue(Width);
6971 Upper = *C + 1;
6972 break;
6973 case Intrinsic::smax:
6974 Lower = *C;
6975 Upper = APInt::getSignedMaxValue(Width) + 1;
6976 break;
6977 default:
6978 llvm_unreachable("Must be min/max intrinsic");
6979 }
6980 break;
6981 case Intrinsic::abs:
6982 // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
6983 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6984 if (match(II.getOperand(1), m_One()))
6985 Upper = APInt::getSignedMaxValue(Width) + 1;
6986 else
6987 Upper = APInt::getSignedMinValue(Width) + 1;
6988 break;
6989 default:
6990 break;
6991 }
6992 }
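
// Examples of the intrinsic ranges above (same [Lower, Upper) convention):
//   ctpop(i32 %x)           --> [0, 33)  at most 32 bits can be set
//   uadd.sat(i8 %x, i8 100) --> [100, 0) saturating add can't go below 100
//   abs(i8 %x, i1 true)     --> [0, 128) abs(INT_MIN) is poison here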
6993
6994 static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
6995 APInt &Upper, const InstrInfoQuery &IIQ) {
6996 const Value *LHS = nullptr, *RHS = nullptr;
6997 SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
6998 if (R.Flavor == SPF_UNKNOWN)
6999 return;
7000
7001 unsigned BitWidth = SI.getType()->getScalarSizeInBits();
7002
7003 if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
7004 // If the negation part of the abs (in RHS) has the NSW flag,
7005 // then the result of abs(X) is [0..SIGNED_MAX],
7006 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
7007 Lower = APInt::getZero(BitWidth);
7008 if (match(RHS, m_Neg(m_Specific(LHS))) &&
7009 IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
7010 Upper = APInt::getSignedMaxValue(BitWidth) + 1;
7011 else
7012 Upper = APInt::getSignedMinValue(BitWidth) + 1;
7013 return;
7014 }
7015
7016 if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
7017 // The result of -abs(X) is <= 0.
7018 Lower = APInt::getSignedMinValue(BitWidth);
7019 Upper = APInt(BitWidth, 1);
7020 return;
7021 }
7022
7023 const APInt *C;
7024 if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
7025 return;
7026
7027 switch (R.Flavor) {
7028 case SPF_UMIN:
7029 Upper = *C + 1;
7030 break;
7031 case SPF_UMAX:
7032 Lower = *C;
7033 break;
7034 case SPF_SMIN:
7035 Lower = APInt::getSignedMinValue(BitWidth);
7036 Upper = *C + 1;
7037 break;
7038 case SPF_SMAX:
7039 Lower = *C;
7040 Upper = APInt::getSignedMaxValue(BitWidth) + 1;
7041 break;
7042 default:
7043 break;
7044 }
7045 }
7046
7047 ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo,
7048 AssumptionCache *AC,
7049 const Instruction *CtxI,
7050 const DominatorTree *DT,
7051 unsigned Depth) {
7052 assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
7053
7054 if (Depth == MaxAnalysisRecursionDepth)
7055 return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
7056
7057 const APInt *C;
7058 if (match(V, m_APInt(C)))
7059 return ConstantRange(*C);
7060
7061 InstrInfoQuery IIQ(UseInstrInfo);
7062 unsigned BitWidth = V->getType()->getScalarSizeInBits();
7063 APInt Lower = APInt(BitWidth, 0);
7064 APInt Upper = APInt(BitWidth, 0);
7065 if (auto *BO = dyn_cast<BinaryOperator>(V))
7066 setLimitsForBinOp(*BO, Lower, Upper, IIQ);
7067 else if (auto *II = dyn_cast<IntrinsicInst>(V))
7068 setLimitsForIntrinsic(*II, Lower, Upper);
7069 else if (auto *SI = dyn_cast<SelectInst>(V))
7070 setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
7071
7072 ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
7073
7074 if (auto *I = dyn_cast<Instruction>(V))
7075 if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
7076 CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
7077
7078 if (CtxI && AC) {
7079 // Try to restrict the range based on information from assumptions.
7080 for (auto &AssumeVH : AC->assumptionsFor(V)) {
7081 if (!AssumeVH)
7082 continue;
7083 CallInst *I = cast<CallInst>(AssumeVH);
7084 assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
7085 "Got assumption for the wrong function!");
7086 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
7087 "must be an assume intrinsic");
7088
7089 if (!isValidAssumeForContext(I, CtxI, DT))
7090 continue;
7091 Value *Arg = I->getArgOperand(0);
7092 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
7093 // Currently we just use information from comparisons.
7094 if (!Cmp || Cmp->getOperand(0) != V)
7095 continue;
7096 ConstantRange RHS = computeConstantRange(Cmp->getOperand(1), UseInstrInfo,
7097 AC, I, DT, Depth + 1);
7098 CR = CR.intersectWith(
7099 ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS));
7100 }
7101 }
7102
7103 return CR;
7104 }
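
// Usage sketch (illustrative): prove a shift amount is always in bounds
// before rewriting a shift.
//
//   ConstantRange CR = computeConstantRange(ShAmt, /*UseInstrInfo=*/true);
//   if (CR.getUnsignedMax().ult(BitWidth)) {
//     // Every possible value of ShAmt is a valid shift amount.
//   }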
7105
7106 static Optional<int64_t>
7107 getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
7108 // Skip over the first indices.
7109 gep_type_iterator GTI = gep_type_begin(GEP);
7110 for (unsigned i = 1; i != Idx; ++i, ++GTI)
7111 /*skip along*/;
7112
7113 // Compute the offset implied by the rest of the indices.
7114 int64_t Offset = 0;
7115 for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
7116 ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
7117 if (!OpC)
7118 return None;
7119 if (OpC->isZero())
7120 continue; // No offset.
7121
7122 // Handle struct indices, which add their field offset to the pointer.
7123 if (StructType *STy = GTI.getStructTypeOrNull()) {
7124 Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
7125 continue;
7126 }
7127
7128 // Otherwise, we have a sequential type like an array or fixed-length
7129 // vector. Multiply the index by the ElementSize.
7130 TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
7131 if (Size.isScalable())
7132 return None;
7133 Offset += Size.getFixedSize() * OpC->getSExtValue();
7134 }
7135
7136 return Offset;
7137 }
7138
7139 Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
7140 const DataLayout &DL) {
7141 Ptr1 = Ptr1->stripPointerCasts();
7142 Ptr2 = Ptr2->stripPointerCasts();
7143
7144 // Handle the trivial case first.
7145 if (Ptr1 == Ptr2) {
7146 return 0;
7147 }
7148
7149 const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
7150 const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
7151
7152   // If one pointer is a GEP, see if the GEP is a constant offset from the
7153   // base, as in "P" and "gep P, 1".
7154   // Also do this iteratively to handle the following case:
7155 // Ptr_t1 = GEP Ptr1, c1
7156 // Ptr_t2 = GEP Ptr_t1, c2
7157 // Ptr2 = GEP Ptr_t2, c3
7158 // where we will return c1+c2+c3.
7159 // TODO: Handle the case when both Ptr1 and Ptr2 are GEPs of some common base
7160 // -- replace getOffsetFromBase with getOffsetAndBase, check that the bases
7161 // are the same, and return the difference between offsets.
7162 auto getOffsetFromBase = [&DL](const GEPOperator *GEP,
7163 const Value *Ptr) -> Optional<int64_t> {
7164 const GEPOperator *GEP_T = GEP;
7165 int64_t OffsetVal = 0;
7166 bool HasSameBase = false;
7167 while (GEP_T) {
7168 auto Offset = getOffsetFromIndex(GEP_T, 1, DL);
7169 if (!Offset)
7170 return None;
7171 OffsetVal += *Offset;
7172 auto Op0 = GEP_T->getOperand(0)->stripPointerCasts();
7173 if (Op0 == Ptr) {
7174 HasSameBase = true;
7175 break;
7176 }
7177 GEP_T = dyn_cast<GEPOperator>(Op0);
7178 }
7179 if (!HasSameBase)
7180 return None;
7181 return OffsetVal;
7182 };
7183
7184 if (GEP1) {
7185 auto Offset = getOffsetFromBase(GEP1, Ptr2);
7186 if (Offset)
7187 return -*Offset;
7188 }
7189 if (GEP2) {
7190 auto Offset = getOffsetFromBase(GEP2, Ptr1);
7191 if (Offset)
7192 return Offset;
7193 }
7194
7195 // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
7196   // base. After that base, they may have some number of common (and
7197   // potentially variable) indices. After those, each may add a constant
7198   // offset, which determines their offset from each other. We handle no
7199   // other cases at this point.
7200 if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
7201 return None;
7202
7203 // Skip any common indices and track the GEP types.
7204 unsigned Idx = 1;
7205 for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
7206 if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
7207 break;
7208
7209 auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL);
7210 auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);
7211 if (!Offset1 || !Offset2)
7212 return None;
7213 return *Offset2 - *Offset1;
7214 }
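
// Two illustrative queries (hypothetical IR):
//
//   %p1 = getelementptr i8, i8* %base, i64 4
//   %p2 = getelementptr i8, i8* %base, i64 12
//
//   isPointerOffset(%base, %p1, DL) --> 4
//   isPointerOffset(%p1, %p2, DL)   --> 8  (identical base, differing
//                                           constant indices)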
7215