//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
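/// For example, i32 and <4 x i32> both yield 32, while a pointer type yields
/// the DataLayout's pointer size for its address space.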
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
  /// (all of which can call computeKnownBits), and so on.
  std::array<const Value *, MaxAnalysisRecursionDepth> Excluded;

  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ;

  unsigned NumExcluded = 0;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, bool UseInstrInfo,
        OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
                                   const APInt &DemandedElts,
                                   APInt &DemandedLHS, APInt &DemandedRHS) {
  // The length of scalable vectors is unknown at compile time, thus we
  // cannot check their values.
  if (isa<ScalableVectorType>(Shuf->getType()))
    return false;

  int NumElts =
      cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
  DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts);
  if (DemandedElts.isNullValue())
    return true;
  // Simple case of a shuffle with zeroinitializer.
  if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
    DemandedLHS.setBit(0);
    return true;
  }
  for (int i = 0; i != NumMaskElts; ++i) {
    if (!DemandedElts[i])
      continue;
    int M = Shuf->getMaskValue(i);
    assert(M < (NumElts * 2) && "Invalid shuffle mask constant");

    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
    if (M == -1)
      return false;
    if (M < NumElts)
      DemandedLHS.setBit(M % NumElts);
    else
      DemandedRHS.setBit(M % NumElts);
  }

  return true;
}
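// Example: for %s = shufflevector <4 x i32> %a, <4 x i32> %b,
//                                 <4 x i32> <i32 0, i32 5, i32 2, i32 7>,
// demanding elements {1, 3} of %s demands elements {1, 3} of %b (mask values
// 5 and 7 select from the second operand) and nothing from %a.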

static void computeKnownBits(const Value *V, const APInt &DemandedElts,
                             KnownBits &Known, unsigned Depth, const Query &Q);

static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                             const Query &Q) {
  // FIXME: We currently have no way to represent the DemandedElts of a
  // scalable vector.
  if (isa<ScalableVectorType>(V->getType())) {
    Known.resetAll();
    return;
  }

  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
}

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                            KnownBits &Known, const DataLayout &DL,
                            unsigned Depth, AssumptionCache *AC,
                            const Instruction *CxtI, const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, DemandedElts, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                                  unsigned Depth, const Query &Q);

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                                 const DataLayout &DL, unsigned Depth,
                                 AssumptionCache *AC, const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, DemandedElts, Depth,
      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
  Value *M;
  if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(RHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(LHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}
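// Example: with M == 15, (%x & ~15) and (%y & 15) occupy disjoint bit
// positions, so the two operands can never share a set bit; an add of such
// operands is therefore equivalent to an or.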

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const Query &Q);

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
                            const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2, 0,
                           Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
                                   unsigned Depth, const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  // FIXME: We currently have no way to represent the DemandedElts of a
  // scalable vector.
  if (isa<ScalableVectorType>(V->getType()))
    return 1;

  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
  return ComputeNumSignBits(V, DemandedElts, Depth, Q);
}

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0,
                                   const Value *Op1, bool NSW,
                                   const APInt &DemandedElts,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);

  // If one operand is unknown and we have no nowrap information,
  // the result will be unknown independently of the second operand.
  if (KnownOut.isUnknown() && !NSW)
    return;

  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
  KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
}
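// Example: if both operands have their low four bits known zero, the sum does
// too, since no carry can be produced below bit 4.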

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                const APInt &DemandedElts, KnownBits &Known,
                                KnownBits &Known2, unsigned Depth,
                                const Query &Q) {
  computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative =
            (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
             Known2.isNonZero()) ||
            (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
    }
  }

  Known = KnownBits::computeForMul(Known, Known2);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}
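// Example: a mul nsw of two operands known to be negative is known
// non-negative by the logic above, e.g. (-3) * (-5) == 15.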

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
    Known.One &= UnsignedMax & Mask;
    Known.Zero &= ~UnsignedMax & Mask;
  }
}
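// Example: !range metadata {i8 0, i8 32} describes the half-open range
// [0, 32); UnsignedMin ^ UnsignedMax == 31 has three leading zeros in i8, so
// the top three bits are a common prefix and become known zero.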

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
          return EphValues.count(U);
        })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          append_range(WorkSet, U->operands());
      }
    }
  }

  return false;
}
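// Example: in "%cmp = icmp eq i32 %x, 0; call void @llvm.assume(i1 %cmp)",
// %cmp is ephemeral to the assume: it exists only to feed the assumption, so
// it must not be proved "true" from that same assumption.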

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::sideeffect:
      case Intrinsic::pseudoprobe:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::dbg_label:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::experimental_noalias_scope_decl:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (Inv->getParent() == CxtI->getParent()) {
    // If Inv and CxtI are in the same block, check if the assume (Inv) comes
    // before the context (CxtI).
    if (Inv->comesBefore(CxtI))
      return true;

    // Don't let an assume affect itself - this would cause the problems
    // `isEphemeralValueOf` is trying to prevent, and it would also make
    // the loop below go out of bounds.
    if (Inv == CxtI)
      return false;

    // The context comes first, but they're both in the same block.
    // Make sure there is nothing in between that might interrupt
    // the control flow, not even CxtI itself.
    for (BasicBlock::const_iterator I(CxtI), IE(Inv); I != IE; ++I)
      if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
        return false;

    return !isEphemeralValueOf(Inv, CxtI);
  }

  // Inv and CxtI are in different blocks.
  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  return false;
}
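// Example: an assume in a block that dominates the context is always usable;
// an assume later in the context's own block is usable only when every
// instruction in between is guaranteed to transfer execution to its
// successor (e.g. no calls that may throw or not return).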

static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
  // v u> y implies v != 0.
  if (Pred == ICmpInst::ICMP_UGT)
    return true;

  // Special-case v != 0 to also handle v != null.
  if (Pred == ICmpInst::ICMP_NE)
    return match(RHS, m_Zero());

  // All other predicates - rely on generic ConstantRange handling.
  const APInt *C;
  if (!match(RHS, m_APInt(C)))
    return false;

  ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
  return !TrueValues.contains(APInt::getNullValue(C->getBitWidth()));
}
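// Example: "icmp ugt %v, %y" excludes zero for any %y, whereas
// "icmp sgt %v, -1" does not, since its exact range [0, SINT_MAX] still
// contains zero.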

static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return false;

  if (Q.CxtI && V->getType()->isPointerTy()) {
    SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
    if (!NullPointerIsDefined(Q.CxtI->getFunction(),
                              V->getType()->getPointerAddressSpace()))
      AttrKinds.push_back(Attribute::Dereferenceable);

    if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
      return true;
  }

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getFunction() == Q.CxtI->getFunction() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *RHS;
    CmpInst::Predicate Pred;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
    if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
      return false;

    if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
      return true;
  }

  return false;
}

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Refine Known set if the pointer alignment is set by assume bundles.
  if (V->getType()->isPointerTy()) {
    if (RetainedKnowledge RK = getKnowledgeValidInContext(
            V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
      Known.Zero.setLowBits(Log2_32(RK.ArgValue));
    }
  }

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxAnalysisRecursionDepth)
      continue;

    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    // Note that ptrtoint may change the bitwidth.
    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));

    CmpInst::Predicate Pred;
    uint64_t C;
    switch (Cmp->getPredicate()) {
    default:
      break;
    case ICmpInst::ICMP_EQ:
      // assume(v = a)
      if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
        Known.Zero |= RHSKnown.Zero;
        Known.One |= RHSKnown.One;
        // assume(v & b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
        KnownBits MaskKnown =
            computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in the mask that are known to be one, we can
        // propagate known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & MaskKnown.One;
        Known.One |= RHSKnown.One & MaskKnown.One;
        // assume(~(v & b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
        KnownBits MaskKnown =
            computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in the mask that are known to be one, we can
        // propagate inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & MaskKnown.One;
        Known.One |= RHSKnown.Zero & MaskKnown.One;
        // assume(v | b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One |= RHSKnown.One & BKnown.Zero;
        // assume(~(v | b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & BKnown.Zero;
        Known.One |= RHSKnown.Zero & BKnown.Zero;
        // assume(v ^ b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // known bits from the RHS to V. For those bits in B that are known to
        // be one, we can propagate inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One |= RHSKnown.One & BKnown.Zero;
        Known.Zero |= RHSKnown.One & BKnown.One;
        Known.One |= RHSKnown.Zero & BKnown.One;
        // assume(~(v ^ b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V. For those bits in B that are
        // known to be one, we can propagate known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & BKnown.Zero;
        Known.One |= RHSKnown.Zero & BKnown.Zero;
        Known.Zero |= RHSKnown.Zero & BKnown.One;
        Known.One |= RHSKnown.One & BKnown.One;
        // assume(v << c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the right by C.
        RHSKnown.Zero.lshrInPlace(C);
        Known.Zero |= RHSKnown.Zero;
        RHSKnown.One.lshrInPlace(C);
        Known.One |= RHSKnown.One;
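        // Example: assume((%v << 4) == 0xA0) shifts the known bits of 0xA0
        // right by 4, pinning the low nibble of %v to 1010; the bits shifted
        // out remain unknown.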
827 // assume(~(v << c) = a)
828 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
829 m_Value(A))) &&
830 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
831 KnownBits RHSKnown =
832 computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
833 // For those bits in RHS that are known, we can propagate them inverted
834 // to known bits in V shifted to the right by C.
835 RHSKnown.One.lshrInPlace(C);
836 Known.Zero |= RHSKnown.One;
837 RHSKnown.Zero.lshrInPlace(C);
838 Known.One |= RHSKnown.Zero;
839 // assume(v >> c = a)
840 } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
841 m_Value(A))) &&
842 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
843 KnownBits RHSKnown =
844 computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
845 // For those bits in RHS that are known, we can propagate them to known
846 // bits in V shifted to the right by C.
847 Known.Zero |= RHSKnown.Zero << C;
848 Known.One |= RHSKnown.One << C;
849 // assume(~(v >> c) = a)
850 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
851 m_Value(A))) &&
852 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
853 KnownBits RHSKnown =
854 computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
855 // For those bits in RHS that are known, we can propagate them inverted
856 // to known bits in V shifted to the right by C.
857 Known.Zero |= RHSKnown.One << C;
858 Known.One |= RHSKnown.Zero << C;
859 }
860 break;
861 case ICmpInst::ICMP_SGE:
862 // assume(v >=_s c) where c is non-negative
863 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
864 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
865 KnownBits RHSKnown =
866 computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
867
868 if (RHSKnown.isNonNegative()) {
869 // We know that the sign bit is zero.
870 Known.makeNonNegative();
871 }
872 }
873 break;
874 case ICmpInst::ICMP_SGT:
875 // assume(v >_s c) where c is at least -1.
876 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
877 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
878 KnownBits RHSKnown =
879 computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
880
881 if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
882 // We know that the sign bit is zero.
883 Known.makeNonNegative();
884 }
885 }
886 break;
887 case ICmpInst::ICMP_SLE:
888 // assume(v <=_s c) where c is negative
889 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
890 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
891 KnownBits RHSKnown =
892 computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
893
894 if (RHSKnown.isNegative()) {
895 // We know that the sign bit is one.
896 Known.makeNegative();
897 }
898 }
899 break;
900 case ICmpInst::ICMP_SLT:
901 // assume(v <_s c) where c is non-positive
902 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
903 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
904 KnownBits RHSKnown =
905 computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
906
907 if (RHSKnown.isZero() || RHSKnown.isNegative()) {
908 // We know that the sign bit is one.
909 Known.makeNegative();
910 }
911 }
912 break;
913 case ICmpInst::ICMP_ULE:
914 // assume(v <=_u c)
915 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
916 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
917 KnownBits RHSKnown =
918 computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
919
920 // Whatever high bits in c are zero are known to be zero.
921 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
922 }
923 break;
924 case ICmpInst::ICMP_ULT:
925 // assume(v <_u c)
926 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
927 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
928 KnownBits RHSKnown =
929 computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
930
931 // If the RHS is known zero, then this assumption must be wrong (nothing
932 // is unsigned less than zero). Signal a conflict and get out of here.
933 if (RHSKnown.isZero()) {
934 Known.Zero.setAllBits();
935 Known.One.setAllBits();
936 break;
937 }
938
939 // Whatever high bits in c are zero are known to be zero (if c is a power
940 // of 2, then one more).
941 if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
942 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
943 else
944 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
945 }
946 break;
947 }
948 }
949
950 // If assumptions conflict with each other or previous known bits, then we
951 // have a logical fallacy. It's possible that the assumption is not reachable,
952 // so this isn't a real bug. On the other hand, the program may have undefined
953 // behavior, or we might have a bug in the compiler. We can't assert/crash, so
954 // clear out the known bits, try to warn the user, and hope for the best.
955 if (Known.Zero.intersects(Known.One)) {
956 Known.resetAll();
957
958 if (Q.ORE)
959 Q.ORE->emit([&]() {
960 auto *CxtI = const_cast<Instruction *>(Q.CxtI);
961 return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
962 CxtI)
963 << "Detected conflicting code assumptions. Program may "
964 "have undefined behavior, or compiler may have "
965 "internal error.";
966 });
967 }
968 }
969
970 /// Compute known bits from a shift operator, including those with a
971 /// non-constant shift amount. Known is the output of this function. Known2 is a
972 /// pre-allocated temporary with the same bit width as Known and on return
973 /// contains the known bit of the shift value source. KF is an
974 /// operator-specific function that, given the known-bits and a shift amount,
975 /// compute the implied known-bits of the shift operator's result respectively
976 /// for that shift amount. The results from calling KF are conservatively
977 /// combined for all permitted shift amounts.
computeKnownBitsFromShiftOperator(const Operator * I,const APInt & DemandedElts,KnownBits & Known,KnownBits & Known2,unsigned Depth,const Query & Q,function_ref<KnownBits (const KnownBits &,const KnownBits &)> KF)978 static void computeKnownBitsFromShiftOperator(
979 const Operator *I, const APInt &DemandedElts, KnownBits &Known,
980 KnownBits &Known2, unsigned Depth, const Query &Q,
981 function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
982 unsigned BitWidth = Known.getBitWidth();
983 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
984 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
985
986 // Note: We cannot use Known.Zero.getLimitedValue() here, because if
987 // BitWidth > 64 and any upper bits are known, we'll end up returning the
988 // limit value (which implies all bits are known).
989 uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
990 uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
991 bool ShiftAmtIsConstant = Known.isConstant();
992 bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);
993
994 if (ShiftAmtIsConstant) {
995 Known = KF(Known2, Known);
996
997 // If the known bits conflict, this must be an overflowing left shift, so
998 // the shift result is poison. We can return anything we want. Choose 0 for
999 // the best folding opportunity.
1000 if (Known.hasConflict())
1001 Known.setAllZero();
1002
1003 return;
1004 }
1005
1006 // If the shift amount could be greater than or equal to the bit-width of the
1007 // LHS, the value could be poison, but bail out because the check below is
1008 // expensive.
1009 // TODO: Should we just carry on?
1010 if (MaxShiftAmtIsOutOfRange) {
1011 Known.resetAll();
1012 return;
1013 }
1014
1015 // It would be more-clearly correct to use the two temporaries for this
1016 // calculation. Reusing the APInts here to prevent unnecessary allocations.
1017 Known.resetAll();
1018
1019 // If we know the shifter operand is nonzero, we can sometimes infer more
1020 // known bits. However this is expensive to compute, so be lazy about it and
1021 // only compute it when absolutely necessary.
1022 Optional<bool> ShifterOperandIsNonZero;
1023
1024 // Early exit if we can't constrain any well-defined shift amount.
1025 if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
1026 !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
1027 ShifterOperandIsNonZero =
1028 isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1029 if (!*ShifterOperandIsNonZero)
1030 return;
1031 }
1032
1033 Known.Zero.setAllBits();
1034 Known.One.setAllBits();
1035 for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
1036 // Combine the shifted known input bits only for those shift amounts
1037 // compatible with its known constraints.
1038 if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
1039 continue;
1040 if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
1041 continue;
1042 // If we know the shifter is nonzero, we may be able to infer more known
1043 // bits. This check is sunk down as far as possible to avoid the expensive
1044 // call to isKnownNonZero if the cheaper checks above fail.
1045 if (ShiftAmt == 0) {
1046 if (!ShifterOperandIsNonZero.hasValue())
1047 ShifterOperandIsNonZero =
1048 isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1049 if (*ShifterOperandIsNonZero)
1050 continue;
1051 }
1052
1053 Known = KnownBits::commonBits(
1054 Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
1055 }
1056
1057 // If the known bits conflict, the result is poison. Return a 0 and hope the
1058 // caller can further optimize that.
1059 if (Known.hasConflict())
1060 Known.setAllZero();
1061 }
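// Example: for a shl whose amount is known to be either 1 or 2 and whose
// input has its low two bits known zero, both permitted KF results keep the
// low three bits zero, so the combined result retains them.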

static void computeKnownBitsFromOperator(const Operator *I,
                                         const APInt &DemandedElts,
                                         KnownBits &Known, unsigned Depth,
                                         const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(BitWidth);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known &= Known2;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form add(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *X = nullptr, *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known |= Known2;
    break;
  case Instruction::Xor:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known ^= Known2;
    break;
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
                        Known, Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::udiv(Known, Known2);
    break;
  }
  case Instruction::Select: {
    const Value *LHS = nullptr, *RHS = nullptr;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
      switch (SPF) {
      default:
        llvm_unreachable("Unhandled select pattern flavor!");
      case SPF_SMAX:
        Known = KnownBits::smax(Known, Known2);
        break;
      case SPF_SMIN:
        Known = KnownBits::smin(Known, Known2);
        break;
      case SPF_UMAX:
        Known = KnownBits::umax(Known, Known2);
        break;
      case SPF_UMIN:
        Known = KnownBits::umin(Known, Known2);
        break;
      }
      break;
    }

    computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);

    if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (match(RHS, m_Neg(m_Specific(LHS))) &&
          Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
        Known.Zero.setSignBit();
    }

    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getPointerTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.anyextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW) {
        if (KnownVal.Zero.isSignBitSet())
          Result.Zero.setSignBit();
        if (KnownVal.One.isSignBitSet())
          Result.One.setSignBit();
      }
      return Result;
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    break;
  }
  case Instruction::LShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      return KnownBits::lshr(KnownVal, KnownAmt);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    break;
  }
  case Instruction::AShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      return KnownBits::ashr(KnownVal, KnownAmt);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::srem(Known, Known2);
    break;

  case Instruction::URem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::urem(Known, Known2);
    break;
  case Instruction::Alloca:
    Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
    break;
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // Accumulate the constant indices in a separate variable
    // to minimize the number of calls to computeForAddSub.
    APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      // TrailZ can only become smaller, short-circuit if we hit zero.
      if (Known.isUnknown())
        break;

      Value *Index = I->getOperand(i);

      // Handle case when index is zero.
      Constant *CIndex = dyn_cast<Constant>(Index);
      if (CIndex && CIndex->isZeroValue())
        continue;

      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        assert(CIndex &&
               "Access to structure field must be known at compile time");

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        AccConstIndices += Offset;
        continue;
      }

      // Handle array index arithmetic.
      Type *IndexedTy = GTI.getIndexedType();
      if (!IndexedTy->isSized()) {
        Known.resetAll();
        break;
      }

      unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
      KnownBits IndexBits(IndexBitWidth);
      computeKnownBits(Index, IndexBits, Depth + 1, Q);
      TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
      uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
      KnownBits ScalingFactor(IndexBitWidth);
      // Multiply by current sizeof type.
      // &A[i] == A + i * sizeof(*A[i]).
      if (IndexTypeSize.isScalable()) {
        // For scalable types the only thing we know about sizeof is
        // that this is a multiple of the minimum size.
        ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes));
      } else if (IndexBits.isConstant()) {
        APInt IndexConst = IndexBits.getConstant();
        APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
        IndexConst *= ScalingFactor;
        AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
        continue;
      } else {
        ScalingFactor =
            KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
      }
      IndexBits = KnownBits::computeForMul(IndexBits, ScalingFactor);
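      // Example: indexing an array of i32 multiplies the index by 4, so the
      // scaled index is always known to have its low two bits zero.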

      // If the offsets have a different width from the pointer, according
      // to the language reference we need to sign-extend or truncate them
      // to the width of the pointer.
      IndexBits = IndexBits.sextOrTrunc(BitWidth);

      // Note that inbounds does *not* guarantee nsw for the addition, as only
      // the offset is signed, while the base address is unsigned.
      Known = KnownBits::computeForAddSub(
          /*Add=*/true, /*NSW=*/false, Known, IndexBits);
    }
    if (!Known.isUnknown() && !AccConstIndices.isNullValue()) {
      KnownBits Index = KnownBits::makeConstant(AccConstIndices);
      Known = KnownBits::computeForAddSub(
          /*Add=*/true, /*NSW=*/false, Known, Index);
    }
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
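    // Example: for %i = phi [0, %entry], [%i.next, %loop] with
    // %i.next = add nsw i32 %i, 4, both the start value and the step have at
    // least two trailing zero bits, so %i keeps its low two bits known zero.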
1366 if (P->getNumIncomingValues() == 2) {
1367 for (unsigned i = 0; i != 2; ++i) {
1368 Value *L = P->getIncomingValue(i);
1369 Value *R = P->getIncomingValue(!i);
1370 Instruction *RInst = P->getIncomingBlock(!i)->getTerminator();
1371 Instruction *LInst = P->getIncomingBlock(i)->getTerminator();
1372 Operator *LU = dyn_cast<Operator>(L);
1373 if (!LU)
1374 continue;
1375 unsigned Opcode = LU->getOpcode();
1376 // Check for operations that have the property that if
1377 // both their operands have low zero bits, the result
1378 // will have low zero bits.
1379 if (Opcode == Instruction::Add ||
1380 Opcode == Instruction::Sub ||
1381 Opcode == Instruction::And ||
1382 Opcode == Instruction::Or ||
1383 Opcode == Instruction::Mul) {
1384 Value *LL = LU->getOperand(0);
1385 Value *LR = LU->getOperand(1);
1386 // Find a recurrence.
1387 if (LL == I)
1388 L = LR;
1389 else if (LR == I)
1390 L = LL;
1391 else
1392 continue; // Check for recurrence with L and R flipped.
1393
1394 // Change the context instruction to the "edge" that flows into the
1395 // phi. This is important because that is where the value is actually
1396 // "evaluated" even though it is used later somewhere else. (see also
1397 // D69571).
1398 Query RecQ = Q;
1399
1400 // Ok, we have a PHI of the form L op= R. Check for low
1401 // zero bits.
1402 RecQ.CxtI = RInst;
1403 computeKnownBits(R, Known2, Depth + 1, RecQ);
1404
1405           // Take the minimum number of known trailing zero bits of both inputs.
1406 KnownBits Known3(BitWidth);
1407 RecQ.CxtI = LInst;
1408 computeKnownBits(L, Known3, Depth + 1, RecQ);
1409
1410 Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1411 Known3.countMinTrailingZeros()));
1412
1413 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1414 if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1415             // If the initial value of the recurrence is non-negative and we are
1416             // adding a non-negative number with nsw, the result can only be
1417             // non-negative or poison, regardless of the number of times we
1418             // execute the add in the phi recurrence. If the initial value is
1419             // negative and we are adding a negative number with nsw, the result
1420             // can only be negative or poison. Similar arguments apply to sub and mul.
1421 //
1422 // (add non-negative, non-negative) --> non-negative
1423 // (add negative, negative) --> negative
1424 if (Opcode == Instruction::Add) {
1425 if (Known2.isNonNegative() && Known3.isNonNegative())
1426 Known.makeNonNegative();
1427 else if (Known2.isNegative() && Known3.isNegative())
1428 Known.makeNegative();
1429 }
1430
1431 // (sub nsw non-negative, negative) --> non-negative
1432 // (sub nsw negative, non-negative) --> negative
1433 else if (Opcode == Instruction::Sub && LL == I) {
1434 if (Known2.isNonNegative() && Known3.isNegative())
1435 Known.makeNonNegative();
1436 else if (Known2.isNegative() && Known3.isNonNegative())
1437 Known.makeNegative();
1438 }
1439
1440 // (mul nsw non-negative, non-negative) --> non-negative
1441 else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1442 Known3.isNonNegative())
1443 Known.makeNonNegative();
1444 }
1445
1446 break;
1447 }
1448 }
1449 }
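    // Illustrative example of the recurrence handling above (hypothetical IR):
    //   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
    //   %iv.next = add i64 %iv, 8
    // The start value (0) and the step (8) each have at least three known
    // trailing zero bits, so %iv is known to be a multiple of 8.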
1450
1451 // Unreachable blocks may have zero-operand PHI nodes.
1452 if (P->getNumIncomingValues() == 0)
1453 break;
1454
1455 // Otherwise take the unions of the known bit sets of the operands,
1456 // taking conservative care to avoid excessive recursion.
1457 if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
1458       // Skip if every incoming value references the PHI itself.
1459 if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1460 break;
1461
1462 Known.Zero.setAllBits();
1463 Known.One.setAllBits();
1464 for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1465 Value *IncValue = P->getIncomingValue(u);
1466 // Skip direct self references.
1467 if (IncValue == P) continue;
1468
1469 // Change the context instruction to the "edge" that flows into the
1470 // phi. This is important because that is where the value is actually
1471 // "evaluated" even though it is used later somewhere else. (see also
1472 // D69571).
1473 Query RecQ = Q;
1474 RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1475
1476 Known2 = KnownBits(BitWidth);
1477 // Recurse, but cap the recursion to one level, because we don't
1478 // want to waste time spinning around in loops.
1479 computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1480 Known = KnownBits::commonBits(Known, Known2);
1481 // If all bits have been ruled out, there's no need to check
1482 // more operands.
1483 if (Known.isUnknown())
1484 break;
1485 }
1486 }
1487 break;
1488 }
1489 case Instruction::Call:
1490 case Instruction::Invoke:
1491 // If range metadata is attached to this call, set known bits from that,
1492 // and then intersect with known bits based on other properties of the
1493 // function.
1494 if (MDNode *MD =
1495 Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1496 computeKnownBitsFromRangeMetadata(*MD, Known);
1497 if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1498 computeKnownBits(RV, Known2, Depth + 1, Q);
1499 Known.Zero |= Known2.Zero;
1500 Known.One |= Known2.One;
1501 }
1502 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1503 switch (II->getIntrinsicID()) {
1504 default: break;
1505 case Intrinsic::abs: {
1506 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1507 bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
1508 Known = Known2.abs(IntMinIsPoison);
1509 break;
1510 }
1511 case Intrinsic::bitreverse:
1512 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1513 Known.Zero |= Known2.Zero.reverseBits();
1514 Known.One |= Known2.One.reverseBits();
1515 break;
1516 case Intrinsic::bswap:
1517 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1518 Known.Zero |= Known2.Zero.byteSwap();
1519 Known.One |= Known2.One.byteSwap();
1520 break;
1521 case Intrinsic::ctlz: {
1522 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1523 // If we have a known 1, its position is our upper bound.
1524 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1525       // If this call is undefined for 0, the result must be less than BitWidth.
1526 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1527 PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1528 unsigned LowBits = Log2_32(PossibleLZ)+1;
1529 Known.Zero.setBitsFrom(LowBits);
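      // For example, on i32, if Known2.countMaxLeadingZeros() == 10, the ctlz
      // result is at most 10, which fits in Log2_32(10) + 1 == 4 bits, so all
      // result bits from bit 4 upwards are known zero.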
1530 break;
1531 }
1532 case Intrinsic::cttz: {
1533 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1534 // If we have a known 1, its position is our upper bound.
1535 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1536       // If this call is undefined for 0, the result must be less than BitWidth.
1537 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1538 PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1539 unsigned LowBits = Log2_32(PossibleTZ)+1;
1540 Known.Zero.setBitsFrom(LowBits);
1541 break;
1542 }
1543 case Intrinsic::ctpop: {
1544 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1545 // We can bound the space the count needs. Also, bits known to be zero
1546 // can't contribute to the population.
1547 unsigned BitsPossiblySet = Known2.countMaxPopulation();
1548 unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1549 Known.Zero.setBitsFrom(LowBits);
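      // For example, if at most 5 bits of an i8 value can be set, ctpop returns
      // at most 5, which fits in Log2_32(5) + 1 == 3 bits, so bits 3 and above
      // of the result are known zero.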
1550 // TODO: we could bound KnownOne using the lower bound on the number
1551 // of bits which might be set provided by popcnt KnownOne2.
1552 break;
1553 }
1554 case Intrinsic::fshr:
1555 case Intrinsic::fshl: {
1556 const APInt *SA;
1557 if (!match(I->getOperand(2), m_APInt(SA)))
1558 break;
1559
1560 // Normalize to funnel shift left.
1561 uint64_t ShiftAmt = SA->urem(BitWidth);
1562 if (II->getIntrinsicID() == Intrinsic::fshr)
1563 ShiftAmt = BitWidth - ShiftAmt;
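      // For example, fshr(a, b, 3) on i8 equals fshl(a, b, 5): both compute
      // (a << 5) | (b >> 3), so one set of shift formulas below suffices.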
1564
1565 KnownBits Known3(BitWidth);
1566 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1567 computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1568
1569 Known.Zero =
1570 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1571 Known.One =
1572 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1573 break;
1574 }
1575 case Intrinsic::uadd_sat:
1576 case Intrinsic::usub_sat: {
1577 bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1578 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1579 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1580
1581 // Add: Leading ones of either operand are preserved.
1582 // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1583 // as leading zeros in the result.
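      // For example, if one i8 operand of uadd.sat is known to be >= 0xC0 (two
      // leading ones), the saturating sum is also >= 0xC0, since uadd.sat never
      // returns less than either operand; those two leading ones survive.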
1584 unsigned LeadingKnown;
1585 if (IsAdd)
1586 LeadingKnown = std::max(Known.countMinLeadingOnes(),
1587 Known2.countMinLeadingOnes());
1588 else
1589 LeadingKnown = std::max(Known.countMinLeadingZeros(),
1590 Known2.countMinLeadingOnes());
1591
1592 Known = KnownBits::computeForAddSub(
1593 IsAdd, /* NSW */ false, Known, Known2);
1594
1595 // We select between the operation result and all-ones/zero
1596 // respectively, so we can preserve known ones/zeros.
1597 if (IsAdd) {
1598 Known.One.setHighBits(LeadingKnown);
1599 Known.Zero.clearAllBits();
1600 } else {
1601 Known.Zero.setHighBits(LeadingKnown);
1602 Known.One.clearAllBits();
1603 }
1604 break;
1605 }
1606 case Intrinsic::umin:
1607 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1608 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1609 Known = KnownBits::umin(Known, Known2);
1610 break;
1611 case Intrinsic::umax:
1612 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1613 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1614 Known = KnownBits::umax(Known, Known2);
1615 break;
1616 case Intrinsic::smin:
1617 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1618 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1619 Known = KnownBits::smin(Known, Known2);
1620 break;
1621 case Intrinsic::smax:
1622 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1623 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1624 Known = KnownBits::smax(Known, Known2);
1625 break;
1626 case Intrinsic::x86_sse42_crc32_64_64:
1627 Known.Zero.setBitsFrom(32);
1628 break;
1629 }
1630 }
1631 break;
1632 case Instruction::ShuffleVector: {
1633 auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1634 // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1635 if (!Shuf) {
1636 Known.resetAll();
1637 return;
1638 }
1639 // For undef elements, we don't know anything about the common state of
1640 // the shuffle result.
1641 APInt DemandedLHS, DemandedRHS;
1642 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1643 Known.resetAll();
1644 return;
1645 }
1646 Known.One.setAllBits();
1647 Known.Zero.setAllBits();
1648 if (!!DemandedLHS) {
1649 const Value *LHS = Shuf->getOperand(0);
1650 computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1651 // If we don't know any bits, early out.
1652 if (Known.isUnknown())
1653 break;
1654 }
1655 if (!!DemandedRHS) {
1656 const Value *RHS = Shuf->getOperand(1);
1657 computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1658 Known = KnownBits::commonBits(Known, Known2);
1659 }
1660 break;
1661 }
1662 case Instruction::InsertElement: {
1663 const Value *Vec = I->getOperand(0);
1664 const Value *Elt = I->getOperand(1);
1665 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1666 // Early out if the index is non-constant or out-of-range.
1667 unsigned NumElts = DemandedElts.getBitWidth();
1668 if (!CIdx || CIdx->getValue().uge(NumElts)) {
1669 Known.resetAll();
1670 return;
1671 }
1672 Known.One.setAllBits();
1673 Known.Zero.setAllBits();
1674 unsigned EltIdx = CIdx->getZExtValue();
1675 // Do we demand the inserted element?
1676 if (DemandedElts[EltIdx]) {
1677 computeKnownBits(Elt, Known, Depth + 1, Q);
1678 // If we don't know any bits, early out.
1679 if (Known.isUnknown())
1680 break;
1681 }
1682 // We don't need the base vector element that has been inserted.
1683 APInt DemandedVecElts = DemandedElts;
1684 DemandedVecElts.clearBit(EltIdx);
1685 if (!!DemandedVecElts) {
1686 computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1687 Known = KnownBits::commonBits(Known, Known2);
1688 }
1689 break;
1690 }
1691 case Instruction::ExtractElement: {
1692 // Look through extract element. If the index is non-constant or
1693 // out-of-range demand all elements, otherwise just the extracted element.
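    // Illustrative example (hypothetical IR): for
    //   %x = extractelement <4 x i32> %v, i64 2
    // only element 2 of %v is demanded, so the known bits of the other three
    // elements place no constraint on the result.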
1694 const Value *Vec = I->getOperand(0);
1695 const Value *Idx = I->getOperand(1);
1696 auto *CIdx = dyn_cast<ConstantInt>(Idx);
1697 if (isa<ScalableVectorType>(Vec->getType())) {
1698 // FIXME: there's probably *something* we can do with scalable vectors
1699 Known.resetAll();
1700 break;
1701 }
1702 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1703 APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
1704 if (CIdx && CIdx->getValue().ult(NumElts))
1705 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1706 computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1707 break;
1708 }
1709 case Instruction::ExtractValue:
1710 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1711 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1712 if (EVI->getNumIndices() != 1) break;
1713 if (EVI->getIndices()[0] == 0) {
1714 switch (II->getIntrinsicID()) {
1715 default: break;
1716 case Intrinsic::uadd_with_overflow:
1717 case Intrinsic::sadd_with_overflow:
1718 computeKnownBitsAddSub(true, II->getArgOperand(0),
1719 II->getArgOperand(1), false, DemandedElts,
1720 Known, Known2, Depth, Q);
1721 break;
1722 case Intrinsic::usub_with_overflow:
1723 case Intrinsic::ssub_with_overflow:
1724 computeKnownBitsAddSub(false, II->getArgOperand(0),
1725 II->getArgOperand(1), false, DemandedElts,
1726 Known, Known2, Depth, Q);
1727 break;
1728 case Intrinsic::umul_with_overflow:
1729 case Intrinsic::smul_with_overflow:
1730 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1731 DemandedElts, Known, Known2, Depth, Q);
1732 break;
1733 }
1734 }
1735 }
1736 break;
1737 case Instruction::Freeze:
1738 if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1739 Depth + 1))
1740 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1741 break;
1742 }
1743 }
1744
1745 /// Determine which bits of V are known to be either zero or one and return
1746 /// them.
1747 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1748 unsigned Depth, const Query &Q) {
1749 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1750 computeKnownBits(V, DemandedElts, Known, Depth, Q);
1751 return Known;
1752 }
1753
1754 /// Determine which bits of V are known to be either zero or one and return
1755 /// them.
1756 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1757 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1758 computeKnownBits(V, Known, Depth, Q);
1759 return Known;
1760 }
1761
1762 /// Determine which bits of V are known to be either zero or one and return
1763 /// them in the Known bit set.
1764 ///
1765 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1766 /// we cannot optimize based on the assumption that it is zero without changing
1767 /// it to be an explicit zero. If we don't change it to zero, other code could
1768 /// be optimized based on the contradictory assumption that it is non-zero.
1769 /// Because instcombine aggressively folds operations with undef args anyway,
1770 /// this won't lose us code quality.
1771 ///
1772 /// This function is defined on values with integer type, values with pointer
1773 /// type, and vectors of integers. In the case
1774 /// where V is a vector, the known zero and known one values are the
1775 /// same width as the vector element, and the bit is set only if it is true
1776 /// for all of the demanded elements in the vector specified by DemandedElts.
1777 void computeKnownBits(const Value *V, const APInt &DemandedElts,
1778 KnownBits &Known, unsigned Depth, const Query &Q) {
1779 if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
1780 // No demanded elts or V is a scalable vector, better to assume we don't
1781 // know anything.
1782 Known.resetAll();
1783 return;
1784 }
1785
1786 assert(V && "No Value?");
1787 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1788
1789 #ifndef NDEBUG
1790 Type *Ty = V->getType();
1791 unsigned BitWidth = Known.getBitWidth();
1792
1793 assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1794 "Not integer or pointer type!");
1795
1796 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1797 assert(
1798 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1799 "DemandedElt width should equal the fixed vector number of elements");
1800 } else {
1801 assert(DemandedElts == APInt(1, 1) &&
1802 "DemandedElt width should be 1 for scalars");
1803 }
1804
1805 Type *ScalarTy = Ty->getScalarType();
1806 if (ScalarTy->isPointerTy()) {
1807 assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1808 "V and Known should have same BitWidth");
1809 } else {
1810 assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1811 "V and Known should have same BitWidth");
1812 }
1813 #endif
1814
1815 const APInt *C;
1816 if (match(V, m_APInt(C))) {
1817 // We know all of the bits for a scalar constant or a splat vector constant!
1818 Known = KnownBits::makeConstant(*C);
1819 return;
1820 }
1821 // Null and aggregate-zero are all-zeros.
1822 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1823 Known.setAllZero();
1824 return;
1825 }
1826 // Handle a constant vector by taking the intersection of the known bits of
1827 // each element.
1828 if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
1829 // We know that CDV must be a vector of integers. Take the intersection of
1830 // each element.
1831 Known.Zero.setAllBits(); Known.One.setAllBits();
1832 for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1833 if (!DemandedElts[i])
1834 continue;
1835 APInt Elt = CDV->getElementAsAPInt(i);
1836 Known.Zero &= ~Elt;
1837 Known.One &= Elt;
1838 }
1839 return;
1840 }
1841
1842 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1843 // We know that CV must be a vector of integers. Take the intersection of
1844 // each element.
1845 Known.Zero.setAllBits(); Known.One.setAllBits();
1846 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1847 if (!DemandedElts[i])
1848 continue;
1849 Constant *Element = CV->getAggregateElement(i);
1850 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1851 if (!ElementCI) {
1852 Known.resetAll();
1853 return;
1854 }
1855 const APInt &Elt = ElementCI->getValue();
1856 Known.Zero &= ~Elt;
1857 Known.One &= Elt;
1858 }
1859 return;
1860 }
1861
1862 // Start out not knowing anything.
1863 Known.resetAll();
1864
1865 // We can't imply anything about undefs.
1866 if (isa<UndefValue>(V))
1867 return;
1868
1869 // There's no point in looking through other users of ConstantData for
1870 // assumptions. Confirm that we've handled them all.
1871 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1872
1873 // All recursive calls that increase depth must come after this.
1874 if (Depth == MaxAnalysisRecursionDepth)
1875 return;
1876
1877   // An interposable GlobalAlias (e.g. a weak alias) is totally unknown.
1878   // A non-interposable GlobalAlias has the bits of its aliasee.
1879 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1880 if (!GA->isInterposable())
1881 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1882 return;
1883 }
1884
1885 if (const Operator *I = dyn_cast<Operator>(V))
1886 computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
1887
1888 // Aligned pointers have trailing zeros - refine Known.Zero set
1889 if (isa<PointerType>(V->getType())) {
1890 Align Alignment = V->getPointerAlignment(Q.DL);
1891 Known.Zero.setLowBits(Log2(Alignment));
1892 }
1893
1894 // computeKnownBitsFromAssume strictly refines Known.
1895   // Therefore, we run it after computeKnownBitsFromOperator.
1896
1897 // Check whether a nearby assume intrinsic can determine some known bits.
1898 computeKnownBitsFromAssume(V, Known, Depth, Q);
1899
1900 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1901 }
1902
1903 /// Return true if the given value is known to have exactly one
1904 /// bit set when defined. For vectors return true if every element is known to
1905 /// be a power of two when defined. Supports values with integer or pointer
1906 /// types and vectors of integers.
1907 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1908 const Query &Q) {
1909 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1910
1911 // Attempt to match against constants.
1912 if (OrZero && match(V, m_Power2OrZero()))
1913 return true;
1914 if (match(V, m_Power2()))
1915 return true;
1916
1917 // 1 << X is clearly a power of two if the one is not shifted off the end. If
1918 // it is shifted off the end then the result is undefined.
1919 if (match(V, m_Shl(m_One(), m_Value())))
1920 return true;
1921
1922 // (signmask) >>l X is clearly a power of two if the one is not shifted off
1923 // the bottom. If it is shifted off the bottom then the result is undefined.
1924 if (match(V, m_LShr(m_SignMask(), m_Value())))
1925 return true;
1926
1927 // The remaining tests are all recursive, so bail out if we hit the limit.
1928 if (Depth++ == MaxAnalysisRecursionDepth)
1929 return false;
1930
1931 Value *X = nullptr, *Y = nullptr;
1932 // A shift left or a logical shift right of a power of two is a power of two
1933 // or zero.
1934 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1935 match(V, m_LShr(m_Value(X), m_Value()))))
1936 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1937
1938 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1939 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1940
1941 if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1942 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1943 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1944
1945 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1946 // A power of two and'd with anything is a power of two or zero.
1947 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1948 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1949 return true;
1950 // X & (-X) is always a power of two or zero.
1951 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1952 return true;
1953 return false;
1954 }
1955
1956 // Adding a power-of-two or zero to the same power-of-two or zero yields
1957 // either the original power-of-two, a larger power-of-two or zero.
1958 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1959 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1960 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
1961 Q.IIQ.hasNoSignedWrap(VOBO)) {
1962 if (match(X, m_And(m_Specific(Y), m_Value())) ||
1963 match(X, m_And(m_Value(), m_Specific(Y))))
1964 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1965 return true;
1966 if (match(Y, m_And(m_Specific(X), m_Value())) ||
1967 match(Y, m_And(m_Value(), m_Specific(X))))
1968 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1969 return true;
1970
1971 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1972 KnownBits LHSBits(BitWidth);
1973 computeKnownBits(X, LHSBits, Depth, Q);
1974
1975 KnownBits RHSBits(BitWidth);
1976 computeKnownBits(Y, RHSBits, Depth, Q);
1977 // If i8 V is a power of two or zero:
1978 // ZeroBits: 1 1 1 0 1 1 1 1
1979 // ~ZeroBits: 0 0 0 1 0 0 0 0
1980 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
1981 // If OrZero isn't set, we cannot give back a zero result.
1982 // Make sure either the LHS or RHS has a bit set.
1983 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
1984 return true;
1985 }
1986 }
1987
1988 // An exact divide or right shift can only shift off zero bits, so the result
1989 // is a power of two only if the first operand is a power of two and not
1990 // copying a sign bit (sdiv int_min, 2).
1991 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1992 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1993 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1994 Depth, Q);
1995 }
1996
1997 return false;
1998 }
1999
2000 /// Test whether a GEP's result is known to be non-null.
2001 ///
2002 /// Uses properties inherent in a GEP to try to determine whether it is known
2003 /// to be non-null.
2004 ///
2005 /// Currently this routine does not support vector GEPs.
2006 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2007 const Query &Q) {
2008 const Function *F = nullptr;
2009 if (const Instruction *I = dyn_cast<Instruction>(GEP))
2010 F = I->getFunction();
2011
2012 if (!GEP->isInBounds() ||
2013 NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2014 return false;
2015
2016 // FIXME: Support vector-GEPs.
2017 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2018
2019 // If the base pointer is non-null, we cannot walk to a null address with an
2020 // inbounds GEP in address space zero.
2021 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2022 return true;
2023
2024 // Walk the GEP operands and see if any operand introduces a non-zero offset.
2025 // If so, then the GEP cannot produce a null pointer, as doing so would
2026 // inherently violate the inbounds contract within address space zero.
2027 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2028 GTI != GTE; ++GTI) {
2029 // Struct types are easy -- they must always be indexed by a constant.
2030 if (StructType *STy = GTI.getStructTypeOrNull()) {
2031 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2032 unsigned ElementIdx = OpC->getZExtValue();
2033 const StructLayout *SL = Q.DL.getStructLayout(STy);
2034 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2035 if (ElementOffset > 0)
2036 return true;
2037 continue;
2038 }
2039
2040 // If we have a zero-sized type, the index doesn't matter. Keep looping.
2041 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
2042 continue;
2043
2044 // Fast path the constant operand case both for efficiency and so we don't
2045 // increment Depth when just zipping down an all-constant GEP.
2046 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2047 if (!OpC->isZero())
2048 return true;
2049 continue;
2050 }
2051
2052 // We post-increment Depth here because while isKnownNonZero increments it
2053 // as well, when we pop back up that increment won't persist. We don't want
2054 // to recurse 10k times just because we have 10k GEP operands. We don't
2055 // bail completely out because we want to handle constant GEPs regardless
2056 // of depth.
2057 if (Depth++ >= MaxAnalysisRecursionDepth)
2058 continue;
2059
2060 if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2061 return true;
2062 }
2063
2064 return false;
2065 }
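// Illustrative example (hypothetical IR): in address space 0,
//   %f = getelementptr inbounds %struct.S, %struct.S* %p, i64 0, i32 1
// is known non-null whenever field 1 has a non-zero offset, since an
// inbounds GEP that adds a non-zero offset cannot yield null.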
2066
2067 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2068 const Instruction *CtxI,
2069 const DominatorTree *DT) {
2070 if (isa<Constant>(V))
2071 return false;
2072
2073 if (!CtxI || !DT)
2074 return false;
2075
2076 unsigned NumUsesExplored = 0;
2077 for (auto *U : V->users()) {
2078 // Avoid massive lists
2079 if (NumUsesExplored >= DomConditionsMaxUses)
2080 break;
2081 NumUsesExplored++;
2082
2083 // If the value is used as an argument to a call or invoke, then argument
2084 // attributes may provide an answer about null-ness.
2085 if (const auto *CB = dyn_cast<CallBase>(U))
2086 if (auto *CalledFunc = CB->getCalledFunction())
2087 for (const Argument &Arg : CalledFunc->args())
2088 if (CB->getArgOperand(Arg.getArgNo()) == V &&
2089 Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
2090 DT->dominates(CB, CtxI))
2091 return true;
2092
2093 // If the value is used as a load/store, then the pointer must be non null.
2094 if (V == getLoadStorePointerOperand(U)) {
2095 const Instruction *I = cast<Instruction>(U);
2096 if (!NullPointerIsDefined(I->getFunction(),
2097 V->getType()->getPointerAddressSpace()) &&
2098 DT->dominates(I, CtxI))
2099 return true;
2100 }
2101
2102 // Consider only compare instructions uniquely controlling a branch
2103 Value *RHS;
2104 CmpInst::Predicate Pred;
2105 if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
2106 continue;
2107
2108 bool NonNullIfTrue;
2109 if (cmpExcludesZero(Pred, RHS))
2110 NonNullIfTrue = true;
2111 else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
2112 NonNullIfTrue = false;
2113 else
2114 continue;
2115
2116 SmallVector<const User *, 4> WorkList;
2117 SmallPtrSet<const User *, 4> Visited;
2118 for (auto *CmpU : U->users()) {
2119 assert(WorkList.empty() && "Should be!");
2120 if (Visited.insert(CmpU).second)
2121 WorkList.push_back(CmpU);
2122
2123 while (!WorkList.empty()) {
2124 auto *Curr = WorkList.pop_back_val();
2125
2126         // If a user is an AND, add all its users to the work list. We only
2127         // propagate the "pred != null" condition through AND because it is
2128         // only correct to assume that all conditions of the AND hold on the
2129         // true branch. TODO: Support similar logic for OR and the EQ predicate?
2130 if (NonNullIfTrue)
2131 if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
2132 for (auto *CurrU : Curr->users())
2133 if (Visited.insert(CurrU).second)
2134 WorkList.push_back(CurrU);
2135 continue;
2136 }
2137
2138 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2139 assert(BI->isConditional() && "uses a comparison!");
2140
2141 BasicBlock *NonNullSuccessor =
2142 BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2143 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2144 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2145 return true;
2146 } else if (NonNullIfTrue && isGuard(Curr) &&
2147 DT->dominates(cast<Instruction>(Curr), CtxI)) {
2148 return true;
2149 }
2150 }
2151 }
2152 }
2153
2154 return false;
2155 }
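// Illustrative example (hypothetical IR):
//   %c = icmp ne i8* %p, null
//   br i1 %c, label %taken, label %other
// Any context instruction dominated by the edge into %taken may treat %p
// as non-null.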
2156
2157 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2158 /// ensure that the value it's attached to is never equal to 'Value'?
2160 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2161 const unsigned NumRanges = Ranges->getNumOperands() / 2;
2162 assert(NumRanges >= 1);
2163 for (unsigned i = 0; i < NumRanges; ++i) {
2164 ConstantInt *Lower =
2165 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2166 ConstantInt *Upper =
2167 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2168 ConstantRange Range(Lower->getValue(), Upper->getValue());
2169 if (Range.contains(Value))
2170 return false;
2171 }
2172 return true;
2173 }
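// For example, !range !{i32 1, i32 256} describes the half-open interval
// [1, 256), which does not contain 0, so zero is excluded.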
2174
2175 /// Return true if the given value is known to be non-zero when defined. For
2176 /// vectors, return true if every demanded element is known to be non-zero when
2177 /// defined. For pointers, if the context instruction and dominator tree are
2178 /// specified, perform context-sensitive analysis and return true if the
2179 /// pointer couldn't possibly be null at the specified instruction.
2180 /// Supports values with integer or pointer type and vectors of integers.
2181 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2182 const Query &Q) {
2183 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2184 // vector
2185 if (isa<ScalableVectorType>(V->getType()))
2186 return false;
2187
2188 if (auto *C = dyn_cast<Constant>(V)) {
2189 if (C->isNullValue())
2190 return false;
2191 if (isa<ConstantInt>(C))
2192 // Must be non-zero due to null test above.
2193 return true;
2194
2195 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2196 // See the comment for IntToPtr/PtrToInt instructions below.
2197 if (CE->getOpcode() == Instruction::IntToPtr ||
2198 CE->getOpcode() == Instruction::PtrToInt)
2199 if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType())
2200 .getFixedSize() <=
2201 Q.DL.getTypeSizeInBits(CE->getType()).getFixedSize())
2202 return isKnownNonZero(CE->getOperand(0), Depth, Q);
2203 }
2204
2205 // For constant vectors, check that all elements are undefined or known
2206 // non-zero to determine that the whole vector is known non-zero.
2207 if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2208 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2209 if (!DemandedElts[i])
2210 continue;
2211 Constant *Elt = C->getAggregateElement(i);
2212 if (!Elt || Elt->isNullValue())
2213 return false;
2214 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2215 return false;
2216 }
2217 return true;
2218 }
2219
2220     // A global variable in address space 0 is non-null unless extern weak
2221 // or an absolute symbol reference. Other address spaces may have null as a
2222 // valid address for a global, so we can't assume anything.
2223 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2224 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2225 GV->getType()->getAddressSpace() == 0)
2226 return true;
2227 } else
2228 return false;
2229 }
2230
2231 if (auto *I = dyn_cast<Instruction>(V)) {
2232 if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2233 // If the possible ranges don't contain zero, then the value is
2234 // definitely non-zero.
2235 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2236 const APInt ZeroValue(Ty->getBitWidth(), 0);
2237 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2238 return true;
2239 }
2240 }
2241 }
2242
2243 if (isKnownNonZeroFromAssume(V, Q))
2244 return true;
2245
2246 // Some of the tests below are recursive, so bail out if we hit the limit.
2247 if (Depth++ >= MaxAnalysisRecursionDepth)
2248 return false;
2249
2250 // Check for pointer simplifications.
2251
2252 if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2253 // Alloca never returns null, malloc might.
2254 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2255 return true;
2256
2257     // A byval or inalloca argument is never null unless null is a valid
2258     // address in its address space. A nonnull argument is assumed never null.
2259 if (const Argument *A = dyn_cast<Argument>(V)) {
2260 if (((A->hasPassPointeeByValueCopyAttr() &&
2261 !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2262 A->hasNonNullAttr()))
2263 return true;
2264 }
2265
2266 // A Load tagged with nonnull metadata is never null.
2267 if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2268 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2269 return true;
2270
2271 if (const auto *Call = dyn_cast<CallBase>(V)) {
2272 if (Call->isReturnNonNull())
2273 return true;
2274 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2275 return isKnownNonZero(RP, Depth, Q);
2276 }
2277 }
2278
2279 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2280 return true;
2281
2282 // Check for recursive pointer simplifications.
2283 if (V->getType()->isPointerTy()) {
2284 // Look through bitcast operations, GEPs, and int2ptr instructions as they
2285 // do not alter the value, or at least not the nullness property of the
2286 // value, e.g., int2ptr is allowed to zero/sign extend the value.
2287 //
2288 // Note that we have to take special care to avoid looking through
2289 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2290 // as casts that can alter the value, e.g., AddrSpaceCasts.
2291 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2292 return isGEPKnownNonNull(GEP, Depth, Q);
2293
2294 if (auto *BCO = dyn_cast<BitCastOperator>(V))
2295 return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2296
2297 if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2298 if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()).getFixedSize() <=
2299 Q.DL.getTypeSizeInBits(I2P->getDestTy()).getFixedSize())
2300 return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2301 }
2302
2303 // Similar to int2ptr above, we can look through ptr2int here if the cast
2304 // is a no-op or an extend and not a truncate.
2305 if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2306 if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()).getFixedSize() <=
2307 Q.DL.getTypeSizeInBits(P2I->getDestTy()).getFixedSize())
2308 return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2309
2310 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2311
2312 // X | Y != 0 if X != 0 or Y != 0.
2313 Value *X = nullptr, *Y = nullptr;
2314 if (match(V, m_Or(m_Value(X), m_Value(Y))))
2315 return isKnownNonZero(X, DemandedElts, Depth, Q) ||
2316 isKnownNonZero(Y, DemandedElts, Depth, Q);
2317
2318 // ext X != 0 if X != 0.
2319 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2320 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2321
2322 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
2323 // if the lowest bit is shifted off the end.
2324 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2325 // shl nuw can't remove any non-zero bits.
2326 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2327 if (Q.IIQ.hasNoUnsignedWrap(BO))
2328 return isKnownNonZero(X, Depth, Q);
2329
2330 KnownBits Known(BitWidth);
2331 computeKnownBits(X, DemandedElts, Known, Depth, Q);
2332 if (Known.One[0])
2333 return true;
2334 }
2335 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
2336 // defined if the sign bit is shifted off the end.
2337 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2338 // shr exact can only shift out zero bits.
2339 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2340 if (BO->isExact())
2341 return isKnownNonZero(X, Depth, Q);
2342
2343 KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q);
2344 if (Known.isNegative())
2345 return true;
2346
2347 // If the shifter operand is a constant, and all of the bits shifted
2348 // out are known to be zero, and X is known non-zero then at least one
2349 // non-zero bit must remain.
2350 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2351 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2352 // Is there a known one in the portion not shifted out?
2353 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2354 return true;
2355 // Are all the bits to be shifted out known zero?
2356 if (Known.countMinTrailingZeros() >= ShiftVal)
2357 return isKnownNonZero(X, DemandedElts, Depth, Q);
2358 }
2359 }
2360 // div exact can only produce a zero if the dividend is zero.
2361 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2362 return isKnownNonZero(X, DemandedElts, Depth, Q);
2363 }
2364 // X + Y.
2365 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2366 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2367 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2368
2369 // If X and Y are both non-negative (as signed values) then their sum is not
2370 // zero unless both X and Y are zero.
2371 if (XKnown.isNonNegative() && YKnown.isNonNegative())
2372 if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
2373 isKnownNonZero(Y, DemandedElts, Depth, Q))
2374 return true;
2375
2376 // If X and Y are both negative (as signed values) then their sum is not
2377 // zero unless both X and Y equal INT_MIN.
2378 if (XKnown.isNegative() && YKnown.isNegative()) {
2379 APInt Mask = APInt::getSignedMaxValue(BitWidth);
2380 // The sign bit of X is set. If some other bit is set then X is not equal
2381 // to INT_MIN.
2382 if (XKnown.One.intersects(Mask))
2383 return true;
2384 // The sign bit of Y is set. If some other bit is set then Y is not equal
2385 // to INT_MIN.
2386 if (YKnown.One.intersects(Mask))
2387 return true;
2388 }
2389
2390 // The sum of a non-negative number and a power of two is not zero.
2391 if (XKnown.isNonNegative() &&
2392 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2393 return true;
2394 if (YKnown.isNonNegative() &&
2395 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2396 return true;
2397 }
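  // For example, if X is known non-negative and Y is the power of two 8,
  // X + 8 can only be zero if the addition wraps, which would require
  // X == -8, a negative value; hence the sum is known non-zero.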
2398 // X * Y.
2399 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2400 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2401 // If X and Y are non-zero then so is X * Y as long as the multiplication
2402 // does not overflow.
2403 if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2404 isKnownNonZero(X, DemandedElts, Depth, Q) &&
2405 isKnownNonZero(Y, DemandedElts, Depth, Q))
2406 return true;
2407 }
2408 // (C ? X : Y) != 0 if X != 0 and Y != 0.
2409 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2410 if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
2411 isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
2412 return true;
2413 }
2414 // PHI
2415 else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2416     // Try to detect a recurrence that monotonically increases from a
2417 // starting value, as these are common as induction variables.
2418 if (PN->getNumIncomingValues() == 2) {
2419 Value *Start = PN->getIncomingValue(0);
2420 Value *Induction = PN->getIncomingValue(1);
2421 if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2422 std::swap(Start, Induction);
2423 if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2424 if (!C->isZero() && !C->isNegative()) {
2425 ConstantInt *X;
2426 if (Q.IIQ.UseInstrInfo &&
2427 (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2428 match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2429 !X->isNegative())
2430 return true;
2431 }
2432 }
2433 }
2434 // Check if all incoming values are non-zero using recursion.
2435 Query RecQ = Q;
2436 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2437 return llvm::all_of(PN->operands(), [&](const Use &U) {
2438 if (U.get() == PN)
2439 return true;
2440 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2441 return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
2442 });
2443 }
2444 // ExtractElement
2445 else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2446 const Value *Vec = EEI->getVectorOperand();
2447 const Value *Idx = EEI->getIndexOperand();
2448 auto *CIdx = dyn_cast<ConstantInt>(Idx);
2449 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2450 unsigned NumElts = VecTy->getNumElements();
2451 APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
2452 if (CIdx && CIdx->getValue().ult(NumElts))
2453 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2454 return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2455 }
2456 }
2457 // Freeze
2458 else if (const FreezeInst *FI = dyn_cast<FreezeInst>(V)) {
2459 auto *Op = FI->getOperand(0);
2460 if (isKnownNonZero(Op, Depth, Q) &&
2461 isGuaranteedNotToBePoison(Op, Q.AC, Q.CxtI, Q.DT, Depth))
2462 return true;
2463 }
2464
2465 KnownBits Known(BitWidth);
2466 computeKnownBits(V, DemandedElts, Known, Depth, Q);
2467 return Known.One != 0;
2468 }
2469
2470 bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
2471 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2472 // vector
2473 if (isa<ScalableVectorType>(V->getType()))
2474 return false;
2475
2476 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2477 APInt DemandedElts =
2478 FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
2479 return isKnownNonZero(V, DemandedElts, Depth, Q);
2480 }
2481
2482 /// Return true if V2 == V1 + X, where X is known non-zero.
2483 static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
2484 const Query &Q) {
2485 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2486 if (!BO || BO->getOpcode() != Instruction::Add)
2487 return false;
2488 Value *Op = nullptr;
2489 if (V2 == BO->getOperand(0))
2490 Op = BO->getOperand(1);
2491 else if (V2 == BO->getOperand(1))
2492 Op = BO->getOperand(0);
2493 else
2494 return false;
2495 return isKnownNonZero(Op, Depth + 1, Q);
2496 }
2497
2498
2499 /// Return true if it is known that V1 != V2.
2500 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
2501 const Query &Q) {
2502 if (V1 == V2)
2503 return false;
2504 if (V1->getType() != V2->getType())
2505 // We can't look through casts yet.
2506 return false;
2507
2508 if (Depth >= MaxAnalysisRecursionDepth)
2509 return false;
2510
2511 // See if we can recurse through (exactly one of) our operands. This
2512 // requires our operation be 1-to-1 and map every input value to exactly
2513 // one output value. Such an operation is invertible.
2514 auto *O1 = dyn_cast<Operator>(V1);
2515 auto *O2 = dyn_cast<Operator>(V2);
2516 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
2517 switch (O1->getOpcode()) {
2518 default: break;
2519 case Instruction::Add:
2520 case Instruction::Sub:
2521 // Assume operand order has been canonicalized
2522 if (O1->getOperand(0) == O2->getOperand(0))
2523 return isKnownNonEqual(O1->getOperand(1), O2->getOperand(1),
2524 Depth + 1, Q);
2525 if (O1->getOperand(1) == O2->getOperand(1))
2526 return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0),
2527 Depth + 1, Q);
2528 break;
2529 case Instruction::Mul: {
2530       // Mul is invertible if A * B == (A * B) mod 2^N where A and B are
2531       // integers and N is the bitwidth. The nsw case is non-obvious, but
2532       // proven by alive2: https://alive2.llvm.org/ce/z/Z6D5qK
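      // For example, if both multiplies are nuw and share the operand 6, then
      // 6 * X == 6 * Y cannot wrap and so implies X == Y; proving X != Y
      // therefore proves the products differ.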
2533 auto *OBO1 = cast<OverflowingBinaryOperator>(O1);
2534 auto *OBO2 = cast<OverflowingBinaryOperator>(O2);
2535 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2536 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2537 break;
2538
2539 // Assume operand order has been canonicalized
2540 if (O1->getOperand(1) == O2->getOperand(1) &&
2541 isa<ConstantInt>(O1->getOperand(1)) &&
2542 !cast<ConstantInt>(O1->getOperand(1))->isZero())
2543 return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0),
2544 Depth + 1, Q);
2545 break;
2546 }
2547 case Instruction::SExt:
2548 case Instruction::ZExt:
2549 if (O1->getOperand(0)->getType() == O2->getOperand(0)->getType())
2550 return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0),
2551 Depth + 1, Q);
2552 break;
2553     }
2554 }
2555
2556 if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
2557 return true;
2558
2559 if (V1->getType()->isIntOrIntVectorTy()) {
2560 // Are any known bits in V1 contradictory to known bits in V2? If V1
2561 // has a known zero where V2 has a known one, they must not be equal.
2562 KnownBits Known1 = computeKnownBits(V1, Depth, Q);
2563 KnownBits Known2 = computeKnownBits(V2, Depth, Q);
2564
2565 if (Known1.Zero.intersects(Known2.One) ||
2566 Known2.Zero.intersects(Known1.One))
2567 return true;
2568 }
2569 return false;
2570 }
2571
2572 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
2573 /// simplify operations downstream. Mask is known to be zero for bits that V
2574 /// cannot have.
2575 ///
2576 /// This function is defined on values with integer type, values with pointer
2577 /// type, and vectors of integers. In the case
2578 /// where V is a vector, the mask, known zero, and known one values are the
2579 /// same width as the vector element, and the bit is set only if it is true
2580 /// for all of the elements in the vector.
2581 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2582 const Query &Q) {
2583 KnownBits Known(Mask.getBitWidth());
2584 computeKnownBits(V, Known, Depth, Q);
2585 return Mask.isSubsetOf(Known.Zero);
2586 }
2587
2588 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2589 // Returns the input and lower/upper bounds.
2590 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2591 const APInt *&CLow, const APInt *&CHigh) {
2592 assert(isa<Operator>(Select) &&
2593 cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2594 "Input should be a Select!");
2595
2596 const Value *LHS = nullptr, *RHS = nullptr;
2597 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2598 if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2599 return false;
2600
2601 if (!match(RHS, m_APInt(CLow)))
2602 return false;
2603
2604 const Value *LHS2 = nullptr, *RHS2 = nullptr;
2605 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2606 if (getInverseMinMaxFlavor(SPF) != SPF2)
2607 return false;
2608
2609 if (!match(RHS2, m_APInt(CHigh)))
2610 return false;
2611
2612 if (SPF == SPF_SMIN)
2613 std::swap(CLow, CHigh);
2614
2615 In = LHS2;
2616 return CLow->sle(*CHigh);
2617 }
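// Illustrative example (hypothetical IR): smax(smin(%x, 100), -50) matches
// with In == %x, CLow == -50 and CHigh == 100, i.e. the select chain clamps
// the value to [-50, 100].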
2618
2619 /// For vector constants, loop over the elements and find the constant with the
2620 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2621 /// or if any element was not analyzed; otherwise, return the count for the
2622 /// element with the minimum number of sign bits.
2623 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2624 const APInt &DemandedElts,
2625 unsigned TyBits) {
2626 const auto *CV = dyn_cast<Constant>(V);
2627 if (!CV || !isa<FixedVectorType>(CV->getType()))
2628 return 0;
2629
2630 unsigned MinSignBits = TyBits;
2631 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
2632 for (unsigned i = 0; i != NumElts; ++i) {
2633 if (!DemandedElts[i])
2634 continue;
2635 // If we find a non-ConstantInt, bail out.
2636 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2637 if (!Elt)
2638 return 0;
2639
2640 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2641 }
2642
2643 return MinSignBits;
2644 }
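// For example, <2 x i8> <i8 -1, i8 3> yields min(8, 6) == 6 sign bits:
// -1 contributes 8 sign bits, while 3 (0b00000011) contributes only 6
// (its six leading zero bits, counting the sign bit).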
2645
2646 static unsigned ComputeNumSignBitsImpl(const Value *V,
2647 const APInt &DemandedElts,
2648 unsigned Depth, const Query &Q);
2649
2650 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
2651 unsigned Depth, const Query &Q) {
2652 unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
2653 assert(Result > 0 && "At least one sign bit needs to be present!");
2654 return Result;
2655 }
2656
2657 /// Return the number of times the sign bit of the register is replicated into
2658 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2659 /// (itself), but other cases can give us information. For example, immediately
2660 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2661 /// other, so we return 3. For vectors, return the number of sign bits for the
2662 /// vector element with the minimum number of known sign bits of the demanded
2663 /// elements in the vector specified by DemandedElts.
2664 static unsigned ComputeNumSignBitsImpl(const Value *V,
2665 const APInt &DemandedElts,
2666 unsigned Depth, const Query &Q) {
2667 Type *Ty = V->getType();
2668
2669 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2670 // vector
2671 if (isa<ScalableVectorType>(Ty))
2672 return 1;
2673
2674 #ifndef NDEBUG
2675 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2676
2677 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2678 assert(
2679 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2680 "DemandedElt width should equal the fixed vector number of elements");
2681 } else {
2682 assert(DemandedElts == APInt(1, 1) &&
2683 "DemandedElt width should be 1 for scalars");
2684 }
2685 #endif
2686
2687 // We return the minimum number of sign bits that are guaranteed to be present
2688 // in V, so for undef we have to conservatively return 1. We don't have the
2689 // same behavior for poison though -- that's a FIXME today.
2690
2691 Type *ScalarTy = Ty->getScalarType();
2692 unsigned TyBits = ScalarTy->isPointerTy() ?
2693 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
2694 Q.DL.getTypeSizeInBits(ScalarTy);
2695
2696 unsigned Tmp, Tmp2;
2697 unsigned FirstAnswer = 1;
2698
2699 // Note that ConstantInt is handled by the general computeKnownBits case
2700 // below.
2701
2702 if (Depth == MaxAnalysisRecursionDepth)
2703 return 1;
2704
2705 if (auto *U = dyn_cast<Operator>(V)) {
2706 switch (Operator::getOpcode(V)) {
2707 default: break;
2708 case Instruction::SExt:
2709 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2710 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2711
2712 case Instruction::SDiv: {
2713 const APInt *Denominator;
2714 // sdiv X, C -> adds log(C) sign bits.
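      // For example, sdiv X, 4 divides by 2^2 and therefore gains
      // Denominator->logBase2() == 2 extra sign bits, capped at TyBits.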
2715 if (match(U->getOperand(1), m_APInt(Denominator))) {
2716
2717 // Ignore non-positive denominator.
2718 if (!Denominator->isStrictlyPositive())
2719 break;
2720
2721 // Calculate the incoming numerator bits.
2722 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2723
2724 // Add floor(log(C)) bits to the numerator bits.
2725 return std::min(TyBits, NumBits + Denominator->logBase2());
2726 }
2727 break;
2728 }
2729
2730 case Instruction::SRem: {
2731 const APInt *Denominator;
2732 // srem X, C -> we know that the result is within [-C+1,C) when C is a
2733       // positive constant. This lets us put a lower bound on the number of
2734       // sign bits.
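      // For example, srem X, 8 yields values in (-8, 8), all of which have at
      // least TyBits - ceilLogBase2(8) == TyBits - 3 sign bits.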
2735 if (match(U->getOperand(1), m_APInt(Denominator))) {
2736
2737 // Ignore non-positive denominator.
2738 if (!Denominator->isStrictlyPositive())
2739 break;
2740
2741 // Calculate the incoming numerator bits. SRem by a positive constant
2742 // can't lower the number of sign bits.
2743 unsigned NumrBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2744
2745 // Calculate the leading sign bit constraints by examining the
2746 // denominator. Given that the denominator is positive, there are two
2747 // cases:
2748 //
2749         // 1. The numerator is positive. The result range is [0,C), and every
2750         //    value in [0,C) is u< (1 << ceilLogBase2(C)).
2751         //
2752         // 2. The numerator is negative. The result range is (-C,0], and
2753         //    integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2754 //
2755 // Thus a lower bound on the number of sign bits is `TyBits -
2756 // ceilLogBase2(C)`.
2757
2758 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2759 return std::max(NumrBits, ResBits);
2760 }
2761 break;
2762 }
2763
2764 case Instruction::AShr: {
2765 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2766 // ashr X, C -> adds C sign bits. Vectors too.
2767 const APInt *ShAmt;
2768 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2769 if (ShAmt->uge(TyBits))
2770 break; // Bad shift.
2771 unsigned ShAmtLimited = ShAmt->getZExtValue();
2772 Tmp += ShAmtLimited;
2773 if (Tmp > TyBits) Tmp = TyBits;
2774 }
2775 return Tmp;
2776 }
2777 case Instruction::Shl: {
2778 const APInt *ShAmt;
2779 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2780 // shl destroys sign bits.
2781 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2782 if (ShAmt->uge(TyBits) || // Bad shift.
2783 ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2784 Tmp2 = ShAmt->getZExtValue();
2785 return Tmp - Tmp2;
2786 }
2787 break;
2788 }
2789 case Instruction::And:
2790 case Instruction::Or:
2791 case Instruction::Xor: // NOT is handled here.
2792 // Logical binary ops preserve the number of sign bits at the worst.
2793 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2794 if (Tmp != 1) {
2795 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2796 FirstAnswer = std::min(Tmp, Tmp2);
2797 // We computed what we know about the sign bits as our first
2798 // answer. Now proceed to the generic code that uses
2799 // computeKnownBits, and pick whichever answer is better.
2800 }
2801 break;
2802
2803 case Instruction::Select: {
2804 // If we have a clamp pattern, we know that the number of sign bits will
2805 // be the minimum of the clamp min/max range.
2806 const Value *X;
2807 const APInt *CLow, *CHigh;
2808 if (isSignedMinMaxClamp(U, X, CLow, CHigh))
2809 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
2810
2811 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2812 if (Tmp == 1) break;
2813 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2814 return std::min(Tmp, Tmp2);
2815 }
2816
2817 case Instruction::Add:
2818 // Add can have at most one carry bit. Thus we know that the output
2819 // is, at worst, one more bit than the inputs.
2820 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2821 if (Tmp == 1) break;
2822
2823 // Special case decrementing a value (ADD X, -1):
2824 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2825 if (CRHS->isAllOnesValue()) {
2826 KnownBits Known(TyBits);
2827 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2828
2829 // If the input is known to be 0 or 1, the output is 0/-1, which is
2830 // all sign bits set.
2831 if ((Known.Zero | 1).isAllOnesValue())
2832 return TyBits;
2833
2834 // If we are subtracting one from a positive number, there is no carry
2835 // out of the result.
2836 if (Known.isNonNegative())
2837 return Tmp;
2838 }
2839
2840 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2841 if (Tmp2 == 1) break;
2842 return std::min(Tmp, Tmp2) - 1;
2843
2844 case Instruction::Sub:
2845 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2846 if (Tmp2 == 1) break;
2847
2848 // Handle NEG.
2849 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2850 if (CLHS->isNullValue()) {
2851 KnownBits Known(TyBits);
2852 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2853 // If the input is known to be 0 or 1, the output is 0/-1, which is
2854 // all sign bits set.
2855 if ((Known.Zero | 1).isAllOnesValue())
2856 return TyBits;
2857
2858 // If the input is known to be positive (the sign bit is known clear),
2859 // the output of the NEG has the same number of sign bits as the
2860 // input.
2861 if (Known.isNonNegative())
2862 return Tmp2;
2863
2864 // Otherwise, we treat this like a SUB.
2865 }
2866
2867 // Sub can have at most one carry bit. Thus we know that the output
2868 // is, at worst, one more bit than the inputs.
2869 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2870 if (Tmp == 1) break;
2871 return std::min(Tmp, Tmp2) - 1;
2872
2873 case Instruction::Mul: {
2874 // The output of the Mul can be at most twice the valid bits in the
2875 // inputs.
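// For illustration (hypothetical values): with TyBits = 32 and 20 sign bits
// on each operand, each input fits in 13 significant bits, the product fits
// in 26, and 32 - 26 + 1 = 7 sign bits are guaranteed.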
2876 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2877 if (SignBitsOp0 == 1) break;
2878 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2879 if (SignBitsOp1 == 1) break;
2880 unsigned OutValidBits =
2881 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2882 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2883 }
2884
2885 case Instruction::PHI: {
2886 const PHINode *PN = cast<PHINode>(U);
2887 unsigned NumIncomingValues = PN->getNumIncomingValues();
2888 // Don't analyze large in-degree PHIs.
2889 if (NumIncomingValues > 4) break;
2890 // Unreachable blocks may have zero-operand PHI nodes.
2891 if (NumIncomingValues == 0) break;
2892
2893 // Take the minimum of all incoming values. This can't infinitely loop
2894 // because of our depth threshold.
2895 Query RecQ = Q;
2896 Tmp = TyBits;
2897 for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
2898 if (Tmp == 1) return Tmp;
2899 RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
2900 Tmp = std::min(
2901 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
2902 }
2903 return Tmp;
2904 }
2905
2906 case Instruction::Trunc:
2907 // FIXME: it's tricky to do anything useful for this, but it is an
2908 // important case for targets like X86.
2909 break;
2910
2911 case Instruction::ExtractElement:
2912 // Look through extract element. At the moment we keep this simple and
2913 // skip tracking the specific element. But at least we might find
2914 // information valid for all elements of the vector (for example, if the
2915 // vector is sign extended, shifted, etc.).
2916 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2917
2918 case Instruction::ShuffleVector: {
2919 // Collect the minimum number of sign bits that are shared by every vector
2920 // element referenced by the shuffle.
2921 auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
2922 if (!Shuf) {
2923 // FIXME: Add support for shufflevector constant expressions.
2924 return 1;
2925 }
2926 APInt DemandedLHS, DemandedRHS;
2927 // For undef elements, we don't know anything about the common state of
2928 // the shuffle result.
2929 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
2930 return 1;
2931 Tmp = std::numeric_limits<unsigned>::max();
2932 if (!!DemandedLHS) {
2933 const Value *LHS = Shuf->getOperand(0);
2934 Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
2935 }
2936 // If we don't know anything, early out and try computeKnownBits
2937 // fall-back.
2938 if (Tmp == 1)
2939 break;
2940 if (!!DemandedRHS) {
2941 const Value *RHS = Shuf->getOperand(1);
2942 Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
2943 Tmp = std::min(Tmp, Tmp2);
2944 }
2945 // If we don't know anything, early out and try computeKnownBits
2946 // fall-back.
2947 if (Tmp == 1)
2948 break;
2949 assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
2950 return Tmp;
2951 }
2952 case Instruction::Call: {
2953 if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
2954 switch (II->getIntrinsicID()) {
2955 default: break;
2956 case Intrinsic::abs:
2957 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2958 if (Tmp == 1) break;
2959
2960 // Absolute value reduces number of sign bits by at most 1.
2961 return Tmp - 1;
2962 }
2963 }
2964 }
2965 }
2966 }
2967
2968 // Finally, if we can prove that the top bits of the result are 0's or 1's,
2969 // use this information.
2970
2971 // If we can examine all elements of a vector constant successfully, we're
2972 // done (we can't do any better than that). If not, keep trying.
2973 if (unsigned VecSignBits =
2974 computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
2975 return VecSignBits;
2976
2977 KnownBits Known(TyBits);
2978 computeKnownBits(V, DemandedElts, Known, Depth, Q);
2979
2980 // If we know that the sign bit is either zero or one, determine the number of
2981 // identical bits in the top of the input value.
2982 return std::max(FirstAnswer, Known.countMinSignBits());
2983 }
2984
2985 /// This function computes the integer multiple of Base that equals V.
2986 /// If successful, it returns true and stores the multiple in
2987 /// Multiple. If unsuccessful, it returns false. It looks
2988 /// through SExt instructions only if LookThroughSExt is true.
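/// For illustration (a hypothetical value, not from the original comment):
/// with V = `shl i32 %x, 2` and Base = 4, the shift is treated as a multiply
/// by 4, so the function returns true with Multiple = %x.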
2989 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2990 bool LookThroughSExt, unsigned Depth) {
2991 assert(V && "No Value?");
2992 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2993 assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2994
2995 Type *T = V->getType();
2996
2997 ConstantInt *CI = dyn_cast<ConstantInt>(V);
2998
2999 if (Base == 0)
3000 return false;
3001
3002 if (Base == 1) {
3003 Multiple = V;
3004 return true;
3005 }
3006
3007 ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
3008 Constant *BaseVal = ConstantInt::get(T, Base);
3009 if (CO && CO == BaseVal) {
3010 // Multiple is 1.
3011 Multiple = ConstantInt::get(T, 1);
3012 return true;
3013 }
3014
3015 if (CI && CI->getZExtValue() % Base == 0) {
3016 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
3017 return true;
3018 }
3019
3020 if (Depth == MaxAnalysisRecursionDepth) return false;
3021
3022 Operator *I = dyn_cast<Operator>(V);
3023 if (!I) return false;
3024
3025 switch (I->getOpcode()) {
3026 default: break;
3027 case Instruction::SExt:
3028 if (!LookThroughSExt) return false;
3029 // otherwise fall through to ZExt
3030 LLVM_FALLTHROUGH;
3031 case Instruction::ZExt:
3032 return ComputeMultiple(I->getOperand(0), Base, Multiple,
3033 LookThroughSExt, Depth+1);
3034 case Instruction::Shl:
3035 case Instruction::Mul: {
3036 Value *Op0 = I->getOperand(0);
3037 Value *Op1 = I->getOperand(1);
3038
3039 if (I->getOpcode() == Instruction::Shl) {
3040 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
3041 if (!Op1CI) return false;
3042 // Turn Op0 << Op1 into Op0 * 2^Op1
3043 APInt Op1Int = Op1CI->getValue();
3044 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
3045 APInt API(Op1Int.getBitWidth(), 0);
3046 API.setBit(BitToSet);
3047 Op1 = ConstantInt::get(V->getContext(), API);
3048 }
3049
3050 Value *Mul0 = nullptr;
3051 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
3052 if (Constant *Op1C = dyn_cast<Constant>(Op1))
3053 if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
3054 if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3055 MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3056 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
3057 if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3058 MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3059 MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
3060
3061 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
3062 Multiple = ConstantExpr::getMul(MulC, Op1C);
3063 return true;
3064 }
3065
3066 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
3067 if (Mul0CI->getValue() == 1) {
3068 // V == Base * Op1, so return Op1
3069 Multiple = Op1;
3070 return true;
3071 }
3072 }
3073
3074 Value *Mul1 = nullptr;
3075 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
3076 if (Constant *Op0C = dyn_cast<Constant>(Op0))
3077 if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
3078 if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3079 MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3080 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
3081 if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3082 MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3083 MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
3084
3085 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
3086 Multiple = ConstantExpr::getMul(MulC, Op0C);
3087 return true;
3088 }
3089
3090 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
3091 if (Mul1CI->getValue() == 1) {
3092 // V == Base * Op0, so return Op0
3093 Multiple = Op0;
3094 return true;
3095 }
3096 }
3097 }
3098 }
3099
3100 // We could not determine if V is a multiple of Base.
3101 return false;
3102 }
3103
3104 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3105 const TargetLibraryInfo *TLI) {
3106 const Function *F = CB.getCalledFunction();
3107 if (!F)
3108 return Intrinsic::not_intrinsic;
3109
3110 if (F->isIntrinsic())
3111 return F->getIntrinsicID();
3112
3113 // We are going to infer semantics of a library function based on mapping it
3114 // to an LLVM intrinsic. Check that the library function is available from
3115 // this call site and in this environment.
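// For illustration: a readonly call to the C library function `sinf` maps
// to Intrinsic::sin below, assuming TLI reports the function as available
// in this environment.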
3116 LibFunc Func;
3117 if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3118 !CB.onlyReadsMemory())
3119 return Intrinsic::not_intrinsic;
3120
3121 switch (Func) {
3122 default:
3123 break;
3124 case LibFunc_sin:
3125 case LibFunc_sinf:
3126 case LibFunc_sinl:
3127 return Intrinsic::sin;
3128 case LibFunc_cos:
3129 case LibFunc_cosf:
3130 case LibFunc_cosl:
3131 return Intrinsic::cos;
3132 case LibFunc_exp:
3133 case LibFunc_expf:
3134 case LibFunc_expl:
3135 return Intrinsic::exp;
3136 case LibFunc_exp2:
3137 case LibFunc_exp2f:
3138 case LibFunc_exp2l:
3139 return Intrinsic::exp2;
3140 case LibFunc_log:
3141 case LibFunc_logf:
3142 case LibFunc_logl:
3143 return Intrinsic::log;
3144 case LibFunc_log10:
3145 case LibFunc_log10f:
3146 case LibFunc_log10l:
3147 return Intrinsic::log10;
3148 case LibFunc_log2:
3149 case LibFunc_log2f:
3150 case LibFunc_log2l:
3151 return Intrinsic::log2;
3152 case LibFunc_fabs:
3153 case LibFunc_fabsf:
3154 case LibFunc_fabsl:
3155 return Intrinsic::fabs;
3156 case LibFunc_fmin:
3157 case LibFunc_fminf:
3158 case LibFunc_fminl:
3159 return Intrinsic::minnum;
3160 case LibFunc_fmax:
3161 case LibFunc_fmaxf:
3162 case LibFunc_fmaxl:
3163 return Intrinsic::maxnum;
3164 case LibFunc_copysign:
3165 case LibFunc_copysignf:
3166 case LibFunc_copysignl:
3167 return Intrinsic::copysign;
3168 case LibFunc_floor:
3169 case LibFunc_floorf:
3170 case LibFunc_floorl:
3171 return Intrinsic::floor;
3172 case LibFunc_ceil:
3173 case LibFunc_ceilf:
3174 case LibFunc_ceill:
3175 return Intrinsic::ceil;
3176 case LibFunc_trunc:
3177 case LibFunc_truncf:
3178 case LibFunc_truncl:
3179 return Intrinsic::trunc;
3180 case LibFunc_rint:
3181 case LibFunc_rintf:
3182 case LibFunc_rintl:
3183 return Intrinsic::rint;
3184 case LibFunc_nearbyint:
3185 case LibFunc_nearbyintf:
3186 case LibFunc_nearbyintl:
3187 return Intrinsic::nearbyint;
3188 case LibFunc_round:
3189 case LibFunc_roundf:
3190 case LibFunc_roundl:
3191 return Intrinsic::round;
3192 case LibFunc_roundeven:
3193 case LibFunc_roundevenf:
3194 case LibFunc_roundevenl:
3195 return Intrinsic::roundeven;
3196 case LibFunc_pow:
3197 case LibFunc_powf:
3198 case LibFunc_powl:
3199 return Intrinsic::pow;
3200 case LibFunc_sqrt:
3201 case LibFunc_sqrtf:
3202 case LibFunc_sqrtl:
3203 return Intrinsic::sqrt;
3204 }
3205
3206 return Intrinsic::not_intrinsic;
3207 }
3208
3209 /// Return true if we can prove that the specified FP value is never equal to
3210 /// -0.0.
3211 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3212 /// that a value is not -0.0. It only guarantees that -0.0 may be treated
3213 /// the same as +0.0 in floating-point ops.
3214 ///
3215 /// NOTE: this function will need to be revisited when we support non-default
3216 /// rounding modes!
3217 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3218 unsigned Depth) {
3219 if (auto *CFP = dyn_cast<ConstantFP>(V))
3220 return !CFP->getValueAPF().isNegZero();
3221
3222 if (Depth == MaxAnalysisRecursionDepth)
3223 return false;
3224
3225 auto *Op = dyn_cast<Operator>(V);
3226 if (!Op)
3227 return false;
3228
3229 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3230 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3231 return true;
3232
3233 // sitofp and uitofp turn into +0.0 for zero.
3234 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3235 return true;
3236
3237 if (auto *Call = dyn_cast<CallInst>(Op)) {
3238 Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3239 switch (IID) {
3240 default:
3241 break;
3242 // sqrt(-0.0) = -0.0, no other negative results are possible.
3243 case Intrinsic::sqrt:
3244 case Intrinsic::canonicalize:
3245 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3246 // fabs(x) != -0.0
3247 case Intrinsic::fabs:
3248 return true;
3249 }
3250 }
3251
3252 return false;
3253 }
3254
3255 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3256 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
3257 /// bit despite comparing equal.
3258 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3259 const TargetLibraryInfo *TLI,
3260 bool SignBitOnly,
3261 unsigned Depth) {
3262 // TODO: This function does not do the right thing when SignBitOnly is true
3263 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3264 // which flips the sign bits of NaNs. See
3265 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3266
3267 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3268 return !CFP->getValueAPF().isNegative() ||
3269 (!SignBitOnly && CFP->getValueAPF().isZero());
3270 }
3271
3272 // Handle vector of constants.
3273 if (auto *CV = dyn_cast<Constant>(V)) {
3274 if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3275 unsigned NumElts = CVFVTy->getNumElements();
3276 for (unsigned i = 0; i != NumElts; ++i) {
3277 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3278 if (!CFP)
3279 return false;
3280 if (CFP->getValueAPF().isNegative() &&
3281 (SignBitOnly || !CFP->getValueAPF().isZero()))
3282 return false;
3283 }
3284
3285 // All non-negative ConstantFPs.
3286 return true;
3287 }
3288 }
3289
3290 if (Depth == MaxAnalysisRecursionDepth)
3291 return false;
3292
3293 const Operator *I = dyn_cast<Operator>(V);
3294 if (!I)
3295 return false;
3296
3297 switch (I->getOpcode()) {
3298 default:
3299 break;
3300 // Unsigned integers are always nonnegative.
3301 case Instruction::UIToFP:
3302 return true;
3303 case Instruction::FMul:
3304 case Instruction::FDiv:
3305 // X * X is always non-negative or a NaN.
3306 // X / X is always exactly 1.0 or a NaN.
3307 if (I->getOperand(0) == I->getOperand(1) &&
3308 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3309 return true;
3310
3311 LLVM_FALLTHROUGH;
3312 case Instruction::FAdd:
3313 case Instruction::FRem:
3314 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3315 Depth + 1) &&
3316 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3317 Depth + 1);
3318 case Instruction::Select:
3319 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3320 Depth + 1) &&
3321 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3322 Depth + 1);
3323 case Instruction::FPExt:
3324 case Instruction::FPTrunc:
3325 // Widening/narrowing never change sign.
3326 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3327 Depth + 1);
3328 case Instruction::ExtractElement:
3329 // Look through extract element. At the moment we keep this simple and skip
3330 // tracking the specific element. But at least we might find information
3331 // valid for all elements of the vector.
3332 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3333 Depth + 1);
3334 case Instruction::Call:
3335 const auto *CI = cast<CallInst>(I);
3336 Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3337 switch (IID) {
3338 default:
3339 break;
3340 case Intrinsic::maxnum: {
3341 Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3342 auto isPositiveNum = [&](Value *V) {
3343 if (SignBitOnly) {
3344 // With SignBitOnly, this is tricky because the result of
3345 // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3346 // a constant strictly greater than 0.0.
3347 const APFloat *C;
3348 return match(V, m_APFloat(C)) &&
3349 *C > APFloat::getZero(C->getSemantics());
3350 }
3351
3352 // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3353 // maxnum can't be ordered-less-than-zero.
3354 return isKnownNeverNaN(V, TLI) &&
3355 cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3356 };
3357
3358 // TODO: This could be improved. We could also check that neither operand
3359 // has its sign bit set (and at least 1 is not-NAN?).
3360 return isPositiveNum(V0) || isPositiveNum(V1);
3361 }
3362
3363 case Intrinsic::maximum:
3364 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3365 Depth + 1) ||
3366 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3367 Depth + 1);
3368 case Intrinsic::minnum:
3369 case Intrinsic::minimum:
3370 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3371 Depth + 1) &&
3372 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3373 Depth + 1);
3374 case Intrinsic::exp:
3375 case Intrinsic::exp2:
3376 case Intrinsic::fabs:
3377 return true;
3378
3379 case Intrinsic::sqrt:
3380 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
3381 if (!SignBitOnly)
3382 return true;
3383 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3384 CannotBeNegativeZero(CI->getOperand(0), TLI));
3385
3386 case Intrinsic::powi:
3387 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3388 // powi(x,n) is non-negative if n is even.
3389 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3390 return true;
3391 }
3392 // TODO: This is not correct. Given that exp is an integer, here are the
3393 // ways that pow can return a negative value:
3394 //
3395 // pow(x, exp) --> negative if exp is odd and x is negative.
3396 // pow(-0, exp) --> -inf if exp is negative odd.
3397 // pow(-0, exp) --> -0 if exp is positive odd.
3398 // pow(-inf, exp) --> -0 if exp is negative odd.
3399 // pow(-inf, exp) --> -inf if exp is positive odd.
3400 //
3401 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3402 // but we must return false if x == -0. Unfortunately we do not currently
3403 // have a way of expressing this constraint. See details in
3404 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3405 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3406 Depth + 1);
3407
3408 case Intrinsic::fma:
3409 case Intrinsic::fmuladd:
3410 // x*x+y is non-negative if y is non-negative.
3411 return I->getOperand(0) == I->getOperand(1) &&
3412 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3413 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3414 Depth + 1);
3415 }
3416 break;
3417 }
3418 return false;
3419 }
3420
3421 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3422 const TargetLibraryInfo *TLI) {
3423 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3424 }
3425
3426 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3427 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3428 }
3429
3430 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3431 unsigned Depth) {
3432 assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3433
3434 // If we're told that infinities won't happen, assume they won't.
3435 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3436 if (FPMathOp->hasNoInfs())
3437 return true;
3438
3439 // Handle scalar constants.
3440 if (auto *CFP = dyn_cast<ConstantFP>(V))
3441 return !CFP->isInfinity();
3442
3443 if (Depth == MaxAnalysisRecursionDepth)
3444 return false;
3445
3446 if (auto *Inst = dyn_cast<Instruction>(V)) {
3447 switch (Inst->getOpcode()) {
3448 case Instruction::Select: {
3449 return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3450 isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3451 }
3452 case Instruction::SIToFP:
3453 case Instruction::UIToFP: {
3454 // Get width of largest magnitude integer (remove a bit if signed).
3455 // This still works for a signed minimum value because the largest FP
3456 // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
3457 int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits();
3458 if (Inst->getOpcode() == Instruction::SIToFP)
3459 --IntSize;
3460
3461 // If the exponent of the largest finite FP value can hold the largest
3462 // integer, the result of the cast must be finite.
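// For illustration: i32 -> float gives IntSize = 31 and
// ilogb(FLT_MAX) = 127 >= 31, so the cast can never be infinite; an
// i128 -> half cast (IntSize = 127 > 15) cannot be proven finite here.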
3463 Type *FPTy = Inst->getType()->getScalarType();
3464 return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize;
3465 }
3466 default:
3467 break;
3468 }
3469 }
3470
3471 // Try to handle fixed-width vector constants.
3472 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3473 if (VFVTy && isa<Constant>(V)) {
3474 // For vectors, verify that each element is not infinity.
3475 unsigned NumElts = VFVTy->getNumElements();
3476 for (unsigned i = 0; i != NumElts; ++i) {
3477 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3478 if (!Elt)
3479 return false;
3480 if (isa<UndefValue>(Elt))
3481 continue;
3482 auto *CElt = dyn_cast<ConstantFP>(Elt);
3483 if (!CElt || CElt->isInfinity())
3484 return false;
3485 }
3486 // All elements were confirmed non-infinity or undefined.
3487 return true;
3488 }
3489
3490 // We were not able to prove that V never contains infinity.
3491 return false;
3492 }
3493
3494 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3495 unsigned Depth) {
3496 assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3497
3498 // If we're told that NaNs won't happen, assume they won't.
3499 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3500 if (FPMathOp->hasNoNaNs())
3501 return true;
3502
3503 // Handle scalar constants.
3504 if (auto *CFP = dyn_cast<ConstantFP>(V))
3505 return !CFP->isNaN();
3506
3507 if (Depth == MaxAnalysisRecursionDepth)
3508 return false;
3509
3510 if (auto *Inst = dyn_cast<Instruction>(V)) {
3511 switch (Inst->getOpcode()) {
3512 case Instruction::FAdd:
3513 case Instruction::FSub:
3514 // Adding positive and negative infinity produces NaN.
3515 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3516 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3517 (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3518 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3519
3520 case Instruction::FMul:
3521 // Zero multiplied with infinity produces NaN.
3522 // FIXME: If neither side can be zero fmul never produces NaN.
3523 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3524 isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3525 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3526 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3527
3528 case Instruction::FDiv:
3529 case Instruction::FRem:
3530 // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3531 return false;
3532
3533 case Instruction::Select: {
3534 return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3535 isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3536 }
3537 case Instruction::SIToFP:
3538 case Instruction::UIToFP:
3539 return true;
3540 case Instruction::FPTrunc:
3541 case Instruction::FPExt:
3542 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3543 default:
3544 break;
3545 }
3546 }
3547
3548 if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3549 switch (II->getIntrinsicID()) {
3550 case Intrinsic::canonicalize:
3551 case Intrinsic::fabs:
3552 case Intrinsic::copysign:
3553 case Intrinsic::exp:
3554 case Intrinsic::exp2:
3555 case Intrinsic::floor:
3556 case Intrinsic::ceil:
3557 case Intrinsic::trunc:
3558 case Intrinsic::rint:
3559 case Intrinsic::nearbyint:
3560 case Intrinsic::round:
3561 case Intrinsic::roundeven:
3562 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3563 case Intrinsic::sqrt:
3564 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3565 CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3566 case Intrinsic::minnum:
3567 case Intrinsic::maxnum:
3568 // If either operand is not NaN, the result is not NaN.
3569 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3570 isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3571 default:
3572 return false;
3573 }
3574 }
3575
3576 // Try to handle fixed-width vector constants.
3577 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3578 if (VFVTy && isa<Constant>(V)) {
3579 // For vectors, verify that each element is not NaN.
3580 unsigned NumElts = VFVTy->getNumElements();
3581 for (unsigned i = 0; i != NumElts; ++i) {
3582 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3583 if (!Elt)
3584 return false;
3585 if (isa<UndefValue>(Elt))
3586 continue;
3587 auto *CElt = dyn_cast<ConstantFP>(Elt);
3588 if (!CElt || CElt->isNaN())
3589 return false;
3590 }
3591 // All elements were confirmed not-NaN or undefined.
3592 return true;
3593 }
3594
3595 // We were not able to prove that V never contains NaN.
3596 return false;
3597 }
3598
3599 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3600
3601 // All byte-wide stores are splatable, even of arbitrary variables.
3602 if (V->getType()->isIntegerTy(8))
3603 return V;
3604
3605 LLVMContext &Ctx = V->getContext();
3606
3607 // Undef values are "don't care" bytes.
3608 auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3609 if (isa<UndefValue>(V))
3610 return UndefInt8;
3611
3612 // Return Undef for zero-sized types.
3613 if (!DL.getTypeStoreSize(V->getType()).isNonZero())
3614 return UndefInt8;
3615
3616 Constant *C = dyn_cast<Constant>(V);
3617 if (!C) {
3618 // Conceptually, we could handle things like:
3619 // %a = zext i8 %X to i16
3620 // %b = shl i16 %a, 8
3621 // %c = or i16 %a, %b
3622 // but until there is an example that actually needs this, it doesn't seem
3623 // worth worrying about.
3624 return nullptr;
3625 }
3626
3627 // Handle 'null' constants such as ConstantAggregateZero.
3628 if (C->isNullValue())
3629 return Constant::getNullValue(Type::getInt8Ty(Ctx));
3630
3631 // Constant floating-point values can be handled as integer values if the
3632 // corresponding integer value is "byteable". An important case is 0.0.
3633 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3634 Type *Ty = nullptr;
3635 if (CFP->getType()->isHalfTy())
3636 Ty = Type::getInt16Ty(Ctx);
3637 else if (CFP->getType()->isFloatTy())
3638 Ty = Type::getInt32Ty(Ctx);
3639 else if (CFP->getType()->isDoubleTy())
3640 Ty = Type::getInt64Ty(Ctx);
3641 // Don't handle long double formats, which have strange constraints.
3642 return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3643 : nullptr;
3644 }
3645
3646 // We can handle constant integers whose width is a multiple of 8 bits.
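// For illustration (hypothetical constants): i16 0x3C3C splats to the byte
// 0x3C, while i32 0x01020304 is not a repeated byte and yields nullptr.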
3647 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3648 if (CI->getBitWidth() % 8 == 0) {
3649 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3650 if (!CI->getValue().isSplat(8))
3651 return nullptr;
3652 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3653 }
3654 }
3655
3656 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3657 if (CE->getOpcode() == Instruction::IntToPtr) {
3658 if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
3659 unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
3660 return isBytewiseValue(
3661 ConstantExpr::getIntegerCast(CE->getOperand(0),
3662 Type::getIntNTy(Ctx, BitWidth), false),
3663 DL);
3664 }
3665 }
3666 }
3667
3668 auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3669 if (LHS == RHS)
3670 return LHS;
3671 if (!LHS || !RHS)
3672 return nullptr;
3673 if (LHS == UndefInt8)
3674 return RHS;
3675 if (RHS == UndefInt8)
3676 return LHS;
3677 return nullptr;
3678 };
3679
3680 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3681 Value *Val = UndefInt8;
3682 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3683 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3684 return nullptr;
3685 return Val;
3686 }
3687
3688 if (isa<ConstantAggregate>(C)) {
3689 Value *Val = UndefInt8;
3690 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3691 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3692 return nullptr;
3693 return Val;
3694 }
3695
3696 // Don't try to handle the handful of other constants.
3697 return nullptr;
3698 }
3699
3700 // This is the recursive version of BuildSubAggregate. It takes a few different
3701 // arguments. Idxs is the index within the nested struct From that we are
3702 // looking at now (which is of type IndexedType). IdxSkip is the number of
3703 // indices from Idxs that should be left out when inserting into the resulting
3704 // struct. To is the result struct built so far, new insertvalue instructions
3705 // build on that.
3706 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3707 SmallVectorImpl<unsigned> &Idxs,
3708 unsigned IdxSkip,
3709 Instruction *InsertBefore) {
3710 StructType *STy = dyn_cast<StructType>(IndexedType);
3711 if (STy) {
3712 // Save the original To argument so we can modify it
3713 Value *OrigTo = To;
3714 // General case, the type indexed by Idxs is a struct
3715 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3716 // Process each struct element recursively
3717 Idxs.push_back(i);
3718 Value *PrevTo = To;
3719 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3720 InsertBefore);
3721 Idxs.pop_back();
3722 if (!To) {
3723 // Couldn't find any inserted value for this index? Cleanup
3724 while (PrevTo != OrigTo) {
3725 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3726 PrevTo = Del->getAggregateOperand();
3727 Del->eraseFromParent();
3728 }
3729 // Stop processing elements
3730 break;
3731 }
3732 }
3733 // If we successfully found a value for each of our subaggregates
3734 if (To)
3735 return To;
3736 }
3737 // Base case, the type indexed by Idxs is not a struct, or not all of
3738 // the struct's elements had a value that was inserted directly. In the latter
3739 // case, perhaps we can't determine each of the subelements individually, but
3740 // we might be able to find the complete struct somewhere.
3741
3742 // Find the value that is at that particular spot
3743 Value *V = FindInsertedValue(From, Idxs);
3744
3745 if (!V)
3746 return nullptr;
3747
3748 // Insert the value in the new (sub) aggregate
3749 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3750 "tmp", InsertBefore);
3751 }
3752
3753 // This helper takes a nested struct and extracts a part of it (which is again a
3754 // struct) into a new value. For example, given the struct:
3755 // { a, { b, { c, d }, e } }
3756 // and the indices "1, 1" this returns
3757 // { c, d }.
3758 //
3759 // It does this by inserting an insertvalue for each element in the resulting
3760 // struct, as opposed to just inserting a single struct. This will only work if
3761 // each of the elements of the substruct are known (i.e., inserted into From by an
3762 // insertvalue instruction somewhere).
3763 //
3764 // All inserted insertvalue instructions are inserted before InsertBefore.
3765 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3766 Instruction *InsertBefore) {
3767 assert(InsertBefore && "Must have someplace to insert!");
3768 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3769 idx_range);
3770 Value *To = UndefValue::get(IndexedType);
3771 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3772 unsigned IdxSkip = Idxs.size();
3773
3774 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3775 }
3776
3777 /// Given an aggregate and a sequence of indices, see if the scalar value
3778 /// indexed is already around as a register, for example if it was inserted
3779 /// directly into the aggregate.
3780 ///
3781 /// If InsertBefore is not null, this function will duplicate (modified)
3782 /// insertvalues when a part of a nested struct is extracted.
3783 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3784 Instruction *InsertBefore) {
3785 // Nothing to index? Just return V then (this is useful at the end of our
3786 // recursion).
3787 if (idx_range.empty())
3788 return V;
3789 // We have indices, so V should have an indexable type.
3790 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3791 "Not looking at a struct or array?");
3792 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3793 "Invalid indices for type?");
3794
3795 if (Constant *C = dyn_cast<Constant>(V)) {
3796 C = C->getAggregateElement(idx_range[0]);
3797 if (!C) return nullptr;
3798 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3799 }
3800
3801 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3802 // Loop the indices for the insertvalue instruction in parallel with the
3803 // requested indices
3804 const unsigned *req_idx = idx_range.begin();
3805 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3806 i != e; ++i, ++req_idx) {
3807 if (req_idx == idx_range.end()) {
3808 // We can't handle this without inserting insertvalues
3809 if (!InsertBefore)
3810 return nullptr;
3811
3812 // The requested index identifies a part of a nested aggregate. Handle
3813 // this specially. For example,
3814 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3815 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3816 // %C = extractvalue {i32, { i32, i32 } } %B, 1
3817 // This can be changed into
3818 // %A = insertvalue {i32, i32 } undef, i32 10, 0
3819 // %C = insertvalue {i32, i32 } %A, i32 11, 1
3820 // which allows the unused 0,0 element from the nested struct to be
3821 // removed.
3822 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3823 InsertBefore);
3824 }
3825
3826 // This insertvalue inserts something other than what we are looking for.
3827 // See if the (aggregate) value inserted into has the value we are
3828 // looking for, then.
3829 if (*req_idx != *i)
3830 return FindInsertedValue(I->getAggregateOperand(), idx_range,
3831 InsertBefore);
3832 }
3833 // If we end up here, the indices of the insertvalue match with those
3834 // requested (though possibly only partially). Now we recursively look at
3835 // the inserted value, passing any remaining indices.
3836 return FindInsertedValue(I->getInsertedValueOperand(),
3837 makeArrayRef(req_idx, idx_range.end()),
3838 InsertBefore);
3839 }
3840
3841 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3842 // If we're extracting a value from an aggregate that was itself extracted
3843 // from something else, we can extract directly from that something else.
3844 // However, we will need to chain I's indices with the requested indices.
3845
3846 // Calculate the number of indices required
3847 unsigned size = I->getNumIndices() + idx_range.size();
3848 // Allocate some space to put the new indices in
3849 SmallVector<unsigned, 5> Idxs;
3850 Idxs.reserve(size);
3851 // Add indices from the extract value instruction
3852 Idxs.append(I->idx_begin(), I->idx_end());
3853
3854 // Add requested indices
3855 Idxs.append(idx_range.begin(), idx_range.end());
3856
3857 assert(Idxs.size() == size
3858 && "Number of indices added not correct?");
3859
3860 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3861 }
3862 // Otherwise, we don't know (such as, extracting from a function return value
3863 // or load instruction)
3864 return nullptr;
3865 }
3866
3867 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3868 unsigned CharSize) {
3869 // Make sure the GEP has exactly three arguments.
3870 if (GEP->getNumOperands() != 3)
3871 return false;
3872
3873 // Make sure the index-ee is a pointer to an array of \p CharSize integers.
3875 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3876 if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3877 return false;
3878
3879 // Check to make sure that the first operand of the GEP is an integer and
3880 // has value 0 so that we are sure we're indexing into the initializer.
3881 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3882 if (!FirstIdx || !FirstIdx->isZero())
3883 return false;
3884
3885 return true;
3886 }
3887
3888 bool llvm::getConstantDataArrayInfo(const Value *V,
3889 ConstantDataArraySlice &Slice,
3890 unsigned ElementSize, uint64_t Offset) {
3891 assert(V);
3892
3893 // Look through bitcast instructions and geps.
3894 V = V->stripPointerCasts();
3895
3896 // If the value is a GEP instruction or constant expression, treat it as an
3897 // offset.
3898 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3899 // The GEP operator should be based on a pointer to a string constant, and
3900 // should be indexing into that string constant.
3901 if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3902 return false;
3903
3904 // If the second index isn't a ConstantInt, then this is a variable index
3905 // into the array. If this occurs, we can't say anything meaningful about
3906 // the string.
3907 uint64_t StartIdx = 0;
3908 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3909 StartIdx = CI->getZExtValue();
3910 else
3911 return false;
3912 return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3913 StartIdx + Offset);
3914 }
3915
3916 // The GEP, whether a constant expression or an instruction, must reference
3917 // a global variable that is a constant and is initialized. The referenced
3918 // constant initializer is the array that we'll use for optimization.
3919 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3920 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3921 return false;
3922
3923 const ConstantDataArray *Array;
3924 ArrayType *ArrayTy;
3925 if (GV->getInitializer()->isNullValue()) {
3926 Type *GVTy = GV->getValueType();
3927 if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3928 // A zeroinitializer for the array; there is no ConstantDataArray.
3929 Array = nullptr;
3930 } else {
3931 const DataLayout &DL = GV->getParent()->getDataLayout();
3932 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
3933 uint64_t Length = SizeInBytes / (ElementSize / 8);
3934 if (Length <= Offset)
3935 return false;
3936
3937 Slice.Array = nullptr;
3938 Slice.Offset = 0;
3939 Slice.Length = Length - Offset;
3940 return true;
3941 }
3942 } else {
3943 // This must be a ConstantDataArray.
3944 Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3945 if (!Array)
3946 return false;
3947 ArrayTy = Array->getType();
3948 }
3949 if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3950 return false;
3951
3952 uint64_t NumElts = ArrayTy->getArrayNumElements();
3953 if (Offset > NumElts)
3954 return false;
3955
3956 Slice.Array = Array;
3957 Slice.Offset = Offset;
3958 Slice.Length = NumElts - Offset;
3959 return true;
3960 }
3961
3962 /// This function computes the length of a null-terminated C string pointed to
3963 /// by V. If successful, it returns true and stores the string in Str.
3964 /// If unsuccessful, it returns false.
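/// For illustration (a hypothetical global, not from the original comment):
/// for `@s = constant [6 x i8] c"hello\00"` this returns true with
/// Str == "hello"; with Offset = 1 it would return "ello".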
3965 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3966 uint64_t Offset, bool TrimAtNul) {
3967 ConstantDataArraySlice Slice;
3968 if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3969 return false;
3970
3971 if (Slice.Array == nullptr) {
3972 if (TrimAtNul) {
3973 Str = StringRef();
3974 return true;
3975 }
3976 if (Slice.Length == 1) {
3977 Str = StringRef("", 1);
3978 return true;
3979 }
3980 // We cannot instantiate a StringRef as we do not have an appropriate string
3981 // of 0s at hand.
3982 return false;
3983 }
3984
3985 // Start out with the entire array in the StringRef.
3986 Str = Slice.Array->getAsString();
3987 // Skip over 'offset' bytes.
3988 Str = Str.substr(Slice.Offset);
3989
3990 if (TrimAtNul) {
3991 // Trim off the \0 and anything after it. If the array is not nul
3992 // terminated, we just return the rest of the array. The client may know
3993 // some other way that the string is length-bound.
3994 Str = Str.substr(0, Str.find('\0'));
3995 }
3996 return true;
3997 }
3998
3999 // These next two are very similar to the above, but also look through PHI
4000 // nodes.
4001 // TODO: See if we can integrate these two together.
4002
4003 /// If we can compute the length of the string pointed to by
4004 /// the specified pointer, return 'len+1'. If we can't, return 0.
4005 static uint64_t GetStringLengthH(const Value *V,
4006 SmallPtrSetImpl<const PHINode*> &PHIs,
4007 unsigned CharSize) {
4008 // Look through noop bitcast instructions.
4009 V = V->stripPointerCasts();
4010
4011 // If this is a PHI node, there are two cases: either we have already seen it
4012 // or we haven't.
4013 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
4014 if (!PHIs.insert(PN).second)
4015 return ~0ULL; // already in the set.
4016
4017 // If it was new, see if all the input strings are the same length.
4018 uint64_t LenSoFar = ~0ULL;
4019 for (Value *IncValue : PN->incoming_values()) {
4020 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
4021 if (Len == 0) return 0; // Unknown length -> unknown.
4022
4023 if (Len == ~0ULL) continue;
4024
4025 if (Len != LenSoFar && LenSoFar != ~0ULL)
4026 return 0; // Disagree -> unknown.
4027 LenSoFar = Len;
4028 }
4029
4030 // Success, all agree.
4031 return LenSoFar;
4032 }
4033
4034 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
4035 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4036 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4037 if (Len1 == 0) return 0;
4038 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4039 if (Len2 == 0) return 0;
4040 if (Len1 == ~0ULL) return Len2;
4041 if (Len2 == ~0ULL) return Len1;
4042 if (Len1 != Len2) return 0;
4043 return Len1;
4044 }
4045
4046 // Otherwise, see if we can read the string.
4047 ConstantDataArraySlice Slice;
4048 if (!getConstantDataArrayInfo(V, Slice, CharSize))
4049 return 0;
4050
4051 if (Slice.Array == nullptr)
4052 return 1;
4053
4054 // Search for nul characters
4055 unsigned NullIndex = 0;
4056 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4057 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4058 break;
4059 }
4060
4061 return NullIndex + 1;
4062 }
4063
4064 /// If we can compute the length of the string pointed to by
4065 /// the specified pointer, return 'len+1'. If we can't, return 0.
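/// For illustration (a hypothetical global): for a pointer to
/// `@s = constant [6 x i8] c"hello\00"` this returns 6, i.e. strlen + 1.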
4066 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4067 if (!V->getType()->isPointerTy())
4068 return 0;
4069
4070 SmallPtrSet<const PHINode*, 32> PHIs;
4071 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4072 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
4073 // the length of an empty string (1, i.e. just the nul terminator).
4074 return Len == ~0ULL ? 1 : Len;
4075 }
4076
4077 const Value *
4078 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4079 bool MustPreserveNullness) {
4080 assert(Call &&
4081 "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4082 if (const Value *RV = Call->getReturnedArgOperand())
4083 return RV;
4084 // This can be used only as an aliasing property.
4085 if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4086 Call, MustPreserveNullness))
4087 return Call->getArgOperand(0);
4088 return nullptr;
4089 }
4090
4091 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4092 const CallBase *Call, bool MustPreserveNullness) {
4093 switch (Call->getIntrinsicID()) {
4094 case Intrinsic::launder_invariant_group:
4095 case Intrinsic::strip_invariant_group:
4096 case Intrinsic::aarch64_irg:
4097 case Intrinsic::aarch64_tagp:
4098 return true;
4099 case Intrinsic::ptrmask:
4100 return !MustPreserveNullness;
4101 default:
4102 return false;
4103 }
4104 }
4105
4106 /// \p PN defines a loop-variant pointer to an object. Check if the
4107 /// previous iteration of the loop was referring to the same object as \p PN.
4108 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4109 const LoopInfo *LI) {
4110 // Find the loop-defined value.
4111 Loop *L = LI->getLoopFor(PN->getParent());
4112 if (PN->getNumIncomingValues() != 2)
4113 return true;
4114
4115 // Find the value from previous iteration.
4116 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4117 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4118 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4119 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4120 return true;
4121
4122 // If a new pointer is loaded in the loop, the pointer references a different
4123 // object in every iteration. E.g.:
4124 // for (i)
4125 // int *p = a[i];
4126 // ...
4127 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4128 if (!L->isLoopInvariant(Load->getPointerOperand()))
4129 return false;
4130 return true;
4131 }
4132
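// For illustration (hypothetical IR): for
// `%p = getelementptr i8, i8* bitcast (i32* @g to i8*), i64 4`, the walk
// below strips the GEP and the bitcast and returns the global @g.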
4133 Value *llvm::getUnderlyingObject(Value *V, unsigned MaxLookup) {
4134 if (!V->getType()->isPointerTy())
4135 return V;
4136 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4137 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
4138 V = GEP->getPointerOperand();
4139 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4140 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4141 V = cast<Operator>(V)->getOperand(0);
4142 if (!V->getType()->isPointerTy())
4143 return V;
4144 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
4145 if (GA->isInterposable())
4146 return V;
4147 V = GA->getAliasee();
4148 } else {
4149 if (auto *PHI = dyn_cast<PHINode>(V)) {
4150 // Look through single-arg phi nodes created by LCSSA.
4151 if (PHI->getNumIncomingValues() == 1) {
4152 V = PHI->getIncomingValue(0);
4153 continue;
4154 }
4155 } else if (auto *Call = dyn_cast<CallBase>(V)) {
4156 // CaptureTracking can know about special capturing properties of some
4157 // intrinsics like launder.invariant.group that can't be expressed with
4158 // attributes, but that have properties like returning an aliasing
4159 // pointer. Because some analyses may assume that a nocapture pointer is
4160 // not returned from a special intrinsic (since the function would have
4161 // to be marked with the 'returned' attribute), it is crucial to use this
4162 // function, which stays in sync with CaptureTracking. Not using it may
4163 // cause weird miscompilations where two aliasing pointers are assumed to
4164 // be noalias.
4165 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
4166 V = RP;
4167 continue;
4168 }
4169 }
4170
4171 return V;
4172 }
4173 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
4174 }
4175 return V;
4176 }
4177
4178 void llvm::getUnderlyingObjects(const Value *V,
4179 SmallVectorImpl<const Value *> &Objects,
4180 LoopInfo *LI, unsigned MaxLookup) {
4181 SmallPtrSet<const Value *, 4> Visited;
4182 SmallVector<const Value *, 4> Worklist;
4183 Worklist.push_back(V);
4184 do {
4185 const Value *P = Worklist.pop_back_val();
4186 P = getUnderlyingObject(P, MaxLookup);
4187
4188 if (!Visited.insert(P).second)
4189 continue;
4190
4191 if (auto *SI = dyn_cast<SelectInst>(P)) {
4192 Worklist.push_back(SI->getTrueValue());
4193 Worklist.push_back(SI->getFalseValue());
4194 continue;
4195 }
4196
4197 if (auto *PN = dyn_cast<PHINode>(P)) {
4198 // If this PHI changes the underlying object in every iteration of the
4199 // loop, don't look through it. Consider:
4200 // int **A;
4201 // for (i) {
4202 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
4203 // Curr = A[i];
4204 // *Prev, *Curr;
4205 //
4206 // Prev is tracking Curr one iteration behind so they refer to different
4207 // underlying objects.
4208 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
4209 isSameUnderlyingObjectInLoop(PN, LI))
4210 append_range(Worklist, PN->incoming_values());
4211 continue;
4212 }
4213
4214 Objects.push_back(P);
4215 } while (!Worklist.empty());
4216 }
4217
4218 /// This is the function that does the work of looking through basic
4219 /// ptrtoint+arithmetic+inttoptr sequences.
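// For illustration (hypothetical IR): given `%i = ptrtoint i32* %obj to i64`
// and `%j = add i64 %i, 8`, starting from %j this steps through the add and
// returns %obj when it reaches the ptrtoint.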
4220 static const Value *getUnderlyingObjectFromInt(const Value *V) {
4221 do {
4222 if (const Operator *U = dyn_cast<Operator>(V)) {
4223 // If we find a ptrtoint, we can transfer control back to the
4224 // regular getUnderlyingObject.
4225 if (U->getOpcode() == Instruction::PtrToInt)
4226 return U->getOperand(0);
4227 // If we find an add of a constant, a multiplied value, or a phi, it's
4228 // likely that the other operand will lead us to the base
4229 // object. We don't have to worry about the case where the
4230 // object address is somehow being computed by the multiply,
4231 // because our callers only care when the result is an
4232 // identifiable object.
4233 if (U->getOpcode() != Instruction::Add ||
4234 (!isa<ConstantInt>(U->getOperand(1)) &&
4235 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
4236 !isa<PHINode>(U->getOperand(1))))
4237 return V;
4238 V = U->getOperand(0);
4239 } else {
4240 return V;
4241 }
4242 assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
4243 } while (true);
4244 }
4245
4246 /// This is a wrapper around getUnderlyingObjects and adds support for basic
4247 /// ptrtoint+arithmetic+inttoptr sequences.
4248 /// It returns false if an unidentified object is found in getUnderlyingObjects.
4249 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
4250 SmallVectorImpl<Value *> &Objects) {
4251 SmallPtrSet<const Value *, 16> Visited;
4252 SmallVector<const Value *, 4> Working(1, V);
4253 do {
4254 V = Working.pop_back_val();
4255
4256 SmallVector<const Value *, 4> Objs;
4257 getUnderlyingObjects(V, Objs);
4258
4259 for (const Value *V : Objs) {
4260 if (!Visited.insert(V).second)
4261 continue;
4262 if (Operator::getOpcode(V) == Instruction::IntToPtr) {
4263 const Value *O =
4264 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
4265 if (O->getType()->isPointerTy()) {
4266 Working.push_back(O);
4267 continue;
4268 }
4269 }
4270 // If getUnderlyingObjects fails to find an identifiable object,
4271 // getUnderlyingObjectsForCodeGen also fails for safety.
4272 if (!isIdentifiedObject(V)) {
4273 Objects.clear();
4274 return false;
4275 }
4276 Objects.push_back(const_cast<Value *>(V));
4277 }
4278 } while (!Working.empty());
4279 return true;
4280 }
4281
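// For illustration (hypothetical IR): given `%a = alloca i32` and
// `%p = bitcast i32* %a to i8*`, findAllocaForValue(%p) returns %a; with
// OffsetZero set, any GEP with a non-zero index on the path yields nullptr.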
4282 AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
4283 AllocaInst *Result = nullptr;
4284 SmallPtrSet<Value *, 4> Visited;
4285 SmallVector<Value *, 4> Worklist;
4286
4287 auto AddWork = [&](Value *V) {
4288 if (Visited.insert(V).second)
4289 Worklist.push_back(V);
4290 };
4291
4292 AddWork(V);
4293 do {
4294 V = Worklist.pop_back_val();
4295 assert(Visited.count(V));
4296
4297 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
4298 if (Result && Result != AI)
4299 return nullptr;
4300 Result = AI;
4301 } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
4302 AddWork(CI->getOperand(0));
4303 } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
4304 for (Value *IncValue : PN->incoming_values())
4305 AddWork(IncValue);
4306 } else if (auto *SI = dyn_cast<SelectInst>(V)) {
4307 AddWork(SI->getTrueValue());
4308 AddWork(SI->getFalseValue());
4309 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
4310 if (OffsetZero && !GEP->hasAllZeroIndices())
4311 return nullptr;
4312 AddWork(GEP->getPointerOperand());
4313 } else {
4314 return nullptr;
4315 }
4316 } while (!Worklist.empty());
4317
4318 return Result;
4319 }
4320
4321 static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4322 const Value *V, bool AllowLifetime, bool AllowDroppable) {
4323 for (const User *U : V->users()) {
4324 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4325 if (!II)
4326 return false;
4327
4328 if (AllowLifetime && II->isLifetimeStartOrEnd())
4329 continue;
4330
4331 if (AllowDroppable && II->isDroppable())
4332 continue;
4333
4334 return false;
4335 }
4336 return true;
4337 }
4338
4339 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
4340 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4341 V, /* AllowLifetime */ true, /* AllowDroppable */ false);
4342 }
4343 bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
4344 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4345 V, /* AllowLifetime */ true, /* AllowDroppable */ true);
4346 }
4347
4348 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
4349 if (!LI.isUnordered())
4350 return true;
4351 const Function &F = *LI.getFunction();
  // A speculative load may create a race that did not exist in the source.
4353 return F.hasFnAttribute(Attribute::SanitizeThread) ||
         // A speculative load may load data from dirty regions.
4355 F.hasFnAttribute(Attribute::SanitizeAddress) ||
4356 F.hasFnAttribute(Attribute::SanitizeHWAddress);
4357 }
bool llvm::isSafeToSpeculativelyExecute(const Value *V,
                                        const Instruction *CtxI,
                                        const DominatorTree *DT) {
4363 const Operator *Inst = dyn_cast<Operator>(V);
4364 if (!Inst)
4365 return false;
4366
4367 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
4368 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
4369 if (C->canTrap())
4370 return false;
4371
4372 switch (Inst->getOpcode()) {
4373 default:
4374 return true;
4375 case Instruction::UDiv:
4376 case Instruction::URem: {
4377 // x / y is undefined if y == 0.
4378 const APInt *V;
4379 if (match(Inst->getOperand(1), m_APInt(V)))
4380 return *V != 0;
4381 return false;
4382 }
4383 case Instruction::SDiv:
4384 case Instruction::SRem: {
    // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
4386 const APInt *Numerator, *Denominator;
4387 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
4388 return false;
4389 // We cannot hoist this division if the denominator is 0.
4390 if (*Denominator == 0)
4391 return false;
    // It's safe to hoist if the denominator is neither 0 nor -1.
4393 if (!Denominator->isAllOnesValue())
4394 return true;
    // At this point we know that the denominator is -1. It is safe to hoist
    // as long as we know that the numerator is not INT_MIN.
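    // E.g. for i8: sdiv -128, -1 would be +128, which is not representable,
    // so speculating the division could introduce undefined behavior.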
4397 if (match(Inst->getOperand(0), m_APInt(Numerator)))
4398 return !Numerator->isMinSignedValue();
4399 // The numerator *might* be MinSignedValue.
4400 return false;
4401 }
4402 case Instruction::Load: {
4403 const LoadInst *LI = cast<LoadInst>(Inst);
4404 if (mustSuppressSpeculation(*LI))
4405 return false;
4406 const DataLayout &DL = LI->getModule()->getDataLayout();
4407 return isDereferenceableAndAlignedPointer(
4408 LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
4409 DL, CtxI, DT);
4410 }
4411 case Instruction::Call: {
4412 auto *CI = cast<const CallInst>(Inst);
4413 const Function *Callee = CI->getCalledFunction();
4414
4415 // The called function could have undefined behavior or side-effects, even
4416 // if marked readnone nounwind.
4417 return Callee && Callee->isSpeculatable();
4418 }
4419 case Instruction::VAArg:
4420 case Instruction::Alloca:
4421 case Instruction::Invoke:
4422 case Instruction::CallBr:
4423 case Instruction::PHI:
4424 case Instruction::Store:
4425 case Instruction::Ret:
4426 case Instruction::Br:
4427 case Instruction::IndirectBr:
4428 case Instruction::Switch:
4429 case Instruction::Unreachable:
4430 case Instruction::Fence:
4431 case Instruction::AtomicRMW:
4432 case Instruction::AtomicCmpXchg:
4433 case Instruction::LandingPad:
4434 case Instruction::Resume:
4435 case Instruction::CatchSwitch:
4436 case Instruction::CatchPad:
4437 case Instruction::CatchRet:
4438 case Instruction::CleanupPad:
4439 case Instruction::CleanupRet:
4440 return false; // Misc instructions which have effects
4441 }
4442 }
4443
bool llvm::mayBeMemoryDependent(const Instruction &I) {
4445 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
4446 }
4447
4448 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4450 switch (OR) {
4451 case ConstantRange::OverflowResult::MayOverflow:
4452 return OverflowResult::MayOverflow;
4453 case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4454 return OverflowResult::AlwaysOverflowsLow;
4455 case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4456 return OverflowResult::AlwaysOverflowsHigh;
4457 case ConstantRange::OverflowResult::NeverOverflows:
4458 return OverflowResult::NeverOverflows;
4459 }
4460 llvm_unreachable("Unknown OverflowResult");
4461 }
4462
4463 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
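/// For example (illustrative): if the known bits of an i8 value prove that
/// its top four bits are zero, the bits-based range is [0, 16); if
/// computeConstantRange() independently yields [3, 200), the intersection
/// [3, 16) is tighter than either source alone.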
static ConstantRange computeConstantRangeIncludingKnownBits(
    const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
    OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4468 KnownBits Known = computeKnownBits(
4469 V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4470 ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4471 ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4472 ConstantRange::PreferredRangeType RangeType =
4473 ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4474 return CR1.intersectWith(CR2, RangeType);
4475 }
4476
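// Worked example (illustrative, not from the original source): for i8
// operands whose known bits bound both LHS and RHS to [0, 16), the largest
// unsigned product is 15 * 15 == 225, which fits in 8 bits, so
// unsignedMulMayOverflow reports NeverOverflows.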
OverflowResult llvm::computeOverflowForUnsignedMul(
    const Value *LHS, const Value *RHS, const DataLayout &DL,
    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
    bool UseInstrInfo) {
4481 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4482 nullptr, UseInstrInfo);
4483 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4484 nullptr, UseInstrInfo);
4485 ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4486 ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4487 return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4488 }
4489
OverflowResult
llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                  const DataLayout &DL, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
4495 // Multiplying n * m significant bits yields a result of n + m significant
4496 // bits. If the total number of significant bits does not exceed the
4497 // result bit width (minus 1), there is no overflow.
4498 // This means if we have enough leading sign bits in the operands
4499 // we can guarantee that the result does not overflow.
4500 // Ref: "Hacker's Delight" by Henry Warren
4501 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4502
4503 // Note that underestimating the number of sign bits gives a more
4504 // conservative answer.
4505 unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4506 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
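  // E.g. (illustrative) two i16 operands with 9 leading sign bits each give
  // SignBits == 18 > BitWidth + 1 == 17, so the product cannot overflow.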
4507
4508 // First handle the easy case: if we have enough sign bits there's
4509 // definitely no overflow.
4510 if (SignBits > BitWidth + 1)
4511 return OverflowResult::NeverOverflows;
4512
4513 // There are two ambiguous cases where there can be no overflow:
4514 // SignBits == BitWidth + 1 and
4515 // SignBits == BitWidth
  // The second case is difficult to check, so we only handle the first one.
4518 if (SignBits == BitWidth + 1) {
4519 // It overflows only when both arguments are negative and the true
4520 // product is exactly the minimum negative number.
4521 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4522 // For simplicity we just check if at least one side is not negative.
4523 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4524 nullptr, UseInstrInfo);
4525 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4526 nullptr, UseInstrInfo);
4527 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4528 return OverflowResult::NeverOverflows;
4529 }
4530 return OverflowResult::MayOverflow;
4531 }
4532
OverflowResult llvm::computeOverflowForUnsignedAdd(
    const Value *LHS, const Value *RHS, const DataLayout &DL,
    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
    bool UseInstrInfo) {
4537 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4538 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4539 nullptr, UseInstrInfo);
4540 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4541 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4542 nullptr, UseInstrInfo);
4543 return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4544 }
4545
static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
                                                  const Value *RHS,
                                                  const AddOperator *Add,
                                                  const DataLayout &DL,
                                                  AssumptionCache *AC,
                                                  const Instruction *CxtI,
                                                  const DominatorTree *DT) {
4553 if (Add && Add->hasNoSignedWrap()) {
4554 return OverflowResult::NeverOverflows;
4555 }
4556
4557 // If LHS and RHS each have at least two sign bits, the addition will look
4558 // like
4559 //
4560 // XX..... +
4561 // YY.....
4562 //
4563 // If the carry into the most significant position is 0, X and Y can't both
4564 // be 1 and therefore the carry out of the addition is also 0.
4565 //
4566 // If the carry into the most significant position is 1, X and Y can't both
4567 // be 0 and therefore the carry out of the addition is also 1.
4568 //
4569 // Since the carry into the most significant position is always equal to
4570 // the carry out of the addition, there is no signed overflow.
4571 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4572 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4573 return OverflowResult::NeverOverflows;
4574
4575 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4576 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4577 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4578 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4579 OverflowResult OR =
4580 mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4581 if (OR != OverflowResult::MayOverflow)
4582 return OR;
4583
  // The remaining code needs Add to be available. Return early if it is not.
  if (!Add)
    return OverflowResult::MayOverflow;
4587
4588 // If the sign of Add is the same as at least one of the operands, this add
4589 // CANNOT overflow. If this can be determined from the known bits of the
4590 // operands the above signedAddMayOverflow() check will have already done so.
4591 // The only other way to improve on the known bits is from an assumption, so
4592 // call computeKnownBitsFromAssume() directly.
4593 bool LHSOrRHSKnownNonNegative =
4594 (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4595 bool LHSOrRHSKnownNegative =
4596 (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4597 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4598 KnownBits AddKnown(LHSRange.getBitWidth());
4599 computeKnownBitsFromAssume(
4600 Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4601 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4602 (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4603 return OverflowResult::NeverOverflows;
4604 }
4605
4606 return OverflowResult::MayOverflow;
4607 }
4608
OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
                                                   const Value *RHS,
                                                   const DataLayout &DL,
                                                   AssumptionCache *AC,
                                                   const Instruction *CxtI,
                                                   const DominatorTree *DT) {
4615 // Checking for conditions implied by dominating conditions may be expensive.
4616 // Limit it to usub_with_overflow calls for now.
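  //
  // Illustrative IR for this situation (names hypothetical):
  //
  //   %cmp = icmp uge i32 %x, %y
  //   br i1 %cmp, label %do.sub, label %skip
  // do.sub:
  //   %s = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %x, i32 %y)
  //
  // The dominating condition proves %x u>= %y, so the subtraction can never
  // wrap.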
4617 if (match(CxtI,
4618 m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
4619 if (auto C =
4620 isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
4621 if (*C)
4622 return OverflowResult::NeverOverflows;
4623 return OverflowResult::AlwaysOverflowsLow;
4624 }
4625 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4626 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4627 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4628 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4629 return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4630 }
4631
OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
                                                 const Value *RHS,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
4638 // If LHS and RHS each have at least two sign bits, the subtraction
4639 // cannot overflow.
4640 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4641 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4642 return OverflowResult::NeverOverflows;
4643
4644 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4645 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4646 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4647 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4648 return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4649 }
4650
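// Illustrative IR for the pattern this function recognizes (all names
// hypothetical):
//
//   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %ov  = extractvalue { i32, i1 } %res, 1
//   br i1 %ov, label %trap, label %cont
// cont:
//   %sum = extractvalue { i32, i1 } %res, 0  ; only reached when no overflow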
bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                                     const DominatorTree &DT) {
4653 SmallVector<const BranchInst *, 2> GuardingBranches;
4654 SmallVector<const ExtractValueInst *, 2> Results;
4655
4656 for (const User *U : WO->users()) {
4657 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4658 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4659
4660 if (EVI->getIndices()[0] == 0)
4661 Results.push_back(EVI);
4662 else {
4663 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4664
4665 for (const auto *U : EVI->users())
4666 if (const auto *B = dyn_cast<BranchInst>(U)) {
4667 assert(B->isConditional() && "How else is it using an i1?");
4668 GuardingBranches.push_back(B);
4669 }
4670 }
4671 } else {
4672 // We are using the aggregate directly in a way we don't want to analyze
4673 // here (storing it to a global, say).
4674 return false;
4675 }
4676 }
4677
4678 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4679 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4680 if (!NoWrapEdge.isSingleEdge())
4681 return false;
4682
4683 // Check if all users of the add are provably no-wrap.
4684 for (const auto *Result : Results) {
      // If the extractvalue itself is not executed on overflow, then we don't
      // need to check each use separately, since domination is transitive.
4687 if (DT.dominates(NoWrapEdge, Result->getParent()))
4688 continue;
4689
4690 for (auto &RU : Result->uses())
4691 if (!DT.dominates(NoWrapEdge, RU))
4692 return false;
4693 }
4694
4695 return true;
4696 };
4697
4698 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4699 }
4700
static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly) {
  // See whether Op has flags that may create poison
4703 if (const auto *OvOp = dyn_cast<OverflowingBinaryOperator>(Op)) {
4704 if (OvOp->hasNoSignedWrap() || OvOp->hasNoUnsignedWrap())
4705 return true;
4706 }
4707 if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(Op))
4708 if (ExactOp->isExact())
4709 return true;
4710 if (const auto *FP = dyn_cast<FPMathOperator>(Op)) {
4711 auto FMF = FP->getFastMathFlags();
4712 if (FMF.noNaNs() || FMF.noInfs())
4713 return true;
4714 }
4715
4716 unsigned Opcode = Op->getOpcode();
4717
4718 // Check whether opcode is a poison/undef-generating operation
4719 switch (Opcode) {
4720 case Instruction::Shl:
4721 case Instruction::AShr:
4722 case Instruction::LShr: {
    // Shifts return poison if the shift amount is equal to or larger than
    // the bitwidth.
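    // E.g. 'shl i8 %x, 8' is poison, as is any vector lane whose constant
    // shift amount is >= the element bitwidth.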
4724 if (auto *C = dyn_cast<Constant>(Op->getOperand(1))) {
4725 SmallVector<Constant *, 4> ShiftAmounts;
4726 if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
4727 unsigned NumElts = FVTy->getNumElements();
4728 for (unsigned i = 0; i < NumElts; ++i)
4729 ShiftAmounts.push_back(C->getAggregateElement(i));
4730 } else if (isa<ScalableVectorType>(C->getType()))
4731 return true; // Can't tell, just return true to be safe
4732 else
4733 ShiftAmounts.push_back(C);
4734
4735 bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) {
4736 auto *CI = dyn_cast_or_null<ConstantInt>(C);
4737 return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
4738 });
4739 return !Safe;
4740 }
4741 return true;
4742 }
4743 case Instruction::FPToSI:
4744 case Instruction::FPToUI:
4745 // fptosi/ui yields poison if the resulting value does not fit in the
4746 // destination type.
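    // E.g. 'fptosi float 1.0e10 to i8' yields poison because the value is far
    // outside the i8 range.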
4747 return true;
4748 case Instruction::Call:
4749 case Instruction::CallBr:
4750 case Instruction::Invoke: {
4751 const auto *CB = cast<CallBase>(Op);
4752 return !CB->hasRetAttr(Attribute::NoUndef);
4753 }
4754 case Instruction::InsertElement:
4755 case Instruction::ExtractElement: {
    // If the index exceeds the length of the vector, the result is poison.
4757 auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
4758 unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
4759 auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
4760 if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()))
4761 return true;
4762 return false;
4763 }
4764 case Instruction::ShuffleVector: {
4765 // shufflevector may return undef.
4766 if (PoisonOnly)
4767 return false;
4768 ArrayRef<int> Mask = isa<ConstantExpr>(Op)
4769 ? cast<ConstantExpr>(Op)->getShuffleMask()
4770 : cast<ShuffleVectorInst>(Op)->getShuffleMask();
4771 return is_contained(Mask, UndefMaskElem);
4772 }
4773 case Instruction::FNeg:
4774 case Instruction::PHI:
4775 case Instruction::Select:
4776 case Instruction::URem:
4777 case Instruction::SRem:
4778 case Instruction::ExtractValue:
4779 case Instruction::InsertValue:
4780 case Instruction::Freeze:
4781 case Instruction::ICmp:
4782 case Instruction::FCmp:
4783 return false;
4784 case Instruction::GetElementPtr: {
4785 const auto *GEP = cast<GEPOperator>(Op);
4786 return GEP->isInBounds();
4787 }
4788 default: {
4789 const auto *CE = dyn_cast<ConstantExpr>(Op);
4790 if (isa<CastInst>(Op) || (CE && CE->isCast()))
4791 return false;
4792 else if (Instruction::isBinaryOp(Opcode))
4793 return false;
4794 // Be conservative and return true.
4795 return true;
4796 }
4797 }
4798 }
4799
bool llvm::canCreateUndefOrPoison(const Operator *Op) {
4801 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false);
4802 }
4803
bool llvm::canCreatePoison(const Operator *Op) {
4805 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true);
4806 }
4807
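/// Returns true if 'ValAssumedPoison' being poison forces V to be poison, by
/// walking a small number of poison-propagating operations. Illustrative
/// example: with ValAssumedPoison == %x and V == (add (mul %x, %y), 1), both
/// mul and add propagate poison, so poison in %x implies poison in V.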
static bool directlyImpliesPoison(const Value *ValAssumedPoison,
                                  const Value *V, unsigned Depth) {
4810 if (ValAssumedPoison == V)
4811 return true;
4812
4813 const unsigned MaxDepth = 2;
4814 if (Depth >= MaxDepth)
4815 return false;
4816
4817 const auto *I = dyn_cast<Instruction>(V);
4818 if (I && propagatesPoison(cast<Operator>(I))) {
4819 return any_of(I->operands(), [=](const Value *Op) {
4820 return directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
4821 });
4822 }
4823 return false;
4824 }
4825
static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
                          unsigned Depth) {
4828 if (isGuaranteedNotToBeUndefOrPoison(ValAssumedPoison))
4829 return true;
4830
4831 if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
4832 return true;
4833
4834 const unsigned MaxDepth = 2;
4835 if (Depth >= MaxDepth)
4836 return false;
4837
4838 const auto *I = dyn_cast<Instruction>(ValAssumedPoison);
4839 if (I && !canCreatePoison(cast<Operator>(I))) {
4840 return all_of(I->operands(), [=](const Value *Op) {
4841 return impliesPoison(Op, V, Depth + 1);
4842 });
4843 }
4844 return false;
4845 }
4846
bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
4848 return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
4849 }
4850
4851 static bool programUndefinedIfUndefOrPoison(const Value *V,
4852 bool PoisonOnly);
4853
static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
                                             AssumptionCache *AC,
                                             const Instruction *CtxI,
                                             const DominatorTree *DT,
                                             unsigned Depth, bool PoisonOnly) {
4859 if (Depth >= MaxAnalysisRecursionDepth)
4860 return false;
4861
4862 if (isa<MetadataAsValue>(V))
4863 return false;
4864
4865 if (const auto *A = dyn_cast<Argument>(V)) {
4866 if (A->hasAttribute(Attribute::NoUndef))
4867 return true;
4868 }
4869
4870 if (auto *C = dyn_cast<Constant>(V)) {
4871 if (isa<UndefValue>(C))
4872 return PoisonOnly && !isa<PoisonValue>(C);
4873
4874 if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
4875 isa<ConstantPointerNull>(C) || isa<Function>(C))
4876 return true;
4877
4878 if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
4879 return (PoisonOnly ? !C->containsPoisonElement()
4880 : !C->containsUndefOrPoisonElement()) &&
4881 !C->containsConstantExpression();
4882 }
4883
  // Strip cast operations from a pointer value.
  // Note that stripPointerCastsSameRepresentation can strip off getelementptr
  // inbounds with zero offset. To guarantee that the result isn't poison, the
  // stripped pointer is checked: it has to point into an allocated object or
  // be null, which ensures that `inbounds` getelementptrs with a zero offset
  // could not have produced poison.
  // It can also strip off addrspacecasts that do not change the bit
  // representation; we believe such an addrspacecast is equivalent to a no-op.
4892 auto *StrippedV = V->stripPointerCastsSameRepresentation();
4893 if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
4894 isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
4895 return true;
4896
4897 auto OpCheck = [&](const Value *V) {
4898 return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
4899 PoisonOnly);
4900 };
4901
4902 if (auto *Opr = dyn_cast<Operator>(V)) {
4903 // If the value is a freeze instruction, then it can never
4904 // be undef or poison.
4905 if (isa<FreezeInst>(V))
4906 return true;
4907
4908 if (const auto *CB = dyn_cast<CallBase>(V)) {
4909 if (CB->hasRetAttr(Attribute::NoUndef))
4910 return true;
4911 }
4912
4913 if (const auto *PN = dyn_cast<PHINode>(V)) {
4914 unsigned Num = PN->getNumIncomingValues();
4915 bool IsWellDefined = true;
4916 for (unsigned i = 0; i < Num; ++i) {
4917 auto *TI = PN->getIncomingBlock(i)->getTerminator();
4918 if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
4919 DT, Depth + 1, PoisonOnly)) {
4920 IsWellDefined = false;
4921 break;
4922 }
4923 }
4924 if (IsWellDefined)
4925 return true;
4926 } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
4927 return true;
4928 }
4929
4930 if (auto *I = dyn_cast<LoadInst>(V))
4931 if (I->getMetadata(LLVMContext::MD_noundef))
4932 return true;
4933
4934 if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
4935 return true;
4936
  // CtxI may be null or a cloned instruction.
4938 if (!CtxI || !CtxI->getParent() || !DT)
4939 return false;
4940
4941 auto *DNode = DT->getNode(CtxI->getParent());
4942 if (!DNode)
4943 // Unreachable block
4944 return false;
4945
4946 // If V is used as a branch condition before reaching CtxI, V cannot be
4947 // undef or poison.
4948 // br V, BB1, BB2
4949 // BB1:
4950 // CtxI ; V cannot be undef or poison here
4951 auto *Dominator = DNode->getIDom();
4952 while (Dominator) {
4953 auto *TI = Dominator->getBlock()->getTerminator();
4954
4955 Value *Cond = nullptr;
4956 if (auto BI = dyn_cast<BranchInst>(TI)) {
4957 if (BI->isConditional())
4958 Cond = BI->getCondition();
4959 } else if (auto SI = dyn_cast<SwitchInst>(TI)) {
4960 Cond = SI->getCondition();
4961 }
4962
4963 if (Cond) {
4964 if (Cond == V)
4965 return true;
4966 else if (PoisonOnly && isa<Operator>(Cond)) {
4967 // For poison, we can analyze further
4968 auto *Opr = cast<Operator>(Cond);
4969 if (propagatesPoison(Opr) && is_contained(Opr->operand_values(), V))
4970 return true;
4971 }
4972 }
4973
4974 Dominator = Dominator->getIDom();
4975 }
4976
4977 SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NoUndef};
4978 if (getKnowledgeValidInContext(V, AttrKinds, CtxI, DT, AC))
4979 return true;
4980
4981 return false;
4982 }
4983
bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
                                            const Instruction *CtxI,
                                            const DominatorTree *DT,
                                            unsigned Depth) {
4988 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false);
4989 }
4990
bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
                                     const Instruction *CtxI,
                                     const DominatorTree *DT, unsigned Depth) {
4994 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true);
4995 }
4996
OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
5002 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
5003 Add, DL, AC, CxtI, DT);
5004 }
5005
OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
                                                 const Value *RHS,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
5012 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
5013 }
5014
bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
5016 // Note: An atomic operation isn't guaranteed to return in a reasonable amount
5017 // of time because it's possible for another thread to interfere with it for an
5018 // arbitrary length of time, but programs aren't allowed to rely on that.
5019
5020 // If there is no successor, then execution can't transfer to it.
5021 if (isa<ReturnInst>(I))
5022 return false;
5023 if (isa<UnreachableInst>(I))
5024 return false;
5025
5026 // An instruction that returns without throwing must transfer control flow
5027 // to a successor.
5028 return !I->mayThrow() && I->willReturn();
5029 }
5030
bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
  // TODO: This is slightly conservative for invoke instructions since exiting
  // via an exception *is* normal control flow for them.
5034 for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
5035 if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
5036 return false;
5037 return true;
5038 }
5039
bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                                  const Loop *L) {
5042 // The loop header is guaranteed to be executed for every iteration.
5043 //
5044 // FIXME: Relax this constraint to cover all basic blocks that are
5045 // guaranteed to be executed at every iteration.
5046 if (I->getParent() != L->getHeader()) return false;
5047
5048 for (const Instruction &LI : *L->getHeader()) {
5049 if (&LI == I) return true;
5050 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
5051 }
5052 llvm_unreachable("Instruction not contained in its own parent basic block.");
5053 }
5054
bool llvm::propagatesPoison(const Operator *I) {
5056 switch (I->getOpcode()) {
5057 case Instruction::Freeze:
5058 case Instruction::Select:
5059 case Instruction::PHI:
5060 case Instruction::Call:
5061 case Instruction::Invoke:
5062 return false;
5063 case Instruction::ICmp:
5064 case Instruction::FCmp:
5065 case Instruction::GetElementPtr:
5066 return true;
5067 default:
5068 if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
5069 return true;
5070
5071 // Be conservative and return false.
5072 return false;
5073 }
5074 }
5075
void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
                                     SmallPtrSetImpl<const Value *> &Operands) {
5078 switch (I->getOpcode()) {
5079 case Instruction::Store:
5080 Operands.insert(cast<StoreInst>(I)->getPointerOperand());
5081 break;
5082
5083 case Instruction::Load:
5084 Operands.insert(cast<LoadInst>(I)->getPointerOperand());
5085 break;
5086
5087 case Instruction::AtomicCmpXchg:
5088 Operands.insert(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
5089 break;
5090
5091 case Instruction::AtomicRMW:
5092 Operands.insert(cast<AtomicRMWInst>(I)->getPointerOperand());
5093 break;
5094
5095 case Instruction::UDiv:
5096 case Instruction::SDiv:
5097 case Instruction::URem:
5098 case Instruction::SRem:
5099 Operands.insert(I->getOperand(1));
5100 break;
5101
5102 case Instruction::Call:
5103 case Instruction::Invoke: {
5104 const CallBase *CB = cast<CallBase>(I);
5105 if (CB->isIndirectCall())
5106 Operands.insert(CB->getCalledOperand());
5107 for (unsigned i = 0; i < CB->arg_size(); ++i) {
5108 if (CB->paramHasAttr(i, Attribute::NoUndef))
5109 Operands.insert(CB->getArgOperand(i));
5110 }
5111 break;
5112 }
5113
5114 default:
5115 break;
5116 }
5117 }
5118
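/// Illustrative example: for 'udiv i32 %n, %d' the divisor %d is a guaranteed
/// non-poison operand, so if KnownPoison contains %d, executing the udiv is
/// immediate UB and this returns true.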
bool llvm::mustTriggerUB(const Instruction *I,
                         const SmallSet<const Value *, 16>& KnownPoison) {
5121 SmallPtrSet<const Value *, 4> NonPoisonOps;
5122 getGuaranteedNonPoisonOps(I, NonPoisonOps);
5123
5124 for (const auto *V : NonPoisonOps)
5125 if (KnownPoison.count(V))
5126 return true;
5127
5128 return false;
5129 }
5130
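// Illustrative example (hypothetical IR): if V is %d and its block later
// unconditionally executes 'udiv i32 %n, %d', then %d being poison would make
// the program undefined, so the poison-only query returns true.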
static bool programUndefinedIfUndefOrPoison(const Value *V,
                                            bool PoisonOnly) {
5133 // We currently only look for uses of values within the same basic
5134 // block, as that makes it easier to guarantee that the uses will be
5135 // executed given that Inst is executed.
5136 //
5137 // FIXME: Expand this to consider uses beyond the same basic block. To do
5138 // this, look out for the distinction between post-dominance and strong
5139 // post-dominance.
5140 const BasicBlock *BB = nullptr;
5141 BasicBlock::const_iterator Begin;
5142 if (const auto *Inst = dyn_cast<Instruction>(V)) {
5143 BB = Inst->getParent();
5144 Begin = Inst->getIterator();
5145 Begin++;
5146 } else if (const auto *Arg = dyn_cast<Argument>(V)) {
5147 BB = &Arg->getParent()->getEntryBlock();
5148 Begin = BB->begin();
5149 } else {
5150 return false;
5151 }
5152
5153 // Limit number of instructions we look at, to avoid scanning through large
5154 // blocks. The current limit is chosen arbitrarily.
5155 unsigned ScanLimit = 32;
5156 BasicBlock::const_iterator End = BB->end();
5157
5158 if (!PoisonOnly) {
5159 // Be conservative & just check whether a value is passed to a noundef
5160 // argument.
5161 // Instructions that raise UB with a poison operand are well-defined
5162 // or have unclear semantics when the input is partially undef.
5163 // For example, 'udiv x, (undef | 1)' isn't UB.
5164
5165 for (auto &I : make_range(Begin, End)) {
5166 if (isa<DbgInfoIntrinsic>(I))
5167 continue;
5168 if (--ScanLimit == 0)
5169 break;
5170
5171 if (const auto *CB = dyn_cast<CallBase>(&I)) {
5172 for (unsigned i = 0; i < CB->arg_size(); ++i) {
5173 if (CB->paramHasAttr(i, Attribute::NoUndef) &&
5174 CB->getArgOperand(i) == V)
5175 return true;
5176 }
5177 }
5178 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5179 break;
5180 }
5181 return false;
5182 }
5183
  // Set of instructions that we have proved will yield poison if V does.
5186 SmallSet<const Value *, 16> YieldsPoison;
5187 SmallSet<const BasicBlock *, 4> Visited;
5188
5189 YieldsPoison.insert(V);
5190 auto Propagate = [&](const User *User) {
5191 if (propagatesPoison(cast<Operator>(User)))
5192 YieldsPoison.insert(User);
5193 };
5194 for_each(V->users(), Propagate);
5195 Visited.insert(BB);
5196
5197 while (true) {
5198 for (auto &I : make_range(Begin, End)) {
5199 if (isa<DbgInfoIntrinsic>(I))
5200 continue;
5201 if (--ScanLimit == 0)
5202 return false;
5203 if (mustTriggerUB(&I, YieldsPoison))
5204 return true;
5205 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5206 return false;
5207
5208 // Mark poison that propagates from I through uses of I.
5209 if (YieldsPoison.count(&I))
5210 for_each(I.users(), Propagate);
5211 }
5212
5213 if (auto *NextBB = BB->getSingleSuccessor()) {
5214 if (Visited.insert(NextBB).second) {
5215 BB = NextBB;
5216 Begin = BB->getFirstNonPHI()->getIterator();
5217 End = BB->end();
5218 continue;
5219 }
5220 }
5221
5222 break;
5223 }
5224 return false;
5225 }
5226
bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
5228 return ::programUndefinedIfUndefOrPoison(Inst, false);
5229 }
5230
bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
5232 return ::programUndefinedIfUndefOrPoison(Inst, true);
5233 }
5234
static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
5236 if (FMF.noNaNs())
5237 return true;
5238
5239 if (auto *C = dyn_cast<ConstantFP>(V))
5240 return !C->isNaN();
5241
5242 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5243 if (!C->getElementType()->isFloatingPointTy())
5244 return false;
5245 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5246 if (C->getElementAsAPFloat(I).isNaN())
5247 return false;
5248 }
5249 return true;
5250 }
5251
5252 if (isa<ConstantAggregateZero>(V))
5253 return true;
5254
5255 return false;
5256 }
5257
static bool isKnownNonZero(const Value *V) {
5259 if (auto *C = dyn_cast<ConstantFP>(V))
5260 return !C->isZero();
5261
5262 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5263 if (!C->getElementType()->isFloatingPointTy())
5264 return false;
5265 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5266 if (C->getElementAsAPFloat(I).isZero())
5267 return false;
5268 }
5269 return true;
5270 }
5271
5272 return false;
5273 }
5274
/// Match clamp pattern for float types without caring about NaNs or signed
/// zeros. Given a non-min/max outer cmp/select from the clamp pattern, this
/// function recognizes whether it can be substituted by a "canonical" min/max
/// pattern.
static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
                                               Value *CmpLHS, Value *CmpRHS,
                                               Value *TrueVal, Value *FalseVal,
                                               Value *&LHS, Value *&RHS) {
5283 // Try to match
5284 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
5285 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
5286 // and return description of the outer Max/Min.
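  //
  // For example (illustrative), 'x < 1.0 ? 1.0 : fmin(x, 8.0)' clamps x to
  // [1.0, 8.0] and is recognized as fmax(1.0, fmin(x, 8.0)); here FC1 == 1.0
  // and FC2 == 8.0 with FC1 < FC2.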
5287
5288 // First, check if select has inverse order:
5289 if (CmpRHS == FalseVal) {
5290 std::swap(TrueVal, FalseVal);
5291 Pred = CmpInst::getInversePredicate(Pred);
5292 }
5293
5294 // Assume success now. If there's no match, callers should not use these anyway.
5295 LHS = TrueVal;
5296 RHS = FalseVal;
5297
5298 const APFloat *FC1;
5299 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
5300 return {SPF_UNKNOWN, SPNB_NA, false};
5301
5302 const APFloat *FC2;
5303 switch (Pred) {
5304 case CmpInst::FCMP_OLT:
5305 case CmpInst::FCMP_OLE:
5306 case CmpInst::FCMP_ULT:
5307 case CmpInst::FCMP_ULE:
5308 if (match(FalseVal,
5309 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
5310 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5311 *FC1 < *FC2)
5312 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
5313 break;
5314 case CmpInst::FCMP_OGT:
5315 case CmpInst::FCMP_OGE:
5316 case CmpInst::FCMP_UGT:
5317 case CmpInst::FCMP_UGE:
5318 if (match(FalseVal,
5319 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
5320 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5321 *FC1 > *FC2)
5322 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
5323 break;
5324 default:
5325 break;
5326 }
5327
5328 return {SPF_UNKNOWN, SPNB_NA, false};
5329 }
5330
5331 /// Recognize variations of:
5332 /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
                                      Value *CmpLHS, Value *CmpRHS,
                                      Value *TrueVal, Value *FalseVal) {
5336 // Swap the select operands and predicate to match the patterns below.
5337 if (CmpRHS != TrueVal) {
5338 Pred = ICmpInst::getSwappedPredicate(Pred);
5339 std::swap(TrueVal, FalseVal);
5340 }
5341 const APInt *C1;
5342 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
5343 const APInt *C2;
5344 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
5345 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5346 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
5347 return {SPF_SMAX, SPNB_NA, false};
5348
5349 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
5350 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5351 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
5352 return {SPF_SMIN, SPNB_NA, false};
5353
5354 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
5355 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5356 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
5357 return {SPF_UMAX, SPNB_NA, false};
5358
5359 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
5360 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5361 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
5362 return {SPF_UMIN, SPNB_NA, false};
5363 }
5364 return {SPF_UNKNOWN, SPNB_NA, false};
5365 }
5366
5367 /// Recognize variations of:
5368 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
                                               Value *CmpLHS, Value *CmpRHS,
                                               Value *TVal, Value *FVal,
                                               unsigned Depth) {
5373 // TODO: Allow FP min/max with nnan/nsz.
5374 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
5375
5376 Value *A = nullptr, *B = nullptr;
5377 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
5378 if (!SelectPatternResult::isMinOrMax(L.Flavor))
5379 return {SPF_UNKNOWN, SPNB_NA, false};
5380
5381 Value *C = nullptr, *D = nullptr;
5382 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
5383 if (L.Flavor != R.Flavor)
5384 return {SPF_UNKNOWN, SPNB_NA, false};
5385
5386 // We have something like: x Pred y ? min(a, b) : min(c, d).
5387 // Try to match the compare to the min/max operations of the select operands.
5388 // First, make sure we have the right compare predicate.
5389 switch (L.Flavor) {
5390 case SPF_SMIN:
5391 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
5392 Pred = ICmpInst::getSwappedPredicate(Pred);
5393 std::swap(CmpLHS, CmpRHS);
5394 }
5395 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
5396 break;
5397 return {SPF_UNKNOWN, SPNB_NA, false};
5398 case SPF_SMAX:
5399 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
5400 Pred = ICmpInst::getSwappedPredicate(Pred);
5401 std::swap(CmpLHS, CmpRHS);
5402 }
5403 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
5404 break;
5405 return {SPF_UNKNOWN, SPNB_NA, false};
5406 case SPF_UMIN:
5407 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
5408 Pred = ICmpInst::getSwappedPredicate(Pred);
5409 std::swap(CmpLHS, CmpRHS);
5410 }
5411 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
5412 break;
5413 return {SPF_UNKNOWN, SPNB_NA, false};
5414 case SPF_UMAX:
5415 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
5416 Pred = ICmpInst::getSwappedPredicate(Pred);
5417 std::swap(CmpLHS, CmpRHS);
5418 }
5419 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
5420 break;
5421 return {SPF_UNKNOWN, SPNB_NA, false};
5422 default:
5423 return {SPF_UNKNOWN, SPNB_NA, false};
5424 }
5425
5426 // If there is a common operand in the already matched min/max and the other
5427 // min/max operands match the compare operands (either directly or inverted),
5428 // then this is min/max of the same flavor.
5429
5430 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5431 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5432 if (D == B) {
5433 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5434 match(A, m_Not(m_Specific(CmpRHS)))))
5435 return {L.Flavor, SPNB_NA, false};
5436 }
5437 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5438 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5439 if (C == B) {
5440 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5441 match(A, m_Not(m_Specific(CmpRHS)))))
5442 return {L.Flavor, SPNB_NA, false};
5443 }
5444 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5445 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5446 if (D == A) {
5447 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5448 match(B, m_Not(m_Specific(CmpRHS)))))
5449 return {L.Flavor, SPNB_NA, false};
5450 }
5451 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5452 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5453 if (C == A) {
5454 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5455 match(B, m_Not(m_Specific(CmpRHS)))))
5456 return {L.Flavor, SPNB_NA, false};
5457 }
5458
5459 return {SPF_UNKNOWN, SPNB_NA, false};
5460 }
5461
5462 /// If the input value is the result of a 'not' op, constant integer, or vector
5463 /// splat of a constant integer, return the bitwise-not source value.
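/// For example, for 'xor i8 %x, -1' this returns %x, and for the constant
/// 'i8 5' it returns 'i8 -6' (the bitwise-not of 5).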
5464 /// TODO: This could be extended to handle non-splat vector integer constants.
static Value *getNotValue(Value *V) {
5466 Value *NotV;
5467 if (match(V, m_Not(m_Value(NotV))))
5468 return NotV;
5469
5470 const APInt *C;
5471 if (match(V, m_APInt(C)))
5472 return ConstantInt::get(V->getType(), ~(*C));
5473
5474 return nullptr;
5475 }
5476
5477 /// Match non-obvious integer minimum and maximum sequences.
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
                                       Value *CmpLHS, Value *CmpRHS,
                                       Value *TrueVal, Value *FalseVal,
                                       Value *&LHS, Value *&RHS,
                                       unsigned Depth) {
5483 // Assume success. If there's no match, callers should not use these anyway.
5484 LHS = TrueVal;
5485 RHS = FalseVal;
5486
5487 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
5488 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5489 return SPR;
5490
5491 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
5492 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5493 return SPR;
5494
5495 // Look through 'not' ops to find disguised min/max.
5496 // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
5497 // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
5498 if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
5499 switch (Pred) {
5500 case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
5501 case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
5502 case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
5503 case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
5504 default: break;
5505 }
5506 }
5507
5508 // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
5509 // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
5510 if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
5511 switch (Pred) {
5512 case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
5513 case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
5514 case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
5515 case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
5516 default: break;
5517 }
5518 }
5519
5520 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
5521 return {SPF_UNKNOWN, SPNB_NA, false};
5522
5523 // Z = X -nsw Y
5524 // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
5525 // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
5526 if (match(TrueVal, m_Zero()) &&
5527 match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5528 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
5529
5530 // Z = X -nsw Y
5531 // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
5532 // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
5533 if (match(FalseVal, m_Zero()) &&
5534 match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5535 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
5536
5537 const APInt *C1;
5538 if (!match(CmpRHS, m_APInt(C1)))
5539 return {SPF_UNKNOWN, SPNB_NA, false};
5540
5541 // An unsigned min/max can be written with a signed compare.
5542 const APInt *C2;
5543 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
5544 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
5545 // Is the sign bit set?
5546 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
5547 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
5548 if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
5549 C2->isMaxSignedValue())
5550 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5551
5552 // Is the sign bit clear?
5553 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
5554 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
5555 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
5556 C2->isMinSignedValue())
5557 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5558 }
5559
5560 return {SPF_UNKNOWN, SPNB_NA, false};
5561 }
5562
bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
5564 assert(X && Y && "Invalid operand");
5565
5566 // X = sub (0, Y) || X = sub nsw (0, Y)
5567 if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
5568 (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
5569 return true;
5570
5571 // Y = sub (0, X) || Y = sub nsw (0, X)
5572 if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
5573 (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
5574 return true;
5575
5576 // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
5577 Value *A, *B;
5578 return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
5579 match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
5580 (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
5581 match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
5582 }
5583
static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
                                              FastMathFlags FMF,
                                              Value *CmpLHS, Value *CmpRHS,
                                              Value *TrueVal, Value *FalseVal,
                                              Value *&LHS, Value *&RHS,
                                              unsigned Depth) {
5590 if (CmpInst::isFPPredicate(Pred)) {
5591 // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
5592 // 0.0 operand, set the compare's 0.0 operands to that same value for the
5593 // purpose of identifying min/max. Disregard vector constants with undefined
    // elements because those cannot be back-propagated for analysis.
5595 Value *OutputZeroVal = nullptr;
5596 if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
5597 !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
5598 OutputZeroVal = TrueVal;
5599 else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
5600 !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
5601 OutputZeroVal = FalseVal;
5602
5603 if (OutputZeroVal) {
5604 if (match(CmpLHS, m_AnyZeroFP()))
5605 CmpLHS = OutputZeroVal;
5606 if (match(CmpRHS, m_AnyZeroFP()))
5607 CmpRHS = OutputZeroVal;
5608 }
5609 }
5610
5611 LHS = CmpLHS;
5612 RHS = CmpRHS;
5613
5614 // Signed zero may return inconsistent results between implementations.
5615 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
5616 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
  // Therefore, we behave conservatively and only proceed if at least one of
  // the operands is known not to be zero or if we don't care about signed
  // zeros.
5619 switch (Pred) {
5620 default: break;
5621 // FIXME: Include OGT/OLT/UGT/ULT.
5622 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
5623 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
5624 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5625 !isKnownNonZero(CmpRHS))
5626 return {SPF_UNKNOWN, SPNB_NA, false};
5627 }
5628
5629 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
5630 bool Ordered = false;
5631
5632 // When given one NaN and one non-NaN input:
5633 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
5634 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
5635 // ordered comparison fails), which could be NaN or non-NaN.
5636 // so here we discover exactly what NaN behavior is required/accepted.
5637 if (CmpInst::isFPPredicate(Pred)) {
5638 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
5639 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
5640
5641 if (LHSSafe && RHSSafe) {
5642 // Both operands are known non-NaN.
5643 NaNBehavior = SPNB_RETURNS_ANY;
5644 } else if (CmpInst::isOrdered(Pred)) {
5645 // An ordered comparison will return false when given a NaN, so it
5646 // returns the RHS.
5647 Ordered = true;
5648 if (LHSSafe)
5649 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
5650 NaNBehavior = SPNB_RETURNS_NAN;
5651 else if (RHSSafe)
5652 NaNBehavior = SPNB_RETURNS_OTHER;
5653 else
5654 // Completely unsafe.
5655 return {SPF_UNKNOWN, SPNB_NA, false};
5656 } else {
5657 Ordered = false;
5658 // An unordered comparison will return true when given a NaN, so it
5659 // returns the LHS.
5660 if (LHSSafe)
5661 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
5662 NaNBehavior = SPNB_RETURNS_OTHER;
5663 else if (RHSSafe)
5664 NaNBehavior = SPNB_RETURNS_NAN;
5665 else
5666 // Completely unsafe.
5667 return {SPF_UNKNOWN, SPNB_NA, false};
5668 }
5669 }
5670
5671 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
5672 std::swap(CmpLHS, CmpRHS);
5673 Pred = CmpInst::getSwappedPredicate(Pred);
5674 if (NaNBehavior == SPNB_RETURNS_NAN)
5675 NaNBehavior = SPNB_RETURNS_OTHER;
5676 else if (NaNBehavior == SPNB_RETURNS_OTHER)
5677 NaNBehavior = SPNB_RETURNS_NAN;
5678 Ordered = !Ordered;
5679 }
5680
5681 // ([if]cmp X, Y) ? X : Y
5682 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
5683 switch (Pred) {
5684 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
5685 case ICmpInst::ICMP_UGT:
5686 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
5687 case ICmpInst::ICMP_SGT:
5688 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
5689 case ICmpInst::ICMP_ULT:
5690 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
5691 case ICmpInst::ICMP_SLT:
5692 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
5693 case FCmpInst::FCMP_UGT:
5694 case FCmpInst::FCMP_UGE:
5695 case FCmpInst::FCMP_OGT:
5696 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
5697 case FCmpInst::FCMP_ULT:
5698 case FCmpInst::FCMP_ULE:
5699 case FCmpInst::FCMP_OLT:
5700 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
5701 }
5702 }
5703
5704 if (isKnownNegation(TrueVal, FalseVal)) {
5705 // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
5706 // match against either LHS or sext(LHS).
5707 auto MaybeSExtCmpLHS =
5708 m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
5709 auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
5710 auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
5711 if (match(TrueVal, MaybeSExtCmpLHS)) {
5712 // Set the return values. If the compare uses the negated value (-X >s 0),
5713 // swap the return values because the negated value is always 'RHS'.
5714 LHS = TrueVal;
5715 RHS = FalseVal;
5716 if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
5717 std::swap(LHS, RHS);
5718
5719 // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
5720 // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
5721 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5722 return {SPF_ABS, SPNB_NA, false};
5723
5724 // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
5725 if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
5726 return {SPF_ABS, SPNB_NA, false};
5727
5728 // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
5729 // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
5730 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5731 return {SPF_NABS, SPNB_NA, false};
5732 }
5733 else if (match(FalseVal, MaybeSExtCmpLHS)) {
5734 // Set the return values. If the compare uses the negated value (-X >s 0),
5735 // swap the return values because the negated value is always 'RHS'.
5736 LHS = FalseVal;
5737 RHS = TrueVal;
5738 if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
5739 std::swap(LHS, RHS);
5740
5741 // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
5742 // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
5743 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5744 return {SPF_NABS, SPNB_NA, false};
5745
5746 // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
5747 // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
5748 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5749 return {SPF_ABS, SPNB_NA, false};
5750 }
5751 }
5752
5753 if (CmpInst::isIntPredicate(Pred))
5754 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
5755
  // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
  // may return either -0.0 or 0.0, so an fcmp/select pair has stricter
  // semantics than minNum. Be conservative in such cases.
5759 if (NaNBehavior != SPNB_RETURNS_ANY ||
5760 (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5761 !isKnownNonZero(CmpRHS)))
5762 return {SPF_UNKNOWN, SPNB_NA, false};
5763
5764 return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
5765 }
5766
5767 /// Helps to match a select pattern in case of a type mismatch.
5768 ///
5769 /// The function handles the case when the types of the true and false
5770 /// values of a select instruction differ from the type of the cmp
5771 /// instruction operands because of a cast instruction. It checks whether it
5772 /// is legal to move the cast after the "select" and, if so, returns the new
5773 /// second value of the "select" (with the assumption that the cast is
5774 /// moved):
5775 /// 1. As the operand of the cast instruction when both values of the
5776 ///    "select" are the same cast instruction.
5777 /// 2. As the restored constant (by applying the reverse cast operation)
5778 ///    when the first value of the "select" is a cast operation and the
5779 ///    second value is a constant.
5780 /// NOTE: We return only the new second value; the first is the cast operand.
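/// For example (illustrative IR), case 1 with two matching zext casts:
///   %c = icmp ult i8 %a, %b
///   %t = zext i8 %a to i32
///   %f = zext i8 %b to i32
///   %s = select i1 %c, i32 %t, i32 %f
/// the function returns %b, so the caller can match the select pattern on
/// the narrow operands %a and %b and move the zext after the select.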
5781 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
5782 Instruction::CastOps *CastOp) {
5783 auto *Cast1 = dyn_cast<CastInst>(V1);
5784 if (!Cast1)
5785 return nullptr;
5786
5787 *CastOp = Cast1->getOpcode();
5788 Type *SrcTy = Cast1->getSrcTy();
5789 if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
5790 // If V1 and V2 are both the same cast from the same type, look through V1.
5791 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
5792 return Cast2->getOperand(0);
5793 return nullptr;
5794 }
5795
5796 auto *C = dyn_cast<Constant>(V2);
5797 if (!C)
5798 return nullptr;
5799
5800 Constant *CastedTo = nullptr;
5801 switch (*CastOp) {
5802 case Instruction::ZExt:
5803 if (CmpI->isUnsigned())
5804 CastedTo = ConstantExpr::getTrunc(C, SrcTy);
5805 break;
5806 case Instruction::SExt:
5807 if (CmpI->isSigned())
5808 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
5809 break;
5810 case Instruction::Trunc:
5811 Constant *CmpConst;
5812 if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
5813 CmpConst->getType() == SrcTy) {
5814 // Here we have the following case:
5815 //
5816 // %cond = cmp iN %x, CmpConst
5817 // %tr = trunc iN %x to iK
5818 // %narrowsel = select i1 %cond, iK %tr, iK C
5819 //
5820 // We can always move trunc after select operation:
5821 //
5822 // %cond = cmp iN %x, CmpConst
5823 // %widesel = select i1 %cond, iN %x, iN CmpConst
5824 // %tr = trunc iN %widesel to iK
5825 //
5826 // Note that C could be extended in any way because we don't care about
5827 // the upper bits after truncation. It can't be an abs pattern, because
5828 // that would look like:
5829 //
5830 // select i1 %cond, x, -x.
5831 //
5832 // So only a min/max pattern can be matched. Such a match requires the
5833 // widened C == CmpConst, so we set the widened C to CmpConst; the
5834 // condition trunc(CmpConst) == C is checked below.
5835 CastedTo = CmpConst;
5836 } else {
5837 CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
5838 }
5839 break;
5840 case Instruction::FPTrunc:
5841 CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
5842 break;
5843 case Instruction::FPExt:
5844 CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
5845 break;
5846 case Instruction::FPToUI:
5847 CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
5848 break;
5849 case Instruction::FPToSI:
5850 CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
5851 break;
5852 case Instruction::UIToFP:
5853 CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
5854 break;
5855 case Instruction::SIToFP:
5856 CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
5857 break;
5858 default:
5859 break;
5860 }
5861
5862 if (!CastedTo)
5863 return nullptr;
5864
5865 // Make sure the cast doesn't lose any information.
5866 Constant *CastedBack =
5867 ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
5868 if (CastedBack != C)
5869 return nullptr;
5870
5871 return CastedTo;
5872 }
5873
5874 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
5875 Instruction::CastOps *CastOp,
5876 unsigned Depth) {
5877 if (Depth >= MaxAnalysisRecursionDepth)
5878 return {SPF_UNKNOWN, SPNB_NA, false};
5879
5880 SelectInst *SI = dyn_cast<SelectInst>(V);
5881 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
5882
5883 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
5884 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
5885
5886 Value *TrueVal = SI->getTrueValue();
5887 Value *FalseVal = SI->getFalseValue();
5888
5889 return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
5890 CastOp, Depth);
5891 }
5892
5893 SelectPatternResult llvm::matchDecomposedSelectPattern(
5894 CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
5895 Instruction::CastOps *CastOp, unsigned Depth) {
5896 CmpInst::Predicate Pred = CmpI->getPredicate();
5897 Value *CmpLHS = CmpI->getOperand(0);
5898 Value *CmpRHS = CmpI->getOperand(1);
5899 FastMathFlags FMF;
5900 if (isa<FPMathOperator>(CmpI))
5901 FMF = CmpI->getFastMathFlags();
5902
5903 // Bail out early.
5904 if (CmpI->isEquality())
5905 return {SPF_UNKNOWN, SPNB_NA, false};
5906
5907 // Deal with type mismatches.
5908 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
5909 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
5910 // If this is a potential fmin/fmax with a cast to integer, then ignore
5911 // -0.0 because there is no corresponding integer value.
5912 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5913 FMF.setNoSignedZeros();
5914 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5915 cast<CastInst>(TrueVal)->getOperand(0), C,
5916 LHS, RHS, Depth);
5917 }
5918 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
5919 // If this is a potential fmin/fmax with a cast to integer, then ignore
5920 // -0.0 because there is no corresponding integer value.
5921 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5922 FMF.setNoSignedZeros();
5923 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5924 C, cast<CastInst>(FalseVal)->getOperand(0),
5925 LHS, RHS, Depth);
5926 }
5927 }
5928 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
5929 LHS, RHS, Depth);
5930 }
5931
5932 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
5933 if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
5934 if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
5935 if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
5936 if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
5937 if (SPF == SPF_FMINNUM)
5938 return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
5939 if (SPF == SPF_FMAXNUM)
5940 return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
5941 llvm_unreachable("unhandled!");
5942 }
5943
5944 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
5945 if (SPF == SPF_SMIN) return SPF_SMAX;
5946 if (SPF == SPF_UMIN) return SPF_UMAX;
5947 if (SPF == SPF_SMAX) return SPF_SMIN;
5948 if (SPF == SPF_UMAX) return SPF_UMIN;
5949 llvm_unreachable("unhandled!");
5950 }
5951
5952 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
5953 return getMinMaxPred(getInverseMinMaxFlavor(SPF));
5954 }
5955
5956 std::pair<Intrinsic::ID, bool>
5957 llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {
5958 // Check if VL contains select instructions that can be folded into a min/max
5959 // vector intrinsic and return the intrinsic if it is possible.
5960 // TODO: Support floating point min/max.
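  // For example (illustrative IR), if every value in VL looks like
  //   %c = icmp slt i32 %a, %b
  //   %s = select i1 %c, i32 %a, i32 %b   ; SPF_SMIN
  // this returns {Intrinsic::smin, ...}, and the caller may rewrite each
  // select as %s = call i32 @llvm.smin.i32(i32 %a, i32 %b).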
5961 bool AllCmpSingleUse = true;
5962 SelectPatternResult SelectPattern;
5963 SelectPattern.Flavor = SPF_UNKNOWN;
5964 if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
5965 Value *LHS, *RHS;
5966 auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
5967 if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) ||
5968 CurrentPattern.Flavor == SPF_FMINNUM ||
5969 CurrentPattern.Flavor == SPF_FMAXNUM ||
5970 !I->getType()->isIntOrIntVectorTy())
5971 return false;
5972 if (SelectPattern.Flavor != SPF_UNKNOWN &&
5973 SelectPattern.Flavor != CurrentPattern.Flavor)
5974 return false;
5975 SelectPattern = CurrentPattern;
5976 AllCmpSingleUse &=
5977 match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value()));
5978 return true;
5979 })) {
5980 switch (SelectPattern.Flavor) {
5981 case SPF_SMIN:
5982 return {Intrinsic::smin, AllCmpSingleUse};
5983 case SPF_UMIN:
5984 return {Intrinsic::umin, AllCmpSingleUse};
5985 case SPF_SMAX:
5986 return {Intrinsic::smax, AllCmpSingleUse};
5987 case SPF_UMAX:
5988 return {Intrinsic::umax, AllCmpSingleUse};
5989 default:
5990 llvm_unreachable("unexpected select pattern flavor");
5991 }
5992 }
5993 return {Intrinsic::not_intrinsic, false};
5994 }
5995
5996 /// Return true if "icmp Pred LHS RHS" is always true.
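/// For example, "x u<= (x +nuw 1)" is always true, so this returns true for
/// Pred = ICMP_ULE, LHS = x, RHS = x +nuw 1.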
5997 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
5998 const Value *RHS, const DataLayout &DL,
5999 unsigned Depth) {
6000 assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
6001 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
6002 return true;
6003
6004 switch (Pred) {
6005 default:
6006 return false;
6007
6008 case CmpInst::ICMP_SLE: {
6009 const APInt *C;
6010
6011 // LHS s<= LHS +_{nsw} C if C >= 0
6012 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
6013 return !C->isNegative();
6014 return false;
6015 }
6016
6017 case CmpInst::ICMP_ULE: {
6018 const APInt *C;
6019
6020 // LHS u<= LHS +_{nuw} C for any C
6021 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
6022 return true;
6023
6024 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
6025 auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
6026 const Value *&X,
6027 const APInt *&CA, const APInt *&CB) {
6028 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
6029 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
6030 return true;
6031
6032 // If X & C == 0 then (X | C) == X +_{nuw} C
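      // For example, if the low two bits of X are known zero, then with
      // CA = 1 and CB = 2 we get A == X + 1 and B == X + 2, neither of
      // which can wrap.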
6033 if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
6034 match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
6035 KnownBits Known(CA->getBitWidth());
6036 computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
6037 /*CxtI*/ nullptr, /*DT*/ nullptr);
6038 if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
6039 return true;
6040 }
6041
6042 return false;
6043 };
6044
6045 const Value *X;
6046 const APInt *CLHS, *CRHS;
6047 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
6048 return CLHS->ule(*CRHS);
6049
6050 return false;
6051 }
6052 }
6053 }
6054
6055 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
6056 /// ALHS ARHS" is true. Otherwise, return None.
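/// For example, with Pred = ICMP_SLE, "x s<= y" implies "x s<= (y +nsw 1)":
/// BLHS (x) s<= ALHS (x) and ARHS (y) s<= BRHS (y +nsw 1), so the result
/// follows by transitivity.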
6057 static Optional<bool>
6058 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
6059 const Value *ARHS, const Value *BLHS, const Value *BRHS,
6060 const DataLayout &DL, unsigned Depth) {
6061 switch (Pred) {
6062 default:
6063 return None;
6064
6065 case CmpInst::ICMP_SLT:
6066 case CmpInst::ICMP_SLE:
6067 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
6068 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
6069 return true;
6070 return None;
6071
6072 case CmpInst::ICMP_ULT:
6073 case CmpInst::ICMP_ULE:
6074 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
6075 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
6076 return true;
6077 return None;
6078 }
6079 }
6080
6081 /// Return true if the operands of the two compares match. IsSwappedOps is true
6082 /// when the operands match, but are swapped.
6083 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
6084 const Value *BLHS, const Value *BRHS,
6085 bool &IsSwappedOps) {
6087 bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
6088 IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
6089 return IsMatchingOps || IsSwappedOps;
6090 }
6091
6092 /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
6093 /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
6094 /// Otherwise, return None if we can't infer anything.
6095 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
6096 CmpInst::Predicate BPred,
6097 bool AreSwappedOps) {
6098 // Canonicalize the predicate as if the operands were not commuted.
6099 if (AreSwappedOps)
6100 BPred = ICmpInst::getSwappedPredicate(BPred);
6101
6102 if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
6103 return true;
6104 if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
6105 return false;
6106
6107 return None;
6108 }
6109
6110 /// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
6111 /// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
6112 /// Otherwise, return None if we can't infer anything.
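/// For example (a worked case): "x u> 5" gives the exact region [6, 0),
/// i.e. {6 .. UINT_MAX}, and "x u> 3" allows the region [4, 0). Their
/// difference is empty, so "x u> 5" implies "x u> 3" and we return true.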
6113 static Optional<bool>
6114 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
6115 const ConstantInt *C1,
6116 CmpInst::Predicate BPred,
6117 const ConstantInt *C2) {
6118 ConstantRange DomCR =
6119 ConstantRange::makeExactICmpRegion(APred, C1->getValue());
6120 ConstantRange CR =
6121 ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
6122 ConstantRange Intersection = DomCR.intersectWith(CR);
6123 ConstantRange Difference = DomCR.difference(CR);
6124 if (Intersection.isEmptySet())
6125 return false;
6126 if (Difference.isEmptySet())
6127 return true;
6128 return None;
6129 }
6130
6131 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
6132 /// false. Otherwise, return None if we can't infer anything.
6133 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
6134 CmpInst::Predicate BPred,
6135 const Value *BLHS, const Value *BRHS,
6136 const DataLayout &DL, bool LHSIsTrue,
6137 unsigned Depth) {
6138 Value *ALHS = LHS->getOperand(0);
6139 Value *ARHS = LHS->getOperand(1);
6140
6141 // The rest of the logic assumes the LHS condition is true. If that's not the
6142 // case, invert the predicate to make it so.
6143 CmpInst::Predicate APred =
6144 LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
6145
6146 // Can we infer anything when the two compares have matching operands?
6147 bool AreSwappedOps;
6148 if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
6149 if (Optional<bool> Implication = isImpliedCondMatchingOperands(
6150 APred, BPred, AreSwappedOps))
6151 return Implication;
6152 // No amount of additional analysis will infer the second condition, so
6153 // early exit.
6154 return None;
6155 }
6156
6157 // Can we infer anything when the LHS operands match and the RHS operands are
6158 // constants (not necessarily matching)?
6159 if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
6160 if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
6161 APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
6162 return Implication;
6163 // No amount of additional analysis will infer the second condition, so
6164 // early exit.
6165 return None;
6166 }
6167
6168 if (APred == BPred)
6169 return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
6170 return None;
6171 }
6172
6173 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
6174 /// false. Otherwise, return None if we can't infer anything. We expect the LHS
6175 /// to be an 'and', 'or', or 'select' instruction and the RHS to be an icmp.
6176 static Optional<bool>
6177 isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
6178 const Value *RHSOp0, const Value *RHSOp1,
6179 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6180 // The LHS must be an 'or', 'and', or a 'select' instruction.
6181 assert((LHS->getOpcode() == Instruction::And ||
6182 LHS->getOpcode() == Instruction::Or ||
6183 LHS->getOpcode() == Instruction::Select) &&
6184 "Expected LHS to be 'and', 'or', or 'select'.");
6185
6186 assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
6187
6188 // If the result of an 'or' is false, then we know both legs of the 'or' are
6189 // false. Similarly, if the result of an 'and' is true, then we know both
6190 // legs of the 'and' are true.
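  // For example, if "(a u< b) && (c != 0)" is known true, then both
  // "a u< b" and "c != 0" are true, and either leg alone may imply the RHS.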
6191 const Value *ALHS, *ARHS;
6192 if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
6193 (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
6194 // FIXME: Make this non-recursive.
6195 if (Optional<bool> Implication = isImpliedCondition(
6196 ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6197 return Implication;
6198 if (Optional<bool> Implication = isImpliedCondition(
6199 ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6200 return Implication;
6201 return None;
6202 }
6203 return None;
6204 }
6205
6206 Optional<bool>
6207 llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
6208 const Value *RHSOp0, const Value *RHSOp1,
6209 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6210 // Bail out when we hit the limit.
6211 if (Depth == MaxAnalysisRecursionDepth)
6212 return None;
6213
6214 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
6215 // example.
6216 if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
6217 return None;
6218
6219 Type *OpTy = LHS->getType();
6220 assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
6221
6222 // FIXME: Extend the code below to handle vectors.
6223 if (OpTy->isVectorTy())
6224 return None;
6225
6226 assert(OpTy->isIntegerTy(1) && "implied by above");
6227
6228 // Both LHS and RHS are icmps.
6229 const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
6230 if (LHSCmp)
6231 return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6232 Depth);
6233
6234 // The LHS should be an 'or', 'and', or a 'select' instruction. We expect
6235 // the RHS to be an icmp.
6236 // FIXME: Add support for and/or/select on the RHS.
6237 if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
6238 if ((LHSI->getOpcode() == Instruction::And ||
6239 LHSI->getOpcode() == Instruction::Or ||
6240 LHSI->getOpcode() == Instruction::Select))
6241 return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6242 Depth);
6243 }
6244 return None;
6245 }
6246
6247 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
6248 const DataLayout &DL, bool LHSIsTrue,
6249 unsigned Depth) {
6250 // LHS ==> RHS by definition
6251 if (LHS == RHS)
6252 return LHSIsTrue;
6253
6254 const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
6255 if (RHSCmp)
6256 return isImpliedCondition(LHS, RHSCmp->getPredicate(),
6257 RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
6258 LHSIsTrue, Depth);
6259 return None;
6260 }
6261
6262 // Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
6263 // condition dominating ContextI, or nullptr if no such condition is found.
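// For example (illustrative IR):
//   entry:
//     %c = icmp ult i32 %x, 10
//     br i1 %c, label %then, label %else
// For a ContextI in %then (whose single predecessor is %entry), this
// returns {%c, true}; for one in %else, it returns {%c, false}.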
6264 static std::pair<Value *, bool>
6265 getDomPredecessorCondition(const Instruction *ContextI) {
6266 if (!ContextI || !ContextI->getParent())
6267 return {nullptr, false};
6268
6269 // TODO: This is a poor/cheap way to determine dominance. Should we use a
6270 // dominator tree (e.g., from a SimplifyQuery) instead?
6271 const BasicBlock *ContextBB = ContextI->getParent();
6272 const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
6273 if (!PredBB)
6274 return {nullptr, false};
6275
6276 // We need a conditional branch in the predecessor.
6277 Value *PredCond;
6278 BasicBlock *TrueBB, *FalseBB;
6279 if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
6280 return {nullptr, false};
6281
6282 // The branch should get simplified. Don't bother simplifying this condition.
6283 if (TrueBB == FalseBB)
6284 return {nullptr, false};
6285
6286 assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
6287 "Predecessor block does not point to successor?");
6288
6289 // Is this condition implied by the predecessor condition?
6290 return {PredCond, TrueBB == ContextBB};
6291 }
6292
6293 Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
6294 const Instruction *ContextI,
6295 const DataLayout &DL) {
6296 assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
6297 auto PredCond = getDomPredecessorCondition(ContextI);
6298 if (PredCond.first)
6299 return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
6300 return None;
6301 }
6302
6303 Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
6304 const Value *LHS, const Value *RHS,
6305 const Instruction *ContextI,
6306 const DataLayout &DL) {
6307 auto PredCond = getDomPredecessorCondition(ContextI);
6308 if (PredCond.first)
6309 return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
6310 PredCond.second);
6311 return None;
6312 }
6313
6314 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
6315 APInt &Upper, const InstrInfoQuery &IIQ) {
6316 unsigned Width = Lower.getBitWidth();
6317 const APInt *C;
6318 switch (BO.getOpcode()) {
6319 case Instruction::Add:
6320 if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6321 // FIXME: If we have both nuw and nsw, we should reduce the range further.
6322 if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6323 // 'add nuw x, C' produces [C, UINT_MAX].
6324 Lower = *C;
6325 } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6326 if (C->isNegative()) {
6327 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
6328 Lower = APInt::getSignedMinValue(Width);
6329 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6330 } else {
6331 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
6332 Lower = APInt::getSignedMinValue(Width) + *C;
6333 Upper = APInt::getSignedMaxValue(Width) + 1;
6334 }
6335 }
6336 }
6337 break;
6338
6339 case Instruction::And:
6340 if (match(BO.getOperand(1), m_APInt(C)))
6341 // 'and x, C' produces [0, C].
6342 Upper = *C + 1;
6343 break;
6344
6345 case Instruction::Or:
6346 if (match(BO.getOperand(1), m_APInt(C)))
6347 // 'or x, C' produces [C, UINT_MAX].
6348 Lower = *C;
6349 break;
6350
6351 case Instruction::AShr:
6352 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6353 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
6354 Lower = APInt::getSignedMinValue(Width).ashr(*C);
6355 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
6356 } else if (match(BO.getOperand(0), m_APInt(C))) {
6357 unsigned ShiftAmount = Width - 1;
6358 if (!C->isNullValue() && IIQ.isExact(&BO))
6359 ShiftAmount = C->countTrailingZeros();
6360 if (C->isNegative()) {
6361 // 'ashr C, x' produces [C, C >> (Width-1)]
6362 Lower = *C;
6363 Upper = C->ashr(ShiftAmount) + 1;
6364 } else {
6365 // 'ashr C, x' produces [C >> (Width-1), C]
6366 Lower = C->ashr(ShiftAmount);
6367 Upper = *C + 1;
6368 }
6369 }
6370 break;
6371
6372 case Instruction::LShr:
6373 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6374 // 'lshr x, C' produces [0, UINT_MAX >> C].
6375 Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
6376 } else if (match(BO.getOperand(0), m_APInt(C))) {
6377 // 'lshr C, x' produces [C >> (Width-1), C].
6378 unsigned ShiftAmount = Width - 1;
6379 if (!C->isNullValue() && IIQ.isExact(&BO))
6380 ShiftAmount = C->countTrailingZeros();
6381 Lower = C->lshr(ShiftAmount);
6382 Upper = *C + 1;
6383 }
6384 break;
6385
6386 case Instruction::Shl:
6387 if (match(BO.getOperand(0), m_APInt(C))) {
6388 if (IIQ.hasNoUnsignedWrap(&BO)) {
6389 // 'shl nuw C, x' produces [C, C << CLZ(C)]
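        // For example, with i8 and C = 3 (CLZ = 6), the reachable values are
        // 3, 6, 12, 24, 48, 96, 192, so the range is [3, 192].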
6390 Lower = *C;
6391 Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
6392 } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
6393 if (C->isNegative()) {
6394 // 'shl nsw C, x' produces [C << CLO(C)-1, C]
6395 unsigned ShiftAmount = C->countLeadingOnes() - 1;
6396 Lower = C->shl(ShiftAmount);
6397 Upper = *C + 1;
6398 } else {
6399 // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
6400 unsigned ShiftAmount = C->countLeadingZeros() - 1;
6401 Lower = *C;
6402 Upper = C->shl(ShiftAmount) + 1;
6403 }
6404 }
6405 }
6406 break;
6407
6408 case Instruction::SDiv:
6409 if (match(BO.getOperand(1), m_APInt(C))) {
6410 APInt IntMin = APInt::getSignedMinValue(Width);
6411 APInt IntMax = APInt::getSignedMaxValue(Width);
6412 if (C->isAllOnesValue()) {
6413 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX].
6415 Lower = IntMin + 1;
6416 Upper = IntMax + 1;
6417 } else if (C->countLeadingZeros() < Width - 1) {
6418 // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
6419 // where C != -1 and C != 0 and C != 1
6420 Lower = IntMin.sdiv(*C);
6421 Upper = IntMax.sdiv(*C);
6422 if (Lower.sgt(Upper))
6423 std::swap(Lower, Upper);
6424 Upper = Upper + 1;
6425 assert(Upper != Lower && "Upper part of range has wrapped!");
6426 }
6427 } else if (match(BO.getOperand(0), m_APInt(C))) {
6428 if (C->isMinSignedValue()) {
6429 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
6430 Lower = *C;
6431 Upper = Lower.lshr(1) + 1;
6432 } else {
6433 // 'sdiv C, x' produces [-|C|, |C|].
6434 Upper = C->abs() + 1;
6435 Lower = (-Upper) + 1;
6436 }
6437 }
6438 break;
6439
6440 case Instruction::UDiv:
6441 if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6442 // 'udiv x, C' produces [0, UINT_MAX / C].
6443 Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
6444 } else if (match(BO.getOperand(0), m_APInt(C))) {
6445 // 'udiv C, x' produces [0, C].
6446 Upper = *C + 1;
6447 }
6448 break;
6449
6450 case Instruction::SRem:
6451 if (match(BO.getOperand(1), m_APInt(C))) {
6452 // 'srem x, C' produces (-|C|, |C|).
6453 Upper = C->abs();
6454 Lower = (-Upper) + 1;
6455 }
6456 break;
6457
6458 case Instruction::URem:
6459 if (match(BO.getOperand(1), m_APInt(C)))
6460 // 'urem x, C' produces [0, C).
6461 Upper = *C;
6462 break;
6463
6464 default:
6465 break;
6466 }
6467 }
6468
6469 static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
6470 APInt &Upper) {
6471 unsigned Width = Lower.getBitWidth();
6472 const APInt *C;
6473 switch (II.getIntrinsicID()) {
6474 case Intrinsic::ctpop:
6475 case Intrinsic::ctlz:
6476 case Intrinsic::cttz:
6477 // Maximum of set/clear bits is the bit width.
6478 assert(Lower == 0 && "Expected lower bound to be zero");
6479 Upper = Width + 1;
6480 break;
6481 case Intrinsic::uadd_sat:
6482 // uadd.sat(x, C) produces [C, UINT_MAX].
6483 if (match(II.getOperand(0), m_APInt(C)) ||
6484 match(II.getOperand(1), m_APInt(C)))
6485 Lower = *C;
6486 break;
6487 case Intrinsic::sadd_sat:
6488 if (match(II.getOperand(0), m_APInt(C)) ||
6489 match(II.getOperand(1), m_APInt(C))) {
6490 if (C->isNegative()) {
6491 // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
6492 Lower = APInt::getSignedMinValue(Width);
6493 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6494 } else {
6495 // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
6496 Lower = APInt::getSignedMinValue(Width) + *C;
6497 Upper = APInt::getSignedMaxValue(Width) + 1;
6498 }
6499 }
6500 break;
6501 case Intrinsic::usub_sat:
6502 // usub.sat(C, x) produces [0, C].
6503 if (match(II.getOperand(0), m_APInt(C)))
6504 Upper = *C + 1;
6505 // usub.sat(x, C) produces [0, UINT_MAX - C].
6506 else if (match(II.getOperand(1), m_APInt(C)))
6507 Upper = APInt::getMaxValue(Width) - *C + 1;
6508 break;
6509 case Intrinsic::ssub_sat:
6510 if (match(II.getOperand(0), m_APInt(C))) {
6511 if (C->isNegative()) {
6512 // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
6513 Lower = APInt::getSignedMinValue(Width);
6514 Upper = *C - APInt::getSignedMinValue(Width) + 1;
6515 } else {
6516 // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
6517 Lower = *C - APInt::getSignedMaxValue(Width);
6518 Upper = APInt::getSignedMaxValue(Width) + 1;
6519 }
6520 } else if (match(II.getOperand(1), m_APInt(C))) {
6521 if (C->isNegative()) {
6522 // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
6523 Lower = APInt::getSignedMinValue(Width) - *C;
6524 Upper = APInt::getSignedMaxValue(Width) + 1;
6525 } else {
6526 // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
6527 Lower = APInt::getSignedMinValue(Width);
6528 Upper = APInt::getSignedMaxValue(Width) - *C + 1;
6529 }
6530 }
6531 break;
6532 case Intrinsic::umin:
6533 case Intrinsic::umax:
6534 case Intrinsic::smin:
6535 case Intrinsic::smax:
6536 if (!match(II.getOperand(0), m_APInt(C)) &&
6537 !match(II.getOperand(1), m_APInt(C)))
6538 break;
6539
6540 switch (II.getIntrinsicID()) {
6541 case Intrinsic::umin:
6542 Upper = *C + 1;
6543 break;
6544 case Intrinsic::umax:
6545 Lower = *C;
6546 break;
6547 case Intrinsic::smin:
6548 Lower = APInt::getSignedMinValue(Width);
6549 Upper = *C + 1;
6550 break;
6551 case Intrinsic::smax:
6552 Lower = *C;
6553 Upper = APInt::getSignedMaxValue(Width) + 1;
6554 break;
6555 default:
6556 llvm_unreachable("Must be min/max intrinsic");
6557 }
6558 break;
6559 case Intrinsic::abs:
6560 // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
6561 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6562 if (match(II.getOperand(1), m_One()))
6563 Upper = APInt::getSignedMaxValue(Width) + 1;
6564 else
6565 Upper = APInt::getSignedMinValue(Width) + 1;
6566 break;
6567 default:
6568 break;
6569 }
6570 }
6571
6572 static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
6573 APInt &Upper, const InstrInfoQuery &IIQ) {
6574 const Value *LHS = nullptr, *RHS = nullptr;
6575 SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
6576 if (R.Flavor == SPF_UNKNOWN)
6577 return;
6578
6579 unsigned BitWidth = SI.getType()->getScalarSizeInBits();
6580
6581 if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
6582 // If the negation part of the abs (in RHS) has the NSW flag,
6583 // then the result of abs(X) is [0..SIGNED_MAX],
6584 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6585 Lower = APInt::getNullValue(BitWidth);
6586 if (match(RHS, m_Neg(m_Specific(LHS))) &&
6587 IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
6588 Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6589 else
6590 Upper = APInt::getSignedMinValue(BitWidth) + 1;
6591 return;
6592 }
6593
6594 if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
6595 // The result of -abs(X) is <= 0.
6596 Lower = APInt::getSignedMinValue(BitWidth);
6597 Upper = APInt(BitWidth, 1);
6598 return;
6599 }
6600
6601 const APInt *C;
6602 if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
6603 return;
6604
6605 switch (R.Flavor) {
6606 case SPF_UMIN:
6607 Upper = *C + 1;
6608 break;
6609 case SPF_UMAX:
6610 Lower = *C;
6611 break;
6612 case SPF_SMIN:
6613 Lower = APInt::getSignedMinValue(BitWidth);
6614 Upper = *C + 1;
6615 break;
6616 case SPF_SMAX:
6617 Lower = *C;
6618 Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6619 break;
6620 default:
6621 break;
6622 }
6623 }
6624
6625 ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo,
6626 AssumptionCache *AC,
6627 const Instruction *CtxI,
6628 unsigned Depth) {
6629 assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
6630
6631 if (Depth == MaxAnalysisRecursionDepth)
6632 return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
6633
6634 const APInt *C;
6635 if (match(V, m_APInt(C)))
6636 return ConstantRange(*C);
6637
6638 InstrInfoQuery IIQ(UseInstrInfo);
6639 unsigned BitWidth = V->getType()->getScalarSizeInBits();
6640 APInt Lower = APInt(BitWidth, 0);
6641 APInt Upper = APInt(BitWidth, 0);
6642 if (auto *BO = dyn_cast<BinaryOperator>(V))
6643 setLimitsForBinOp(*BO, Lower, Upper, IIQ);
6644 else if (auto *II = dyn_cast<IntrinsicInst>(V))
6645 setLimitsForIntrinsic(*II, Lower, Upper);
6646 else if (auto *SI = dyn_cast<SelectInst>(V))
6647 setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
6648
6649 ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
6650
6651 if (auto *I = dyn_cast<Instruction>(V))
6652 if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
6653 CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
6654
6655 if (CtxI && AC) {
6656 // Try to restrict the range based on information from assumptions.
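    // For example (illustrative IR), given
    //   %c = icmp ult i32 %v, 10
    //   call void @llvm.assume(i1 %c)
    // the range of %v can be intersected with [0, 10).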
6657 for (auto &AssumeVH : AC->assumptionsFor(V)) {
6658 if (!AssumeVH)
6659 continue;
6660 CallInst *I = cast<CallInst>(AssumeVH);
6661 assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
6662 "Got assumption for the wrong function!");
6663 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
6664 "must be an assume intrinsic");
6665
6666 if (!isValidAssumeForContext(I, CtxI, nullptr))
6667 continue;
6668 Value *Arg = I->getArgOperand(0);
6669 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
6670 // Currently we just use information from comparisons.
6671 if (!Cmp || Cmp->getOperand(0) != V)
6672 continue;
6673 ConstantRange RHS = computeConstantRange(Cmp->getOperand(1), UseInstrInfo,
6674 AC, I, Depth + 1);
6675 CR = CR.intersectWith(
6676 ConstantRange::makeSatisfyingICmpRegion(Cmp->getPredicate(), RHS));
6677 }
6678 }
6679
6680 return CR;
6681 }
6682
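// Compute the constant byte offset implied by the GEP operands starting at
// index Idx, if they are all constant. For example (illustrative), for
//   %p = getelementptr { i32, i64 }, { i32, i64 }* %b, i64 1, i32 1
// with Idx = 1 and a typical 64-bit DataLayout, the result is
// 16 (one struct stride) + 8 (field offset of the i64) = 24 bytes.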
6683 static Optional<int64_t>
6684 getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
6685 // Skip over the first indices.
6686 gep_type_iterator GTI = gep_type_begin(GEP);
6687 for (unsigned i = 1; i != Idx; ++i, ++GTI)
6688 /*skip along*/;
6689
6690 // Compute the offset implied by the rest of the indices.
6691 int64_t Offset = 0;
6692 for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
6693 ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
6694 if (!OpC)
6695 return None;
6696 if (OpC->isZero())
6697 continue; // No offset.
6698
6699 // Handle struct indices, which add their field offset to the pointer.
6700 if (StructType *STy = GTI.getStructTypeOrNull()) {
6701 Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
6702 continue;
6703 }
6704
6705 // Otherwise, we have a sequential type like an array or fixed-length
6706 // vector. Multiply the index by the ElementSize.
6707 TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
6708 if (Size.isScalable())
6709 return None;
6710 Offset += Size.getFixedSize() * OpC->getSExtValue();
6711 }
6712
6713 return Offset;
6714 }
6715
6716 Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
6717 const DataLayout &DL) {
6718 Ptr1 = Ptr1->stripPointerCasts();
6719 Ptr2 = Ptr2->stripPointerCasts();
6720
6721 // Handle the trivial case first.
6722 if (Ptr1 == Ptr2) {
6723 return 0;
6724 }
6725
6726 const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
6727 const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
6728
6729 // If one pointer is a GEP see if the GEP is a constant offset from the base,
6730 // as in "P" and "gep P, 1".
6731 // Also do this iteratively to handle the following case:
6732 // Ptr_t1 = GEP Ptr1, c1
6733 // Ptr_t2 = GEP Ptr_t1, c2
6734 // Ptr2 = GEP Ptr_t2, c3
6735 // where we will return c1+c2+c3.
6736 // TODO: Handle the case when both Ptr1 and Ptr2 are GEPs of some common base
6737 // -- replace getOffsetFromBase with getOffsetAndBase, check that the bases
6738 // are the same, and return the difference between offsets.
6739 auto getOffsetFromBase = [&DL](const GEPOperator *GEP,
6740 const Value *Ptr) -> Optional<int64_t> {
6741 const GEPOperator *GEP_T = GEP;
6742 int64_t OffsetVal = 0;
6743 bool HasSameBase = false;
6744 while (GEP_T) {
6745 auto Offset = getOffsetFromIndex(GEP_T, 1, DL);
6746 if (!Offset)
6747 return None;
6748 OffsetVal += *Offset;
6749 auto Op0 = GEP_T->getOperand(0)->stripPointerCasts();
6750 if (Op0 == Ptr) {
6751 HasSameBase = true;
6752 break;
6753 }
6754 GEP_T = dyn_cast<GEPOperator>(Op0);
6755 }
6756 if (!HasSameBase)
6757 return None;
6758 return OffsetVal;
6759 };
6760
6761 if (GEP1) {
6762 auto Offset = getOffsetFromBase(GEP1, Ptr2);
6763 if (Offset)
6764 return -*Offset;
6765 }
6766 if (GEP2) {
6767 auto Offset = getOffsetFromBase(GEP2, Ptr1);
6768 if (Offset)
6769 return Offset;
6770 }
6771
6772 // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
6773 // base. After that base, they may have some number of common (and
6774 // potentially variable) indices. After those, they may have a constant
6775 // offset, which determines their offset from each other. At this point, we
6776 // handle no other case.
6777 if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
6778 return None;
6779
6780 // Skip any common indices and track the GEP types.
6781 unsigned Idx = 1;
6782 for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
6783 if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
6784 break;
6785
6786 auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL);
6787 auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);
6788 if (!Offset1 || !Offset2)
6789 return None;
6790 return *Offset2 - *Offset1;
6791 }
6792