//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Cheri.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
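/// For example, <4 x i32> yields 32. For a CHERI capability pointer this is
/// the address width (e.g. 64 bits for a 128-bit capability), since only the
/// address range is usable here.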
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  // For CHERI capabilities the usable range here is the address range.
  return DL.getPointerAddrSizeInBits(Ty);
}

namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
  /// (all of which can call computeKnownBits), and so on.
  std::array<const Value *, MaxDepth> Excluded;

  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ;

  unsigned NumExcluded = 0;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, bool UseInstrInfo,
        OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

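// Maps demanded elements of a shuffle result back onto its two operands. As
// an illustrative example: shuffling two <4 x i32> vectors with mask
// <0, 5, 2, 7> while demanding result elements {0, 1} demands element 0 of
// the LHS (mask value 0) and element 1 of the RHS (mask value 5 selects RHS
// element 5 - 4 = 1).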
static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
                                   const APInt &DemandedElts,
                                   APInt &DemandedLHS, APInt &DemandedRHS) {
  // The length of scalable vectors is unknown at compile time, thus we
  // cannot check their values.
  if (isa<ScalableVectorType>(Shuf->getType()))
    return false;

  int NumElts =
      cast<VectorType>(Shuf->getOperand(0)->getType())->getNumElements();
  int NumMaskElts = Shuf->getType()->getNumElements();
  DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts);
  if (DemandedElts.isNullValue())
    return true;
  // Simple case of a shuffle with zeroinitializer.
  if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
    DemandedLHS.setBit(0);
    return true;
  }
  for (int i = 0; i != NumMaskElts; ++i) {
    if (!DemandedElts[i])
      continue;
    int M = Shuf->getMaskValue(i);
    assert(M < (NumElts * 2) && "Invalid shuffle mask constant");

    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
    if (M == -1)
      return false;
    if (M < NumElts)
      DemandedLHS.setBit(M % NumElts);
    else
      DemandedRHS.setBit(M % NumElts);
  }

  return true;
}

static void computeKnownBits(const Value *V, const APInt &DemandedElts,
                             KnownBits &Known, unsigned Depth, const Query &Q);

static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                             const Query &Q) {
  // FIXME: We currently have no way to represent the DemandedElts of a
  // scalable vector.
  if (isa<ScalableVectorType>(V->getType())) {
    Known.resetAll();
    return;
  }

  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
}

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                            KnownBits &Known, const DataLayout &DL,
                            unsigned Depth, AssumptionCache *AC,
                            const Instruction *CxtI, const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, DemandedElts, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                                  unsigned Depth, const Query &Q);

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                                 const DataLayout &DL, unsigned Depth,
                                 AssumptionCache *AC, const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, DemandedElts, Depth,
      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

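// For example, (X & ~M) and (Y & M) select disjoint sets of bits, so adding
// the two values behaves like an or of them; the inverted-mask match below
// catches this case even when the recursive known-bits computation cannot.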
bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
  Value *M;
  if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(RHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(LHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}

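// For example, this holds for a call result that is only ever tested with
// `icmp eq`/`icmp ne` against the constant 0 (a typical use of memcmp).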
bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const Query &Q);

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2,
                           Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

#define DEBUG_TAG(...)                                                         \
  DEBUG_WITH_TYPE("infer-tag", dbgs() << __func__ << "(d=" << Depth            \
                                      << "): " << __VA_ARGS__;                 \
                  dbgs() << " -- "; V->dump())

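// For example (a sketch of the cases handled below): a capability produced by
// llvm.cheri.cap.tag.clear, or one derived only from NULL via GEPs or
// offset/address/bounds/permission manipulation, is known to be untagged;
// anything else is conservatively assumed to be potentially tagged.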
static bool isKnownUntaggedCapability(const Value *V, unsigned Depth,
                                      const DataLayout *DL) {
  assert(V && "No Value?");
  assert(isCheriPointer(V->getType(), DL));
  if (Depth >= MaxDepth) {
    DEBUG_TAG("reached max depth -> false");
    return false;
  }
  if (DL) {
    V = getBasePtrIgnoringCapabilityAddressManipulation(const_cast<Value *>(V),
                                                        *DL);
  }
  if (isa<ConstantPointerNull>(V)) {
    DEBUG_TAG("NULL base pointer -> true");
    return true; // NULL is always untagged
  } else if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::cheri_cap_tag_clear:
      DEBUG_TAG("ccleartag -> true");
      return true;
    case Intrinsic::cheri_cap_offset_set:
    case Intrinsic::cheri_cap_address_set:
    case Intrinsic::cheri_cap_perms_and:
    case Intrinsic::cheri_cap_flags_set:
    case Intrinsic::cheri_cap_bounds_set:
    case Intrinsic::cheri_cap_bounds_set_exact:
      if (isa<ConstantPointerNull>(II->getOperand(0))) {
        DEBUG_TAG("modifying NULL cap -> true");
        return true;
      }
      // Otherwise check if the source argument is known to be untagged:
      return isKnownUntaggedCapability(II->getOperand(0), Depth + 1, DL);
    case Intrinsic::cheri_cap_from_pointer:
      if (auto CI = dyn_cast<ConstantInt>(II->getOperand(1))) {
        DEBUG_TAG("CFromPtr on constant -> " << CI->isZeroValue());
        return CI->isZeroValue(); // cfromptr with zero returns NULL
      }
      DEBUG_TAG("CFromPtr on nonconst -> false");
      return false; // We don't know if the source is NULL -> could be tagged
    default:
      DEBUG_TAG("unknown intrinsic -> false");
      // Any other intrinsic returning a capability could be a tagged value
      return false;
    }
  } else if (const auto *GEPI = dyn_cast<GetElementPtrInst>(V)) {
    // GEP on NULL has to be untagged
    DEBUG_TAG("is GEP on NULL? -> "
              << isa<ConstantPointerNull>(GEPI->getPointerOperand()));
    return isa<ConstantPointerNull>(GEPI->getPointerOperand());
  } else if (const auto *ITP = dyn_cast<IntToPtrInst>(V)) {
    // IntToPtr returns untagged values in the pure capability ABI
    bool Purecap = DL && isCheriPointer(DL->getAllocaAddrSpace(), DL);
425 DEBUG_TAG("is nonconst inttoptr in" << (Purecap ? "purecap" : "hybrid")
426 << " ABI -> " << Purecap);
    bool IsZero = false;
    if (auto Constant = dyn_cast<ConstantInt>(ITP->getOperand(0)))
      IsZero = Constant->isNullValue();
    return Purecap || IsZero;
  } else if (const auto *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      // IntToPtr returns untagged values in the pure capability ABI
      bool Purecap = DL && isCheriPointer(DL->getAllocaAddrSpace(), DL);
435 DEBUG_TAG("is constant inttoptr in" << (Purecap ? "purecap" : "hybrid")
436 << " ABI -> " << Purecap);
      return Purecap;
    } else if (CE->getOpcode() == Instruction::GetElementPtr) {
      if (CE->getOperand(0)->isNullValue()) {
        DEBUG_TAG("GEP on NULL -> true");
        return true;
      } else {
        DEBUG_TAG("GEP on non-NULL value -> false");
        return false;
      }
    }
    DEBUG_TAG("unknown constantexpr -> false");
    return false;
  }
  DEBUG_TAG("unknown value -> false");
  // Anything else could be tagged (what about GEP on NULL?)
  return false;
}

bool llvm::cheri::isKnownUntaggedCapability(const Value *V,
                                            const DataLayout *DL) {
  DEBUG_WITH_TYPE("infer-tag", dbgs() << "Checking if value is untagged:";
                  V->dump());
  return ::isKnownUntaggedCapability(V, 0, DL);
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
                                   unsigned Depth, const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  // FIXME: We currently have no way to represent the DemandedElts of a
  // scalable vector.
  if (isa<ScalableVectorType>(V->getType()))
    return 1;

  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
  return ComputeNumSignBits(V, DemandedElts, Depth, Q);
}

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW, const APInt &DemandedElts,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);

  // If one operand is unknown and we have no nowrap information,
  // the result will be unknown independently of the second operand.
  if (KnownOut.isUnknown() && !NSW)
    return;

  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
  KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                const APInt &DemandedElts, KnownBits &Known,
                                KnownBits &Known2, unsigned Depth,
                                const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  assert(!Known.hasConflict() && !Known2.hasConflict());
  // Compute a conservative estimate for high known-0 bits.
  unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                                Known2.countMinLeadingZeros(),
                            BitWidth) - BitWidth;
  LeadZ = std::min(LeadZ, BitWidth);

  // The result of the bottom bits of an integer multiply can be
  // inferred by looking at the bottom bits of both operands and
  // multiplying them together.
  // We can infer at least the minimum number of known trailing bits
  // of both operands. Depending on number of trailing zeros, we can
  // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
  // a and b are divisible by m and n respectively.
  // We then calculate how many of those bits are inferrable and set
  // the output. For example, the i8 mul:
  //  a = XXXX1100 (12)
  //  b = XXXX1110 (14)
  // We know the bottom 3 bits are zero since the first can be divided by
  // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
  // Applying the multiplication to the trimmed arguments gets:
  //    XX11 (3)
  //    X111 (7)
  //    -------
  //       XX11
  //      XX11
  //     XX11
  //    XX11
  //    -------
  //    XXXXX01
  // Which allows us to infer the 2 LSBs. Since we're multiplying the result
  // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
  // The proof for this can be described as:
  // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
  //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
  //                    umin(countTrailingZeros(C2), C6) +
  //                    umin(C5 - umin(countTrailingZeros(C1), C5),
  //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
  // %aa = shl i8 %a, C5
  // %bb = shl i8 %b, C6
  // %aaa = or i8 %aa, C1
  // %bbb = or i8 %bb, C2
  // %mul = mul i8 %aaa, %bbb
  // %mask = and i8 %mul, C7
  //   =>
  // %mask = i8 ((C1*C2)&C7)
  // Where C5, C6 describe the known bits of %a, %b
  // C1, C2 describe the known bottom bits of %a, %b.
  // C7 describes the mask of the known bits of the result.
  APInt Bottom0 = Known.One;
  APInt Bottom1 = Known2.One;

  // How many times we'd be able to divide each argument by 2 (shr by 1).
  // This gives us the number of trailing zeros on the multiplication result.
  unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
  unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
  unsigned TrailZero0 = Known.countMinTrailingZeros();
  unsigned TrailZero1 = Known2.countMinTrailingZeros();
  unsigned TrailZ = TrailZero0 + TrailZero1;

  // Figure out the fewest known-bits operand.
  unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
                                      TrailBitsKnown1 - TrailZero1);
  unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);

  APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
                      Bottom1.getLoBits(TrailBitsKnown1);

  Known.resetAll();
  Known.Zero.setHighBits(LeadZ);
  Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
  Known.One |= BottomKnown.getLoBits(ResultBitsKnown);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

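// As a worked example: a load annotated with !range !{i32 1, i32 256} takes
// values in [1, 256), so the unsigned min and max share a 24-bit common
// prefix and the top 24 bits become known zero; the low 8 bits stay unknown.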
void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    Known.One &= Range.getUnsignedMax() & Mask;
    Known.Zero &= ~Range.getUnsignedMax() & Mask;
  }
}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
          return EphValues.count(U);
        })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
               J != JE; ++J)
            WorkSet.push_back(*J);
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::sideeffect:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::dbg_label:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (Inv->getParent() == CxtI->getParent()) {
    // If Inv and CtxI are in the same block, check if the assume (Inv) is first
    // in the BB.
    if (Inv->comesBefore(CxtI))
      return true;

    // Don't let an assume affect itself - this would cause the problems
    // `isEphemeralValueOf` is trying to prevent, and it would also make
    // the loop below go out of bounds.
    if (Inv == CxtI)
      return false;

    // The context comes first, but they're both in the same block.
    // Make sure there is nothing in between that might interrupt
    // the control flow, not even CxtI itself.
    for (BasicBlock::const_iterator I(CxtI), IE(Inv); I != IE; ++I)
      if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
        return false;

    return !isEphemeralValueOf(Inv, CxtI);
  }

  // Inv and CxtI are in different blocks.
  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  return false;
}

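// For example, given
//   %cmp = icmp ugt i64 %v, 8
//   call void @llvm.assume(i1 %cmp)
// a context for which the assume is valid may conclude that %v is non-zero.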
static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return false;

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  auto CmpExcludesZero = [V](ICmpInst *Cmp) {
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));

    Value *RHS;
    CmpInst::Predicate Pred;
    if (!match(Cmp, m_c_ICmp(Pred, m_V, m_Value(RHS))))
      return false;
    // assume(v u> y) -> assume(v != 0)
    if (Pred == ICmpInst::ICMP_UGT)
      return true;

    // assume(v != 0)
    // We special-case this one to ensure that we handle `assume(v != null)`.
    if (Pred == ICmpInst::ICMP_NE)
      return match(RHS, m_Zero());

    // All other predicates - rely on generic ConstantRange handling.
    ConstantInt *CI;
    if (!match(RHS, m_ConstantInt(CI)))
      return false;
    ConstantRange RHSRange(CI->getValue());
    ConstantRange TrueValues =
        ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
    return !TrueValues.contains(APInt::getNullValue(CI->getBitWidth()));
  };

  if (Q.CxtI && V->getType()->isPointerTy()) {
    SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
    if (!NullPointerIsDefined(Q.CxtI->getFunction(),
                              V->getType()->getPointerAddressSpace()))
      AttrKinds.push_back(Attribute::Dereferenceable);

    if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
      return true;
  }

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getFunction() == Q.CxtI->getFunction() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We run this loop once for each value queried, resulting in a runtime
    // of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);
    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    if (CmpExcludesZero(Cmp) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
      return true;
  }

  return false;
}

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Refine Known set if the pointer alignment is set by assume bundles.
  if (V->getType()->isPointerTy()) {
    if (RetainedKnowledge RK = getKnowledgeValidInContext(
            V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
      Known.Zero.setLowBits(Log2_32(RK.ArgValue));
    }
  }

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We run this loop once for each value queried, resulting in a runtime
    // of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    // Note that ptrtoint may change the bitwidth.
    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));

    CmpInst::Predicate Pred;
    uint64_t C;
    switch (Cmp->getPredicate()) {
    default:
      break;
    case ICmpInst::ICMP_EQ:
      // assume(v = a)
      if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
        Known.Zero |= RHSKnown.Zero;
        Known.One |= RHSKnown.One;
        // assume(v & b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
        KnownBits MaskKnown =
            computeKnownBits(B, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in the mask that are known to be one, we can
        // propagate known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & MaskKnown.One;
        Known.One |= RHSKnown.One & MaskKnown.One;
        // assume(~(v & b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
        KnownBits MaskKnown =
            computeKnownBits(B, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in the mask that are known to be one, we can
        // propagate inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & MaskKnown.One;
        Known.One |= RHSKnown.Zero & MaskKnown.One;
        // assume(v | b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One |= RHSKnown.One & BKnown.Zero;
        // assume(~(v | b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & BKnown.Zero;
        Known.One |= RHSKnown.Zero & BKnown.Zero;
        // assume(v ^ b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // known bits from the RHS to V. For those bits in B that are known to
        // be one, we can propagate inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One |= RHSKnown.One & BKnown.Zero;
        Known.Zero |= RHSKnown.One & BKnown.One;
        Known.One |= RHSKnown.Zero & BKnown.One;
        // assume(~(v ^ b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V. For those bits in B that are
        // known to be one, we can propagate known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & BKnown.Zero;
        Known.One |= RHSKnown.Zero & BKnown.Zero;
        Known.Zero |= RHSKnown.Zero & BKnown.One;
        Known.One |= RHSKnown.One & BKnown.One;
        // assume(v << c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the right by C.
        RHSKnown.Zero.lshrInPlace(C);
        Known.Zero |= RHSKnown.Zero;
        RHSKnown.One.lshrInPlace(C);
        Known.One |= RHSKnown.One;
        // assume(~(v << c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the right by C.
        RHSKnown.One.lshrInPlace(C);
        Known.Zero |= RHSKnown.One;
        RHSKnown.Zero.lshrInPlace(C);
        Known.One |= RHSKnown.Zero;
        // assume(v >> c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.Zero << C;
        Known.One |= RHSKnown.One << C;
        // assume(~(v >> c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.One << C;
        Known.One |= RHSKnown.Zero << C;
      }
      break;
    case ICmpInst::ICMP_SGE:
      // assume(v >=_s c) where c is non-negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);

        if (RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SGT:
      // assume(v >_s c) where c is at least -1.
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);

        if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLE:
      // assume(v <=_s c) where c is negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);

        if (RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLT:
      // assume(v <_s c) where c is non-positive
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);

        if (RHSKnown.isZero() || RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_ULE:
      // assume(v <=_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // Whatever high bits in c are zero are known to be zero.
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    case ICmpInst::ICMP_ULT:
      // assume(v <_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // If the RHS is known zero, then this assumption must be wrong
        // (nothing is unsigned less than zero). Signal a conflict and get out
        // of here.
        if (RHSKnown.isZero()) {
          Known.Zero.setAllBits();
          Known.One.setAllBits();
          break;
        }

        // Whatever high bits in c are zero are known to be zero (if c is a
        // power of 2, then one more).
        if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
        else
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
/// operator-specific functions that, given the known-zero or known-one bits
/// respectively, and a shift amount, compute the implied known-zero or
/// known-one bits of the shift operator's result respectively for that shift
/// amount. The results from calling KZF and KOF are conservatively combined for
/// all permitted shift amounts.
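/// For example (a sketch mirroring the shl handling further below), the KZF
/// for a plain shl is roughly:
///   [](const APInt &KnownZero, unsigned ShiftAmt) {
///     APInt KZ = KnownZero << ShiftAmt;
///     KZ.setLowBits(ShiftAmt); // Shifted-in low bits are known zero.
///     return KZ;
///   }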
static void computeKnownBitsFromShiftOperator(
    const Operator *I, const APInt &DemandedElts, KnownBits &Known,
    KnownBits &Known2, unsigned Depth, const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = Known.getBitWidth();

  computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
  if (Known.isConstant()) {
    unsigned ShiftAmt = Known.getConstant().getLimitedValue(BitWidth - 1);

    computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
    Known.Zero = KZF(Known.Zero, ShiftAmt);
    Known.One = KOF(Known.One, ShiftAmt);
    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive.
  // TODO: Should we just carry on?
  if (Known.getMaxValue().uge(BitWidth)) {
    Known.resetAll();
    return;
  }

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();

  // It would be more clearly correct to use two temporaries for this
  // calculation, but we reuse the APInts here to prevent unnecessary
  // allocations.
1197 Known.resetAll();
1198
1199 // If we know the shifter operand is nonzero, we can sometimes infer more
1200 // known bits. However this is expensive to compute, so be lazy about it and
1201 // only compute it when absolutely necessary.
1202 Optional<bool> ShifterOperandIsNonZero;
1203
1204 // Early exit if we can't constrain any well-defined shift amount.
1205 if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
1206 !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
1207 ShifterOperandIsNonZero =
1208 isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1209 if (!*ShifterOperandIsNonZero)
1210 return;
1211 }
1212
1213 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1214
1215 Known.Zero.setAllBits();
1216 Known.One.setAllBits();
1217 for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
1218 // Combine the shifted known input bits only for those shift amounts
1219 // compatible with its known constraints.
1220 if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
1221 continue;
1222 if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
1223 continue;
1224 // If we know the shifter is nonzero, we may be able to infer more known
1225 // bits. This check is sunk down as far as possible to avoid the expensive
1226 // call to isKnownNonZero if the cheaper checks above fail.
1227 if (ShiftAmt == 0) {
1228 if (!ShifterOperandIsNonZero.hasValue())
1229 ShifterOperandIsNonZero =
1230 isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1231 if (*ShifterOperandIsNonZero)
1232 continue;
1233 }
1234
1235 Known.Zero &= KZF(Known2.Zero, ShiftAmt);
1236 Known.One &= KOF(Known2.One, ShiftAmt);
1237 }
1238
1239 // If the known bits conflict, the result is poison. Return a 0 and hope the
1240 // caller can further optimize that.
1241 if (Known.hasConflict())
1242 Known.setAllZero();
1243 }
1244
computeKnownBitsFromOperator(const Operator * I,const APInt & DemandedElts,KnownBits & Known,unsigned Depth,const Query & Q)1245 static void computeKnownBitsFromOperator(const Operator *I,
1246 const APInt &DemandedElts,
1247 KnownBits &Known, unsigned Depth,
1248 const Query &Q) {
1249 unsigned BitWidth = Known.getBitWidth();
1250
1251 KnownBits Known2(BitWidth);
1252 switch (I->getOpcode()) {
1253 default: break;
1254 case Instruction::Load:
1255 if (MDNode *MD =
1256 Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
1257 computeKnownBitsFromRangeMetadata(*MD, Known);
1258 break;
1259 case Instruction::And: {
1260 // If either the LHS or the RHS are Zero, the result is zero.
1261 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1262 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1263
1264 Known &= Known2;
1265
1266 // and(x, add (x, -1)) is a common idiom that always clears the low bit;
1267 // here we handle the more general case of adding any odd number by
1268 // matching the form add(x, add(x, y)) where y is odd.
1269 // TODO: This could be generalized to clearing any bit set in y where the
1270 // following bit is known to be unset in y.
1271 Value *X = nullptr, *Y = nullptr;
1272 if (!Known.Zero[0] && !Known.One[0] &&
1273 match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
1274 Known2.resetAll();
1275 computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
1276 if (Known2.countMinTrailingOnes() > 0)
1277 Known.Zero.setBit(0);
1278 }
1279 break;
1280 }
1281 case Instruction::Or:
1282 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1283 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1284
1285 Known |= Known2;
1286 break;
1287 case Instruction::Xor:
1288 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1289 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1290
1291 Known ^= Known2;
1292 break;
1293 case Instruction::Mul: {
1294 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1295 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
1296 Known, Known2, Depth, Q);
1297 break;
1298 }
1299 case Instruction::UDiv: {
1300 // For the purposes of computing leading zeros we can conservatively
1301 // treat a udiv as a logical right shift by the power of 2 known to
1302 // be less than the denominator.
1303 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1304 unsigned LeadZ = Known2.countMinLeadingZeros();
1305
1306 Known2.resetAll();
1307 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1308 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
1309 if (RHSMaxLeadingZeros != BitWidth)
1310 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
1311
1312 Known.Zero.setHighBits(LeadZ);
1313 break;
1314 }
1315 case Instruction::Select: {
1316 const Value *LHS = nullptr, *RHS = nullptr;
1317 SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1318 if (SelectPatternResult::isMinOrMax(SPF)) {
1319 computeKnownBits(RHS, Known, Depth + 1, Q);
1320 computeKnownBits(LHS, Known2, Depth + 1, Q);
1321 } else {
1322 computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1323 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1324 }
1325
1326 unsigned MaxHighOnes = 0;
1327 unsigned MaxHighZeros = 0;
1328 if (SPF == SPF_SMAX) {
1329 // If both sides are negative, the result is negative.
1330 if (Known.isNegative() && Known2.isNegative())
1331 // We can derive a lower bound on the result by taking the max of the
1332 // leading one bits.
1333 MaxHighOnes =
1334 std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1335 // If either side is non-negative, the result is non-negative.
1336 else if (Known.isNonNegative() || Known2.isNonNegative())
1337 MaxHighZeros = 1;
1338 } else if (SPF == SPF_SMIN) {
1339 // If both sides are non-negative, the result is non-negative.
1340 if (Known.isNonNegative() && Known2.isNonNegative())
1341 // We can derive an upper bound on the result by taking the max of the
1342 // leading zero bits.
1343 MaxHighZeros = std::max(Known.countMinLeadingZeros(),
1344 Known2.countMinLeadingZeros());
1345 // If either side is negative, the result is negative.
1346 else if (Known.isNegative() || Known2.isNegative())
1347 MaxHighOnes = 1;
1348 } else if (SPF == SPF_UMAX) {
1349 // We can derive a lower bound on the result by taking the max of the
1350 // leading one bits.
1351 MaxHighOnes =
1352 std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1353 } else if (SPF == SPF_UMIN) {
1354 // We can derive an upper bound on the result by taking the max of the
1355 // leading zero bits.
1356 MaxHighZeros =
1357 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1358 } else if (SPF == SPF_ABS) {
1359 // RHS from matchSelectPattern returns the negation part of abs pattern.
1360 // If the negate has an NSW flag we can assume the sign bit of the result
1361 // will be 0 because that makes abs(INT_MIN) undefined.
1362 if (match(RHS, m_Neg(m_Specific(LHS))) &&
1363 Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
1364 MaxHighZeros = 1;
1365 }
1366
1367 // Only known if known in both the LHS and RHS.
1368 Known.One &= Known2.One;
1369 Known.Zero &= Known2.Zero;
1370 if (MaxHighOnes > 0)
1371 Known.One.setHighBits(MaxHighOnes);
1372 if (MaxHighZeros > 0)
1373 Known.Zero.setHighBits(MaxHighZeros);
1374 break;
1375 }
1376 case Instruction::FPTrunc:
1377 case Instruction::FPExt:
1378 case Instruction::FPToUI:
1379 case Instruction::FPToSI:
1380 case Instruction::SIToFP:
1381 case Instruction::UIToFP:
1382 break; // Can't work with floating point.
1383 case Instruction::PtrToInt:
1384 case Instruction::IntToPtr:
1385 // Fall through and handle them the same as zext/trunc.
1386 LLVM_FALLTHROUGH;
1387 case Instruction::ZExt:
1388 case Instruction::Trunc: {
1389 Type *SrcTy = I->getOperand(0)->getType();
1390
1391 unsigned SrcBitWidth;
1392 // Note that we handle pointer operands here because of inttoptr/ptrtoint
1393 // which fall through here.
1394 Type *ScalarTy = SrcTy->getScalarType();
1395 SrcBitWidth = ScalarTy->isPointerTy() ?
1396 Q.DL.getPointerAddrSizeInBits(ScalarTy) :
1397 Q.DL.getTypeSizeInBits(ScalarTy);
1398
1399 assert(SrcBitWidth && "SrcBitWidth can't be zero");
1400 Known = Known.anyextOrTrunc(SrcBitWidth);
1401 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1402 Known = Known.zextOrTrunc(BitWidth);
1403 break;
1404 }
1405 case Instruction::BitCast: {
1406 Type *SrcTy = I->getOperand(0)->getType();
1407 if (SrcTy->isIntOrPtrTy() &&
1408 // TODO: For now, not handling conversions like:
1409 // (bitcast i64 %x to <2 x i32>)
1410 !I->getType()->isVectorTy()) {
1411 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1412 break;
1413 }
1414 break;
1415 }
1416 case Instruction::AddrSpaceCast: {
1417 Type *SrcTy = I->getOperand(0)->getType();
1418 Type *DestTy = I->getType();
1419 // Address space conversions between CHERI capability and integer pointer
1420 // do not affect the virtual address bits
1421 if (SrcTy->isIntOrPtrTy() && isCheriPointer(SrcTy, &Q.DL) != isCheriPointer(DestTy, &Q.DL)) {
1422 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1423 break;
1424 }
1425 break;
1426 }
1427 case Instruction::SExt: {
1428 // Compute the bits in the result that are not present in the input.
1429 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1430
1431 Known = Known.trunc(SrcBitWidth);
1432 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1433 // If the sign bit of the input is known set or clear, then we know the
1434 // top bits of the result.
1435 Known = Known.sext(BitWidth);
1436 break;
1437 }
1438 case Instruction::Shl: {
1439 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
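// e.g. if X has its low two bits known zero, then (shl X, 3) has its
// low five bits known zero.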
1440 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1441 auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
1442 APInt KZResult = KnownZero << ShiftAmt;
1443 KZResult.setLowBits(ShiftAmt); // Low bits known 0.
1444 // If this shift has "nsw" keyword, then the result is either a poison
1445 // value or has the same sign bit as the first operand.
1446 if (NSW && KnownZero.isSignBitSet())
1447 KZResult.setSignBit();
1448 return KZResult;
1449 };
1450
1451 auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
1452 APInt KOResult = KnownOne << ShiftAmt;
1453 if (NSW && KnownOne.isSignBitSet())
1454 KOResult.setSignBit();
1455 return KOResult;
1456 };
1457
1458 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1459 KZF, KOF);
1460 break;
1461 }
1462 case Instruction::LShr: {
1463 // (lshr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
1464 auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1465 APInt KZResult = KnownZero.lshr(ShiftAmt);
1466 // High bits known zero.
1467 KZResult.setHighBits(ShiftAmt);
1468 return KZResult;
1469 };
1470
1471 auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1472 return KnownOne.lshr(ShiftAmt);
1473 };
1474
1475 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1476 KZF, KOF);
1477 break;
1478 }
1479 case Instruction::AShr: {
1480 // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
1481 auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1482 return KnownZero.ashr(ShiftAmt);
1483 };
1484
1485 auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1486 return KnownOne.ashr(ShiftAmt);
1487 };
1488
1489 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1490 KZF, KOF);
1491 break;
1492 }
1493 case Instruction::Sub: {
1494 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1495 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1496 DemandedElts, Known, Known2, Depth, Q);
1497 break;
1498 }
1499 case Instruction::Add: {
1500 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1501 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1502 DemandedElts, Known, Known2, Depth, Q);
1503 break;
1504 }
1505 case Instruction::SRem:
1506 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1507 APInt RA = Rem->getValue().abs();
1508 if (RA.isPowerOf2()) {
1509 APInt LowBits = RA - 1;
1510 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1511
1512 // The low bits of the first operand are unchanged by the srem.
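// e.g. "srem X, 8" leaves the low three bits of X unchanged.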
1513 Known.Zero = Known2.Zero & LowBits;
1514 Known.One = Known2.One & LowBits;
1515
1516 // If the first operand is non-negative or has all low bits zero, then
1517 // the upper bits are all zero.
1518 if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
1519 Known.Zero |= ~LowBits;
1520
1521 // If the first operand is negative and not all low bits are zero, then
1522 // the upper bits are all one.
1523 if (Known2.isNegative() && LowBits.intersects(Known2.One))
1524 Known.One |= ~LowBits;
1525
1526 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1527 break;
1528 }
1529 }
1530
1531 // The sign bit is the LHS's sign bit, except when the result of the
1532 // remainder is zero.
1533 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1534     // If the LHS's sign bit is known zero, the result's sign bit is also zero.
1535 if (Known2.isNonNegative())
1536 Known.makeNonNegative();
1537
1538 break;
1539 case Instruction::URem: {
1540 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1541 const APInt &RA = Rem->getValue();
1542 if (RA.isPowerOf2()) {
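// e.g. "urem X, 8" is X & 7: only the low three bits can be set.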
1543 APInt LowBits = (RA - 1);
1544 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1545 Known.Zero |= ~LowBits;
1546 Known.One &= LowBits;
1547 break;
1548 }
1549 }
1550
1551 // Since the result is less than or equal to either operand, any leading
1552 // zero bits in either operand must also exist in the result.
1553 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1554 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1555
1556 unsigned Leaders =
1557 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1558 Known.resetAll();
1559 Known.Zero.setHighBits(Leaders);
1560 break;
1561 }
1562 case Instruction::Alloca:
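// An alloca is at least as aligned as its "align" attribute, so e.g. an
// align-16 alloca has its low four address bits known zero.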
1563 Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
1564 break;
1565 case Instruction::GetElementPtr: {
1566 // Analyze all of the subscripts of this getelementptr instruction
1567 // to determine if we can prove known low zero bits.
1568 KnownBits LocalKnown(BitWidth);
1569 computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
1570
1571 const auto *GEP = cast<GEPOperator>(I);
1572 if (GEP->hasAllConstantIndices()) {
1573 // If the GEP indices are all constant we can treat this as an add
1574 APInt GEPOffset(Q.DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
1575 if (GEP->accumulateConstantOffset(Q.DL, GEPOffset)) {
1576 Known2.One = GEPOffset;
1577 Known2.Zero = ~GEPOffset;
1578 Known = KnownBits::computeForAddSub(
1579 /*Add=*/true, /*NSW=*/false, LocalKnown,
1580 Known2.zextOrTrunc(LocalKnown.getBitWidth()));
1581 break;
1582 }
1583 }
1584
1585 unsigned TrailZ = LocalKnown.countMinTrailingZeros();
1586
1587 gep_type_iterator GTI = gep_type_begin(I);
1588 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1589 // TrailZ can only become smaller, short-circuit if we hit zero.
1590 if (TrailZ == 0)
1591 break;
1592
1593 Value *Index = I->getOperand(i);
1594 if (StructType *STy = GTI.getStructTypeOrNull()) {
1595 // Handle struct member offset arithmetic.
1596
1597         // Handle the case where the index is a vector zeroinitializer.
1598 Constant *CIndex = cast<Constant>(Index);
1599 if (CIndex->isZeroValue())
1600 continue;
1601
1602 if (CIndex->getType()->isVectorTy())
1603 Index = CIndex->getSplatValue();
1604
1605 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1606 const StructLayout *SL = Q.DL.getStructLayout(STy);
1607 uint64_t Offset = SL->getElementOffset(Idx);
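// A constant field offset contributes its trailing zeros, e.g. a member
// at offset 24 contributes countTrailingZeros(24) == 3 bits.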
1608 TrailZ = std::min<unsigned>(TrailZ,
1609 countTrailingZeros(Offset));
1610 } else {
1611 // Handle array index arithmetic.
1612 Type *IndexedTy = GTI.getIndexedType();
1613 if (!IndexedTy->isSized()) {
1614 TrailZ = 0;
1615 break;
1616 }
1617 unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1618 uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy).getKnownMinSize();
1619 LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
1620 computeKnownBits(Index, LocalKnown, Depth + 1, Q);
1621 TrailZ = std::min(TrailZ,
1622 unsigned(countTrailingZeros(TypeSize) +
1623 LocalKnown.countMinTrailingZeros()));
1624 }
1625 }
1626
1627 Known.Zero.setLowBits(TrailZ);
1628 break;
1629 }
1630 case Instruction::PHI: {
1631 const PHINode *P = cast<PHINode>(I);
1632 // Handle the case of a simple two-predecessor recurrence PHI.
1633 // There's a lot more that could theoretically be done here, but
1634 // this is sufficient to catch some interesting cases.
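// A typical match is a loop induction variable, e.g.:
//   %iv = phi i64 [ %start, %entry ], [ %iv.next, %latch ]
//   %iv.next = add nsw i64 %iv, %step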
1635 if (P->getNumIncomingValues() == 2) {
1636 for (unsigned i = 0; i != 2; ++i) {
1637 Value *L = P->getIncomingValue(i);
1638 Value *R = P->getIncomingValue(!i);
1639 Instruction *RInst = P->getIncomingBlock(!i)->getTerminator();
1640 Instruction *LInst = P->getIncomingBlock(i)->getTerminator();
1641 Operator *LU = dyn_cast<Operator>(L);
1642 if (!LU)
1643 continue;
1644 unsigned Opcode = LU->getOpcode();
1645 // Check for operations that have the property that if
1646 // both their operands have low zero bits, the result
1647 // will have low zero bits.
1648 if (Opcode == Instruction::Add ||
1649 Opcode == Instruction::Sub ||
1650 Opcode == Instruction::And ||
1651 Opcode == Instruction::Or ||
1652 Opcode == Instruction::Mul) {
1653 Value *LL = LU->getOperand(0);
1654 Value *LR = LU->getOperand(1);
1655 // Find a recurrence.
1656 if (LL == I)
1657 L = LR;
1658 else if (LR == I)
1659 L = LL;
1660 else
1661 continue; // Check for recurrence with L and R flipped.
1662
1663 // Change the context instruction to the "edge" that flows into the
1664 // phi. This is important because that is where the value is actually
1665 // "evaluated" even though it is used later somewhere else. (see also
1666 // D69571).
1667 Query RecQ = Q;
1668
1669 // Ok, we have a PHI of the form L op= R. Check for low
1670 // zero bits.
1671 RecQ.CxtI = RInst;
1672 computeKnownBits(R, Known2, Depth + 1, RecQ);
1673
1674 // We need to take the minimum number of known bits
1675 KnownBits Known3(BitWidth);
1676 RecQ.CxtI = LInst;
1677 computeKnownBits(L, Known3, Depth + 1, RecQ);
1678
1679 Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1680 Known3.countMinTrailingZeros()));
1681
1682 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1683 if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1684 // If initial value of recurrence is nonnegative, and we are adding
1685 // a nonnegative number with nsw, the result can only be nonnegative
1686 // or poison value regardless of the number of times we execute the
1687 // add in phi recurrence. If initial value is negative and we are
1688 // adding a negative number with nsw, the result can only be
1689 // negative or poison value. Similar arguments apply to sub and mul.
1690 //
1691 // (add non-negative, non-negative) --> non-negative
1692 // (add negative, negative) --> negative
1693 if (Opcode == Instruction::Add) {
1694 if (Known2.isNonNegative() && Known3.isNonNegative())
1695 Known.makeNonNegative();
1696 else if (Known2.isNegative() && Known3.isNegative())
1697 Known.makeNegative();
1698 }
1699
1700 // (sub nsw non-negative, negative) --> non-negative
1701 // (sub nsw negative, non-negative) --> negative
1702 else if (Opcode == Instruction::Sub && LL == I) {
1703 if (Known2.isNonNegative() && Known3.isNegative())
1704 Known.makeNonNegative();
1705 else if (Known2.isNegative() && Known3.isNonNegative())
1706 Known.makeNegative();
1707 }
1708
1709 // (mul nsw non-negative, non-negative) --> non-negative
1710 else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1711 Known3.isNonNegative())
1712 Known.makeNonNegative();
1713 }
1714
1715 break;
1716 }
1717 }
1718 }
1719
1720 // Unreachable blocks may have zero-operand PHI nodes.
1721 if (P->getNumIncomingValues() == 0)
1722 break;
1723
1724 // Otherwise take the unions of the known bit sets of the operands,
1725 // taking conservative care to avoid excessive recursion.
1726 if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
1727       // Skip if every incoming value refers back to the PHI itself.
1728 if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1729 break;
1730
1731 Known.Zero.setAllBits();
1732 Known.One.setAllBits();
1733 for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1734 Value *IncValue = P->getIncomingValue(u);
1735 // Skip direct self references.
1736 if (IncValue == P) continue;
1737
1738 // Change the context instruction to the "edge" that flows into the
1739 // phi. This is important because that is where the value is actually
1740 // "evaluated" even though it is used later somewhere else. (see also
1741 // D69571).
1742 Query RecQ = Q;
1743 RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1744
1745 Known2 = KnownBits(BitWidth);
1746 // Recurse, but cap the recursion to one level, because we don't
1747 // want to waste time spinning around in loops.
1748 computeKnownBits(IncValue, Known2, MaxDepth - 1, RecQ);
1749 Known.Zero &= Known2.Zero;
1750 Known.One &= Known2.One;
1751 // If all bits have been ruled out, there's no need to check
1752 // more operands.
1753 if (!Known.Zero && !Known.One)
1754 break;
1755 }
1756 }
1757 break;
1758 }
1759 case Instruction::Call:
1760 case Instruction::Invoke:
1761 // If range metadata is attached to this call, set known bits from that,
1762 // and then intersect with known bits based on other properties of the
1763 // function.
1764 if (MDNode *MD =
1765 Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1766 computeKnownBitsFromRangeMetadata(*MD, Known);
1767 if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1768 computeKnownBits(RV, Known2, Depth + 1, Q);
1769 Known.Zero |= Known2.Zero;
1770 Known.One |= Known2.One;
1771 }
1772 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1773 switch (II->getIntrinsicID()) {
1774 default: break;
1775 case Intrinsic::bitreverse:
1776 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1777 Known.Zero |= Known2.Zero.reverseBits();
1778 Known.One |= Known2.One.reverseBits();
1779 break;
1780 case Intrinsic::bswap:
1781 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1782 Known.Zero |= Known2.Zero.byteSwap();
1783 Known.One |= Known2.One.byteSwap();
1784 break;
1785 case Intrinsic::cheri_cap_address_get: {
1786 // This just fetches the virtual address -> treat the same as ptrtoint:
1787 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1788 break;
1789 }
1790 case Intrinsic::cheri_cap_address_set: {
1791 // We can treat it the same as inttoptr:
1792 // The virtual address after csetaddr will be the second argument.
1793 computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
1794 break;
1795 }
1796 case Intrinsic::cheri_cap_diff: {
1797 bool NSW = false;
1798 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1799 DemandedElts, Known, Known2, Depth, Q);
1800 break;
1801 }
1802 case Intrinsic::ctlz: {
1803 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1804 // If we have a known 1, its position is our upper bound.
1805 unsigned PossibleLZ = Known2.One.countLeadingZeros();
1806 // If this call is undefined for 0, the result will be less than 2^n.
1807 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1808 PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
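// The result is at most PossibleLZ, so it fits in Log2_32(PossibleLZ) + 1
// bits; e.g. on i32 with PossibleLZ == 20, bits 5 and above are known zero.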
1809 unsigned LowBits = Log2_32(PossibleLZ)+1;
1810 Known.Zero.setBitsFrom(LowBits);
1811 break;
1812 }
1813 case Intrinsic::cttz: {
1814 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1815 // If we have a known 1, its position is our upper bound.
1816 unsigned PossibleTZ = Known2.One.countTrailingZeros();
1817 // If this call is undefined for 0, the result will be less than 2^n.
1818 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1819 PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1820 unsigned LowBits = Log2_32(PossibleTZ)+1;
1821 Known.Zero.setBitsFrom(LowBits);
1822 break;
1823 }
1824 case Intrinsic::ctpop: {
1825 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1826 // We can bound the space the count needs. Also, bits known to be zero
1827 // can't contribute to the population.
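// e.g. on i16 with at most 9 possibly-set input bits, the count is <= 9
// and fits in Log2_32(9) + 1 == 4 bits, so bits 4 and up are known zero.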
1828 unsigned BitsPossiblySet = Known2.countMaxPopulation();
1829 unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1830 Known.Zero.setBitsFrom(LowBits);
1831 // TODO: we could bound KnownOne using the lower bound on the number
1832 // of bits which might be set provided by popcnt KnownOne2.
1833 break;
1834 }
1835 case Intrinsic::fshr:
1836 case Intrinsic::fshl: {
1837 const APInt *SA;
1838 if (!match(I->getOperand(2), m_APInt(SA)))
1839 break;
1840
1841 // Normalize to funnel shift left.
1842 uint64_t ShiftAmt = SA->urem(BitWidth);
1843 if (II->getIntrinsicID() == Intrinsic::fshr)
1844 ShiftAmt = BitWidth - ShiftAmt;
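// e.g. on i8, fshr by 3 reads the same bits as fshl by 5.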
1845
1846 KnownBits Known3(BitWidth);
1847 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1848 computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1849
1850 Known.Zero =
1851 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1852 Known.One =
1853 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1854 break;
1855 }
1856 case Intrinsic::uadd_sat:
1857 case Intrinsic::usub_sat: {
1858 bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1859 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1860 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1861
1862 // Add: Leading ones of either operand are preserved.
1863 // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1864 // as leading zeros in the result.
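// This holds because saturation can only replace the arithmetic result
// with all-ones (uadd.sat) or zero (usub.sat), both of which keep those
// leading bits.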
1865 unsigned LeadingKnown;
1866 if (IsAdd)
1867 LeadingKnown = std::max(Known.countMinLeadingOnes(),
1868 Known2.countMinLeadingOnes());
1869 else
1870 LeadingKnown = std::max(Known.countMinLeadingZeros(),
1871 Known2.countMinLeadingOnes());
1872
1873 Known = KnownBits::computeForAddSub(
1874 IsAdd, /* NSW */ false, Known, Known2);
1875
1876 // We select between the operation result and all-ones/zero
1877 // respectively, so we can preserve known ones/zeros.
1878 if (IsAdd) {
1879 Known.One.setHighBits(LeadingKnown);
1880 Known.Zero.clearAllBits();
1881 } else {
1882 Known.Zero.setHighBits(LeadingKnown);
1883 Known.One.clearAllBits();
1884 }
1885 break;
1886 }
1887 case Intrinsic::x86_sse42_crc32_64_64:
1888 Known.Zero.setBitsFrom(32);
1889 break;
1890 }
1891 }
1892 break;
1893 case Instruction::ShuffleVector: {
1894 auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1895 // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1896 if (!Shuf) {
1897 Known.resetAll();
1898 return;
1899 }
1900 // For undef elements, we don't know anything about the common state of
1901 // the shuffle result.
1902 APInt DemandedLHS, DemandedRHS;
1903 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1904 Known.resetAll();
1905 return;
1906 }
1907 Known.One.setAllBits();
1908 Known.Zero.setAllBits();
1909 if (!!DemandedLHS) {
1910 const Value *LHS = Shuf->getOperand(0);
1911 computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1912 // If we don't know any bits, early out.
1913 if (Known.isUnknown())
1914 break;
1915 }
1916 if (!!DemandedRHS) {
1917 const Value *RHS = Shuf->getOperand(1);
1918 computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1919 Known.One &= Known2.One;
1920 Known.Zero &= Known2.Zero;
1921 }
1922 break;
1923 }
1924 case Instruction::InsertElement: {
1925 const Value *Vec = I->getOperand(0);
1926 const Value *Elt = I->getOperand(1);
1927 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1928 // Early out if the index is non-constant or out-of-range.
1929 unsigned NumElts = DemandedElts.getBitWidth();
1930 if (!CIdx || CIdx->getValue().uge(NumElts)) {
1931 Known.resetAll();
1932 return;
1933 }
1934 Known.One.setAllBits();
1935 Known.Zero.setAllBits();
1936 unsigned EltIdx = CIdx->getZExtValue();
1937 // Do we demand the inserted element?
1938 if (DemandedElts[EltIdx]) {
1939 computeKnownBits(Elt, Known, Depth + 1, Q);
1940 // If we don't know any bits, early out.
1941 if (Known.isUnknown())
1942 break;
1943 }
1944 // We don't need the base vector element that has been inserted.
1945 APInt DemandedVecElts = DemandedElts;
1946 DemandedVecElts.clearBit(EltIdx);
1947 if (!!DemandedVecElts) {
1948 computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1949 Known.One &= Known2.One;
1950 Known.Zero &= Known2.Zero;
1951 }
1952 break;
1953 }
1954 case Instruction::ExtractElement: {
1955 // Look through extract element. If the index is non-constant or
1956 // out-of-range demand all elements, otherwise just the extracted element.
1957 const Value *Vec = I->getOperand(0);
1958 const Value *Idx = I->getOperand(1);
1959 auto *CIdx = dyn_cast<ConstantInt>(Idx);
1960 if (isa<ScalableVectorType>(Vec->getType())) {
1961 // FIXME: there's probably *something* we can do with scalable vectors
1962 Known.resetAll();
1963 break;
1964 }
1965 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1966 APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
1967 if (CIdx && CIdx->getValue().ult(NumElts))
1968 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1969 computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1970 break;
1971 }
1972 case Instruction::ExtractValue:
1973 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1974 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1975 if (EVI->getNumIndices() != 1) break;
1976 if (EVI->getIndices()[0] == 0) {
1977 switch (II->getIntrinsicID()) {
1978 default: break;
1979 case Intrinsic::uadd_with_overflow:
1980 case Intrinsic::sadd_with_overflow:
1981 computeKnownBitsAddSub(true, II->getArgOperand(0),
1982 II->getArgOperand(1), false, DemandedElts,
1983 Known, Known2, Depth, Q);
1984 break;
1985 case Intrinsic::usub_with_overflow:
1986 case Intrinsic::ssub_with_overflow:
1987 computeKnownBitsAddSub(false, II->getArgOperand(0),
1988 II->getArgOperand(1), false, DemandedElts,
1989 Known, Known2, Depth, Q);
1990 break;
1991 case Intrinsic::umul_with_overflow:
1992 case Intrinsic::smul_with_overflow:
1993 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1994 DemandedElts, Known, Known2, Depth, Q);
1995 break;
1996 }
1997 }
1998 }
1999 break;
2000 }
2001 }
2002
2003 /// Determine which bits of V are known to be either zero or one and return
2004 /// them.
2005 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
2006 unsigned Depth, const Query &Q) {
2007 KnownBits Known(getBitWidth(V->getType(), Q.DL));
2008 computeKnownBits(V, DemandedElts, Known, Depth, Q);
2009 return Known;
2010 }
2011
2012 /// Determine which bits of V are known to be either zero or one and return
2013 /// them.
2014 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
2015 KnownBits Known(getBitWidth(V->getType(), Q.DL));
2016 computeKnownBits(V, Known, Depth, Q);
2017 return Known;
2018 }
2019
2020 /// Determine which bits of V are known to be either zero or one and return
2021 /// them in the Known bit set.
2022 ///
2023 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
2024 /// we cannot optimize based on the assumption that it is zero without changing
2025 /// it to be an explicit zero. If we don't change it to zero, other code could
2026 /// be optimized based on the contradictory assumption that it is non-zero.
2027 /// Because instcombine aggressively folds operations with undef args anyway,
2028 /// this won't lose us code quality.
2029 ///
2030 /// This function is defined on values with integer type, values with pointer
2031 /// type, and vectors of integers. In the case where V is a vector, the known
2032 /// zero and known one values are the same width as the vector element, and a
2033 /// bit is set only if it is true for all of the demanded elements in the
2034 /// vector specified by DemandedElts.
2035 void computeKnownBits(const Value *V, const APInt &DemandedElts,
2036 KnownBits &Known, unsigned Depth, const Query &Q) {
2037 if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
2038 // No demanded elts or V is a scalable vector, better to assume we don't
2039 // know anything.
2040 Known.resetAll();
2041 return;
2042 }
2043
2044 assert(V && "No Value?");
2045 assert(Depth <= MaxDepth && "Limit Search Depth");
2046
2047 #ifndef NDEBUG
2048 Type *Ty = V->getType();
2049 unsigned BitWidth = Known.getBitWidth();
2050
2051 assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
2052 "Not integer or pointer type!");
2053
2054 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2055 assert(
2056 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2057 "DemandedElt width should equal the fixed vector number of elements");
2058 } else {
2059 assert(DemandedElts == APInt(1, 1) &&
2060 "DemandedElt width should be 1 for scalars");
2061 }
2062
2063 Type *ScalarTy = Ty->getScalarType();
2064 if (ScalarTy->isPointerTy()) {
2065 assert(BitWidth == Q.DL.getPointerAddrSizeInBits(ScalarTy) &&
2066 "V and Known should have same BitWidth");
2067 } else {
2068 assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
2069 "V and Known should have same BitWidth");
2070 }
2071 #endif
2072
2073 const APInt *C;
2074 if (match(V, m_APInt(C))) {
2075 // We know all of the bits for a scalar constant or a splat vector constant!
2076 Known.One = *C;
2077 Known.Zero = ~Known.One;
2078 return;
2079 }
2080 // Null and aggregate-zero are all-zeros.
2081 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
2082 Known.setAllZero();
2083 return;
2084 }
2085 // Handle a constant vector by taking the intersection of the known bits of
2086 // each element.
2087 if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
2088 // We know that CDV must be a vector of integers. Take the intersection of
2089 // each element.
2090 Known.Zero.setAllBits(); Known.One.setAllBits();
2091 for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
2092 if (!DemandedElts[i])
2093 continue;
2094 APInt Elt = CDV->getElementAsAPInt(i);
2095 Known.Zero &= ~Elt;
2096 Known.One &= Elt;
2097 }
2098 return;
2099 }
2100
2101 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
2102 // We know that CV must be a vector of integers. Take the intersection of
2103 // each element.
2104 Known.Zero.setAllBits(); Known.One.setAllBits();
2105 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
2106 if (!DemandedElts[i])
2107 continue;
2108 Constant *Element = CV->getAggregateElement(i);
2109 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
2110 if (!ElementCI) {
2111 Known.resetAll();
2112 return;
2113 }
2114 const APInt &Elt = ElementCI->getValue();
2115 Known.Zero &= ~Elt;
2116 Known.One &= Elt;
2117 }
2118 return;
2119 }
2120
2121 // Start out not knowing anything.
2122 Known.resetAll();
2123
2124 // We can't imply anything about undefs.
2125 if (isa<UndefValue>(V))
2126 return;
2127
2128 // There's no point in looking through other users of ConstantData for
2129 // assumptions. Confirm that we've handled them all.
2130 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
2131
2132 // Limit search depth.
2133 // All recursive calls that increase depth must come after this.
2134 if (Depth == MaxDepth)
2135 return;
2136
2137 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
2138 // the bits of its aliasee.
2139 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2140 if (!GA->isInterposable())
2141 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
2142 return;
2143 }
2144
2145 if (const Operator *I = dyn_cast<Operator>(V))
2146 computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
2147
2148 // Aligned pointers have trailing zeros - refine Known.Zero set
2149 if (isa<PointerType>(V->getType())) {
2150 Align Alignment = V->getPointerAlignment(Q.DL);
2151 Known.Zero.setLowBits(Log2(Alignment));
2152 }
2153
2154 // computeKnownBitsFromAssume strictly refines Known.
2155 // Therefore, we run them after computeKnownBitsFromOperator.
2156
2157 // Check whether a nearby assume intrinsic can determine some known bits.
2158 computeKnownBitsFromAssume(V, Known, Depth, Q);
2159
2160 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
2161 }
2162
2163 /// Return true if the given value is known to have exactly one
2164 /// bit set when defined. For vectors return true if every element is known to
2165 /// be a power of two when defined. Supports values with integer or pointer
2166 /// types and vectors of integers.
2167 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
2168 const Query &Q) {
2169 assert(Depth <= MaxDepth && "Limit Search Depth");
2170
2171 // Attempt to match against constants.
2172 if (OrZero && match(V, m_Power2OrZero()))
2173 return true;
2174 if (match(V, m_Power2()))
2175 return true;
2176
2177 // 1 << X is clearly a power of two if the one is not shifted off the end. If
2178 // it is shifted off the end then the result is undefined.
2179 if (match(V, m_Shl(m_One(), m_Value())))
2180 return true;
2181
2182 // (signmask) >>l X is clearly a power of two if the one is not shifted off
2183 // the bottom. If it is shifted off the bottom then the result is undefined.
2184 if (match(V, m_LShr(m_SignMask(), m_Value())))
2185 return true;
2186
2187 // The remaining tests are all recursive, so bail out if we hit the limit.
2188 if (Depth++ == MaxDepth)
2189 return false;
2190
2191 Value *X = nullptr, *Y = nullptr;
2192 // A shift left or a logical shift right of a power of two is a power of two
2193 // or zero.
2194 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
2195 match(V, m_LShr(m_Value(X), m_Value()))))
2196 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
2197
2198 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
2199 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
2200
2201 if (const SelectInst *SI = dyn_cast<SelectInst>(V))
2202 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
2203 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
2204
2205 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
2206 // A power of two and'd with anything is a power of two or zero.
2207 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
2208 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
2209 return true;
2210 // X & (-X) is always a power of two or zero.
2211 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
2212 return true;
2213 return false;
2214 }
2215
2216 // Adding a power-of-two or zero to the same power-of-two or zero yields
2217 // either the original power-of-two, a larger power-of-two or zero.
2218 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2219 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
2220 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
2221 Q.IIQ.hasNoSignedWrap(VOBO)) {
2222 if (match(X, m_And(m_Specific(Y), m_Value())) ||
2223 match(X, m_And(m_Value(), m_Specific(Y))))
2224 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
2225 return true;
2226 if (match(Y, m_And(m_Specific(X), m_Value())) ||
2227 match(Y, m_And(m_Value(), m_Specific(X))))
2228 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
2229 return true;
2230
2231 unsigned BitWidth = V->getType()->getScalarSizeInBits();
2232 KnownBits LHSBits(BitWidth);
2233 computeKnownBits(X, LHSBits, Depth, Q);
2234
2235 KnownBits RHSBits(BitWidth);
2236 computeKnownBits(Y, RHSBits, Depth, Q);
2237 // If i8 V is a power of two or zero:
2238 // ZeroBits: 1 1 1 0 1 1 1 1
2239 // ~ZeroBits: 0 0 0 1 0 0 0 0
2240 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2241 // If OrZero isn't set, we cannot give back a zero result.
2242 // Make sure either the LHS or RHS has a bit set.
2243 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2244 return true;
2245 }
2246 }
2247
2248 // An exact divide or right shift can only shift off zero bits, so the result
2249 // is a power of two only if the first operand is a power of two and not
2250 // copying a sign bit (sdiv int_min, 2).
2251 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2252 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2253 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2254 Depth, Q);
2255 }
2256
2257 return false;
2258 }
2259
2260 /// Test whether a GEP's result is known to be non-null.
2261 ///
2262 /// Uses properties inherent in a GEP to try to determine whether it is known
2263 /// to be non-null.
2264 ///
2265 /// Currently this routine does not support vector GEPs.
2266 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2267 const Query &Q) {
2268 const Function *F = nullptr;
2269 if (const Instruction *I = dyn_cast<Instruction>(GEP))
2270 F = I->getFunction();
2271
2272 if (!GEP->isInBounds() ||
2273 NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2274 return false;
2275
2276 // FIXME: Support vector-GEPs.
2277 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2278
2279 // If the base pointer is non-null, we cannot walk to a null address with an
2280 // inbounds GEP in address space zero.
2281 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2282 return true;
2283
2284 // Walk the GEP operands and see if any operand introduces a non-zero offset.
2285 // If so, then the GEP cannot produce a null pointer, as doing so would
2286 // inherently violate the inbounds contract within address space zero.
2287 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2288 GTI != GTE; ++GTI) {
2289 // Struct types are easy -- they must always be indexed by a constant.
2290 if (StructType *STy = GTI.getStructTypeOrNull()) {
2291 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2292 unsigned ElementIdx = OpC->getZExtValue();
2293 const StructLayout *SL = Q.DL.getStructLayout(STy);
2294 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2295 if (ElementOffset > 0)
2296 return true;
2297 continue;
2298 }
2299
2300 // If we have a zero-sized type, the index doesn't matter. Keep looping.
2301 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
2302 continue;
2303
2304 // Fast path the constant operand case both for efficiency and so we don't
2305 // increment Depth when just zipping down an all-constant GEP.
2306 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2307 if (!OpC->isZero())
2308 return true;
2309 continue;
2310 }
2311
2312 // We post-increment Depth here because while isKnownNonZero increments it
2313 // as well, when we pop back up that increment won't persist. We don't want
2314 // to recurse 10k times just because we have 10k GEP operands. We don't
2315 // bail completely out because we want to handle constant GEPs regardless
2316 // of depth.
2317 if (Depth++ >= MaxDepth)
2318 continue;
2319
2320 if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2321 return true;
2322 }
2323
2324 return false;
2325 }
2326
2327 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2328 const Instruction *CtxI,
2329 const DominatorTree *DT) {
2330 if (isa<Constant>(V))
2331 return false;
2332
2333 if (!CtxI || !DT)
2334 return false;
2335
2336 unsigned NumUsesExplored = 0;
2337 for (auto *U : V->users()) {
2338 // Avoid massive lists
2339 if (NumUsesExplored >= DomConditionsMaxUses)
2340 break;
2341 NumUsesExplored++;
2342
2343 // If the value is used as an argument to a call or invoke, then argument
2344 // attributes may provide an answer about null-ness.
2345 if (const auto *CB = dyn_cast<CallBase>(U))
2346 if (auto *CalledFunc = CB->getCalledFunction())
2347 for (const Argument &Arg : CalledFunc->args())
2348 if (CB->getArgOperand(Arg.getArgNo()) == V &&
2349 Arg.hasNonNullAttr() && DT->dominates(CB, CtxI))
2350 return true;
2351
2352 // If the value is used as a load/store, then the pointer must be non null.
2353 if (V == getLoadStorePointerOperand(U)) {
2354 const Instruction *I = cast<Instruction>(U);
2355 if (!NullPointerIsDefined(I->getFunction(),
2356 V->getType()->getPointerAddressSpace()) &&
2357 DT->dominates(I, CtxI))
2358 return true;
2359 }
2360
2361 // Consider only compare instructions uniquely controlling a branch
2362 CmpInst::Predicate Pred;
2363 if (!match(const_cast<User *>(U),
2364 m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
2365 (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
2366 continue;
2367
2368 SmallVector<const User *, 4> WorkList;
2369 SmallPtrSet<const User *, 4> Visited;
2370 for (auto *CmpU : U->users()) {
2371 assert(WorkList.empty() && "Should be!");
2372 if (Visited.insert(CmpU).second)
2373 WorkList.push_back(CmpU);
2374
2375 while (!WorkList.empty()) {
2376 auto *Curr = WorkList.pop_back_val();
2377
2378 // If a user is an AND, add all its users to the work list. We only
2379 // propagate "pred != null" condition through AND because it is only
2380 // correct to assume that all conditions of AND are met in true branch.
2381 // TODO: Support similar logic of OR and EQ predicate?
2382 if (Pred == ICmpInst::ICMP_NE)
2383 if (auto *BO = dyn_cast<BinaryOperator>(Curr))
2384 if (BO->getOpcode() == Instruction::And) {
2385 for (auto *BOU : BO->users())
2386 if (Visited.insert(BOU).second)
2387 WorkList.push_back(BOU);
2388 continue;
2389 }
2390
2391 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2392 assert(BI->isConditional() && "uses a comparison!");
2393
2394 BasicBlock *NonNullSuccessor =
2395 BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
2396 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2397 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2398 return true;
2399 } else if (Pred == ICmpInst::ICMP_NE && isGuard(Curr) &&
2400 DT->dominates(cast<Instruction>(Curr), CtxI)) {
2401 return true;
2402 }
2403 }
2404 }
2405 }
2406
2407 return false;
2408 }
2409
2410 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2411 /// ensure that the value it's attached to is never equal to Value?
2412 /// 'RangeType' is the type of the value described by the range.
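/// For example, the range list !{i32 1, i32 10} describes the half-open
/// interval [1, 10) and therefore excludes 0.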
2413 static bool rangeMetadataExcludesValue(const MDNode *Ranges, const APInt &Value) {
2414 const unsigned NumRanges = Ranges->getNumOperands() / 2;
2415 assert(NumRanges >= 1);
2416 for (unsigned i = 0; i < NumRanges; ++i) {
2417 ConstantInt *Lower =
2418 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2419 ConstantInt *Upper =
2420 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2421 ConstantRange Range(Lower->getValue(), Upper->getValue());
2422 if (Range.contains(Value))
2423 return false;
2424 }
2425 return true;
2426 }
2427
2428 /// Return true if the given value is known to be non-zero when defined. For
2429 /// vectors, return true if every demanded element is known to be non-zero when
2430 /// defined. For pointers, if the context instruction and dominator tree are
2431 /// specified, perform context-sensitive analysis and return true if the
2432 /// pointer couldn't possibly be null at the specified instruction.
2433 /// Supports values with integer or pointer type and vectors of integers.
2434 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2435 const Query &Q) {
2436 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2437 // vector
2438 if (isa<ScalableVectorType>(V->getType()))
2439 return false;
2440
2441 if (auto *C = dyn_cast<Constant>(V)) {
2442 if (C->isNullValue())
2443 return false;
2444 if (isa<ConstantInt>(C))
2445 // Must be non-zero due to null test above.
2446 return true;
2447
2448 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2449 // See the comment for IntToPtr/PtrToInt instructions below.
2450 if (CE->getOpcode() == Instruction::IntToPtr ||
2451 CE->getOpcode() == Instruction::PtrToInt)
2452 if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType()) <=
2453 Q.DL.getTypeSizeInBits(CE->getType()))
2454 return isKnownNonZero(CE->getOperand(0), Depth, Q);
2455 }
2456
2457 // For constant vectors, check that all elements are undefined or known
2458 // non-zero to determine that the whole vector is known non-zero.
2459 if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2460 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2461 if (!DemandedElts[i])
2462 continue;
2463 Constant *Elt = C->getAggregateElement(i);
2464 if (!Elt || Elt->isNullValue())
2465 return false;
2466 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2467 return false;
2468 }
2469 return true;
2470 }
2471
2472 // A global variable in address space 0 is non null unless extern weak
2473 // or an absolute symbol reference. Other address spaces may have null as a
2474 // valid address for a global, so we can't assume anything.
2475 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2476 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage()) {
2477 unsigned GVAddrSpace = GV->getType()->getAddressSpace();
2478 if (GVAddrSpace == 0 || Q.DL.isFatPointer(GVAddrSpace))
2479 return true;
2480 }
2481 } else
2482 return false;
2483 }
2484
2485 if (auto *I = dyn_cast<Instruction>(V)) {
2486 if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2487 // If the possible ranges don't contain zero, then the value is
2488 // definitely non-zero.
2489 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2490 const APInt ZeroValue(Ty->getBitWidth(), 0);
2491 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2492 return true;
2493 }
2494 }
2495 }
2496
2497 if (isKnownNonZeroFromAssume(V, Q))
2498 return true;
2499
2500 // Some of the tests below are recursive, so bail out if we hit the limit.
2501 if (Depth++ >= MaxDepth)
2502 return false;
2503
2504 // Check for pointer simplifications.
2505 if (V->getType()->isPointerTy()) {
2506 // Alloca never returns null, malloc might.
2507 if (isa<AllocaInst>(V)) {
2508 unsigned AS = Q.DL.getAllocaAddrSpace();
2509 // XXXAR: AMDGPU broke this because their allocaAddressSpace is weird
2510 if (AS == 0 || Q.DL.isFatPointer(AS))
2511 return true;
2512 }
2513
2514 // A byval, inalloca, or nonnull argument is never null.
2515 if (const Argument *A = dyn_cast<Argument>(V))
2516 if (A->hasPassPointeeByValueAttr() || A->hasNonNullAttr())
2517 return true;
2518
2519 // A Load tagged with nonnull metadata is never null.
2520 if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2521 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2522 return true;
2523
2524 if (const auto *Call = dyn_cast<CallBase>(V)) {
2525 if (Call->isReturnNonNull())
2526 return true;
2527 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2528 return isKnownNonZero(RP, Depth, Q);
2529 }
2530 }
2531
2532 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2533 return true;
2534
2535 // Check for recursive pointer simplifications.
2536 if (V->getType()->isPointerTy()) {
2537 // Look through bitcast operations, GEPs, and int2ptr instructions as they
2538 // do not alter the value, or at least not the nullness property of the
2539 // value, e.g., int2ptr is allowed to zero/sign extend the value.
2540 //
2541 // Note that we have to take special care to avoid looking through
2542 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2543 // as casts that can alter the value, e.g., AddrSpaceCasts.
2544 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2545 if (isGEPKnownNonNull(GEP, Depth, Q))
2546 return true;
2547
2548 if (auto *BCO = dyn_cast<BitCastOperator>(V))
2549 return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2550
2551 if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2552 if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()) <=
2553 Q.DL.getTypeSizeInBits(I2P->getDestTy()))
2554 return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2555 }
2556
2557 // Similar to int2ptr above, we can look through ptr2int here if the cast
2558 // is a no-op or an extend and not a truncate.
2559 if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2560 if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()) <=
2561 Q.DL.getTypeSizeInBits(P2I->getDestTy()))
2562 return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2563
2564 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2565
2566 // X | Y != 0 if X != 0 or Y != 0.
2567 Value *X = nullptr, *Y = nullptr;
2568 if (match(V, m_Or(m_Value(X), m_Value(Y))))
2569 return isKnownNonZero(X, DemandedElts, Depth, Q) ||
2570 isKnownNonZero(Y, DemandedElts, Depth, Q);
2571
2572 // ext X != 0 if X != 0.
2573 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2574 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2575
2576 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
2577 // if the lowest bit is shifted off the end.
2578 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2579 // shl nuw can't remove any non-zero bits.
2580 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2581 if (Q.IIQ.hasNoUnsignedWrap(BO))
2582 return isKnownNonZero(X, Depth, Q);
2583
2584 KnownBits Known(BitWidth);
2585 computeKnownBits(X, DemandedElts, Known, Depth, Q);
2586 if (Known.One[0])
2587 return true;
2588 }
2589 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
2590 // defined if the sign bit is shifted off the end.
2591 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2592 // shr exact can only shift out zero bits.
2593 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2594 if (BO->isExact())
2595 return isKnownNonZero(X, Depth, Q);
2596
2597 KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q);
2598 if (Known.isNegative())
2599 return true;
2600
2601 // If the shifter operand is a constant, and all of the bits shifted
2602 // out are known to be zero, and X is known non-zero then at least one
2603 // non-zero bit must remain.
2604 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2605 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2606 // Is there a known one in the portion not shifted out?
2607 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2608 return true;
2609 // Are all the bits to be shifted out known zero?
2610 if (Known.countMinTrailingZeros() >= ShiftVal)
2611 return isKnownNonZero(X, DemandedElts, Depth, Q);
2612 }
2613 }
2614 // div exact can only produce a zero if the dividend is zero.
2615 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2616 return isKnownNonZero(X, DemandedElts, Depth, Q);
2617 }
2618 // X + Y.
2619 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2620 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2621 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2622
2623 // If X and Y are both non-negative (as signed values) then their sum is not
2624 // zero unless both X and Y are zero.
2625 if (XKnown.isNonNegative() && YKnown.isNonNegative())
2626 if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
2627 isKnownNonZero(Y, DemandedElts, Depth, Q))
2628 return true;
2629
2630 // If X and Y are both negative (as signed values) then their sum is not
2631 // zero unless both X and Y equal INT_MIN.
2632 if (XKnown.isNegative() && YKnown.isNegative()) {
2633 APInt Mask = APInt::getSignedMaxValue(BitWidth);
2634 // The sign bit of X is set. If some other bit is set then X is not equal
2635 // to INT_MIN.
2636 if (XKnown.One.intersects(Mask))
2637 return true;
2638 // The sign bit of Y is set. If some other bit is set then Y is not equal
2639 // to INT_MIN.
2640 if (YKnown.One.intersects(Mask))
2641 return true;
2642 }
2643
2644 // The sum of a non-negative number and a power of two is not zero.
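// (X + P == 0 would require X == -P, which has its sign bit set and is
// therefore negative, contradicting X being known non-negative.)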
2645 if (XKnown.isNonNegative() &&
2646 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2647 return true;
2648 if (YKnown.isNonNegative() &&
2649 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2650 return true;
2651 }
2652 // X * Y.
2653 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2654 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2655 // If X and Y are non-zero then so is X * Y as long as the multiplication
2656 // does not overflow.
2657 if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2658 isKnownNonZero(X, DemandedElts, Depth, Q) &&
2659 isKnownNonZero(Y, DemandedElts, Depth, Q))
2660 return true;
2661 }
2662 // (C ? X : Y) != 0 if X != 0 and Y != 0.
2663 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2664 if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
2665 isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
2666 return true;
2667 }
2668 // PHI
2669 else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2670 // Try and detect a recurrence that monotonically increases from a
2671 // starting value, as these are common as induction variables.
2672 if (PN->getNumIncomingValues() == 2) {
2673 Value *Start = PN->getIncomingValue(0);
2674 Value *Induction = PN->getIncomingValue(1);
2675 if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2676 std::swap(Start, Induction);
2677 if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2678 if (!C->isZero() && !C->isNegative()) {
2679 ConstantInt *X;
2680 if (Q.IIQ.UseInstrInfo &&
2681 (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2682 match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2683 !X->isNegative())
2684 return true;
2685 }
2686 }
2687 }
2688 // Check if all incoming values are non-zero constant.
2689 bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
2690 return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
2691 });
2692 if (AllNonZeroConstants)
2693 return true;
2694 }
2695 // ExtractElement
2696 else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2697 const Value *Vec = EEI->getVectorOperand();
2698 const Value *Idx = EEI->getIndexOperand();
2699 auto *CIdx = dyn_cast<ConstantInt>(Idx);
2700 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
2701 APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
2702 if (CIdx && CIdx->getValue().ult(NumElts))
2703 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2704 return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2705 }
2706
2707 KnownBits Known(BitWidth);
2708 computeKnownBits(V, DemandedElts, Known, Depth, Q);
2709 return Known.One != 0;
2710 }
2711
2712 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
2713 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2714 // vector
2715 if (isa<ScalableVectorType>(V->getType()))
2716 return false;
2717
2718 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2719 APInt DemandedElts =
2720 FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
2721 return isKnownNonZero(V, DemandedElts, Depth, Q);
2722 }
2723
2724 /// Return true if V2 == V1 + X, where X is known non-zero.
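/// e.g. it returns true for V1 and "%v2 = add i32 %v1, 4", since 4 != 0.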
2725 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2726 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2727 if (!BO || BO->getOpcode() != Instruction::Add)
2728 return false;
2729 Value *Op = nullptr;
2730 if (V2 == BO->getOperand(0))
2731 Op = BO->getOperand(1);
2732 else if (V2 == BO->getOperand(1))
2733 Op = BO->getOperand(0);
2734 else
2735 return false;
2736 return isKnownNonZero(Op, 0, Q);
2737 }
2738
2739 /// Return true if it is known that V1 != V2.
2740 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2741 if (V1 == V2)
2742 return false;
2743 if (V1->getType() != V2->getType())
2744 // We can't look through casts yet.
2745 return false;
2746 if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2747 return true;
2748
2749 if (V1->getType()->isIntOrIntVectorTy()) {
2750 // Are any known bits in V1 contradictory to known bits in V2? If V1
2751 // has a known zero where V2 has a known one, they must not be equal.
2752 KnownBits Known1 = computeKnownBits(V1, 0, Q);
2753 KnownBits Known2 = computeKnownBits(V2, 0, Q);
2754
2755 if (Known1.Zero.intersects(Known2.One) ||
2756 Known2.Zero.intersects(Known1.One))
2757 return true;
2758 }
2759 return false;
2760 }
2761
2762 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
2763 /// simplify operations downstream. This returns true only if every bit set in
2764 /// Mask is known to be zero in V.
2765 ///
2766 /// This function is defined on values with integer type, values with pointer
2767 /// type, and vectors of integers. In the case
2768 /// where V is a vector, the mask, known zero, and known one values are the
2769 /// same width as the vector element, and the bit is set only if it is true
2770 /// for all of the elements in the vector.
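/// For example, a true result for Mask == 1 proves that V is even.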
2771 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2772 const Query &Q) {
2773 KnownBits Known(Mask.getBitWidth());
2774 computeKnownBits(V, Known, Depth, Q);
2775 return Mask.isSubsetOf(Known.Zero);
2776 }
2777
2778 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2779 // Returns the input and lower/upper bounds.
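// e.g. clamping %x to [0, 255] appears as smax(smin(%x, 255), 0), giving
// In = %x, CLow = 0 and CHigh = 255.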
2780 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2781 const APInt *&CLow, const APInt *&CHigh) {
2782 assert(isa<Operator>(Select) &&
2783 cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2784 "Input should be a Select!");
2785
2786 const Value *LHS = nullptr, *RHS = nullptr;
2787 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2788 if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2789 return false;
2790
2791 if (!match(RHS, m_APInt(CLow)))
2792 return false;
2793
2794 const Value *LHS2 = nullptr, *RHS2 = nullptr;
2795 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2796 if (getInverseMinMaxFlavor(SPF) != SPF2)
2797 return false;
2798
2799 if (!match(RHS2, m_APInt(CHigh)))
2800 return false;
2801
2802 if (SPF == SPF_SMIN)
2803 std::swap(CLow, CHigh);
2804
2805 In = LHS2;
2806 return CLow->sle(*CHigh);
2807 }
2808
2809 /// For vector constants, loop over the elements and find the constant with the
2810 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2811 /// or if any element was not analyzed; otherwise, return the count for the
2812 /// element with the minimum number of sign bits.
2813 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2814 const APInt &DemandedElts,
2815 unsigned TyBits) {
2816 const auto *CV = dyn_cast<Constant>(V);
2817 if (!CV || !isa<FixedVectorType>(CV->getType()))
2818 return 0;
2819
2820 unsigned MinSignBits = TyBits;
2821 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
2822 for (unsigned i = 0; i != NumElts; ++i) {
2823 if (!DemandedElts[i])
2824 continue;
2825 // If we find a non-ConstantInt, bail out.
2826 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2827 if (!Elt)
2828 return 0;
2829
2830 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2831 }
2832
2833 return MinSignBits;
2834 }
2835
2836 static unsigned ComputeNumSignBitsImpl(const Value *V,
2837 const APInt &DemandedElts,
2838 unsigned Depth, const Query &Q);
2839
2840 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
2841 unsigned Depth, const Query &Q) {
2842 unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
2843 assert(Result > 0 && "At least one sign bit needs to be present!");
2844 return Result;
2845 }
2846
2847 /// Return the number of times the sign bit of the register is replicated into
2848 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2849 /// (itself), but other cases can give us information. For example, immediately
2850 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2851 /// other, so we return 3. For vectors, return the number of sign bits for the
2852 /// vector element with the minimum number of known sign bits of the demanded
2853 /// elements in the vector specified by DemandedElts.
2854 static unsigned ComputeNumSignBitsImpl(const Value *V,
2855 const APInt &DemandedElts,
2856 unsigned Depth, const Query &Q) {
2857 Type *Ty = V->getType();
2858
2859 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2860 // vector
2861 if (isa<ScalableVectorType>(Ty))
2862 return 1;
2863
2864 #ifndef NDEBUG
2865 assert(Depth <= MaxDepth && "Limit Search Depth");
2866
2867 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2868 assert(
2869 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2870 "DemandedElt width should equal the fixed vector number of elements");
2871 } else {
2872 assert(DemandedElts == APInt(1, 1) &&
2873 "DemandedElt width should be 1 for scalars");
2874 }
2875 #endif
2876
2877 // We return the minimum number of sign bits that are guaranteed to be present
2878 // in V, so for undef we have to conservatively return 1. We don't have the
2879 // same behavior for poison though -- that's a FIXME today.
2880
2881 Type *ScalarTy = Ty->getScalarType();
2882 unsigned TyBits = ScalarTy->isPointerTy() ?
2883 Q.DL.getPointerAddrSizeInBits(ScalarTy) :
2884 Q.DL.getTypeSizeInBits(ScalarTy);
2885
2886 unsigned Tmp, Tmp2;
2887 unsigned FirstAnswer = 1;
2888
2889 // Note that ConstantInt is handled by the general computeKnownBits case
2890 // below.
2891
2892 if (Depth == MaxDepth)
2893 return 1; // Limit search depth.
2894
2895 if (auto *U = dyn_cast<Operator>(V)) {
2896 switch (Operator::getOpcode(V)) {
2897 default: break;
2898 case Instruction::SExt:
2899 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2900 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2901
2902 case Instruction::SDiv: {
2903 const APInt *Denominator;
2904 // sdiv X, C -> adds log(C) sign bits.
2905 if (match(U->getOperand(1), m_APInt(Denominator))) {
2906
2907 // Ignore non-positive denominator.
2908 if (!Denominator->isStrictlyPositive())
2909 break;
2910
2911 // Calculate the incoming numerator bits.
2912 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2913
2914 // Add floor(log(C)) bits to the numerator bits.
2915 return std::min(TyBits, NumBits + Denominator->logBase2());
2916 }
2917 break;
2918 }
2919
2920 case Instruction::SRem: {
2921 const APInt *Denominator;
2922 // srem X, C -> we know that the result is within [-C+1,C) when C is a
2923 // positive constant. This lets us put a lower bound on the number of sign
2924 // bits.
2925 if (match(U->getOperand(1), m_APInt(Denominator))) {
2926
2927 // Ignore non-positive denominator.
2928 if (!Denominator->isStrictlyPositive())
2929 break;
2930
2931 // Calculate the incoming numerator bits. SRem by a positive constant
2932 // can't lower the number of sign bits.
2933 unsigned NumrBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2934
2935 // Calculate the leading sign bit constraints by examining the
2936 // denominator. Given that the denominator is positive, there are two
2937 // cases:
2938 //
2939 // 1. the numerator is positive. The result range is [0,C) and [0,C) u<
2940 // (1 << ceilLogBase2(C)).
2941 //
2942 // 2. the numerator is negative. Then the result range is (-C,0] and
2943 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2944 //
2945 // Thus a lower bound on the number of sign bits is `TyBits -
2946 // ceilLogBase2(C)`.
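// For example, with TyBits = 8 and C = 20 we have ceilLogBase2(20) = 5,
// so the result is known to have at least 8 - 5 = 3 sign bits.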
2947
2948 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2949 return std::max(NumrBits, ResBits);
2950 }
2951 break;
2952 }
2953
2954 case Instruction::AShr: {
2955 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2956 // ashr X, C -> adds C sign bits. Vectors too.
2957 const APInt *ShAmt;
2958 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2959 if (ShAmt->uge(TyBits))
2960 break; // Bad shift.
2961 unsigned ShAmtLimited = ShAmt->getZExtValue();
2962 Tmp += ShAmtLimited;
2963 if (Tmp > TyBits) Tmp = TyBits;
2964 }
2965 return Tmp;
2966 }
2967 case Instruction::Shl: {
2968 const APInt *ShAmt;
2969 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2970 // shl destroys sign bits.
2971 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2972 if (ShAmt->uge(TyBits) || // Bad shift.
2973 ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2974 Tmp2 = ShAmt->getZExtValue();
2975 return Tmp - Tmp2;
2976 }
2977 break;
2978 }
2979 case Instruction::And:
2980 case Instruction::Or:
2981 case Instruction::Xor: // NOT is handled here.
2982 // Logical binary ops preserve the number of sign bits at the worst.
2983 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2984 if (Tmp != 1) {
2985 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2986 FirstAnswer = std::min(Tmp, Tmp2);
2987 // We computed what we know about the sign bits as our first
2988 // answer. Now proceed to the generic code that uses
2989 // computeKnownBits, and pick whichever answer is better.
2990 }
2991 break;
2992
2993 case Instruction::Select: {
2994 // If we have a clamp pattern, we know that the number of sign bits will
2995 // be the minimum of the clamp min/max range.
2996 const Value *X;
2997 const APInt *CLow, *CHigh;
2998 if (isSignedMinMaxClamp(U, X, CLow, CHigh))
2999 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3000
3001 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3002 if (Tmp == 1) break;
3003 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
3004 return std::min(Tmp, Tmp2);
3005 }
3006
3007 case Instruction::Add:
3008 // Add can have at most one carry bit. Thus we know that the output
3009 // is, at worst, one more bit than the inputs.
3010 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3011 if (Tmp == 1) break;
3012
3013 // Special case decrementing a value (ADD X, -1):
3014 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
3015 if (CRHS->isAllOnesValue()) {
3016 KnownBits Known(TyBits);
3017 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
3018
3019 // If the input is known to be 0 or 1, the output is 0/-1, which is
3020 // all sign bits set.
3021 if ((Known.Zero | 1).isAllOnesValue())
3022 return TyBits;
3023
3024 // If we are subtracting one from a positive number, there is no carry
3025 // out of the result.
3026 if (Known.isNonNegative())
3027 return Tmp;
3028 }
3029
3030 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3031 if (Tmp2 == 1) break;
3032 return std::min(Tmp, Tmp2) - 1;
3033
3034 case Instruction::Sub:
3035 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3036 if (Tmp2 == 1) break;
3037
3038 // Handle NEG.
3039 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
3040 if (CLHS->isNullValue()) {
3041 KnownBits Known(TyBits);
3042 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
3043 // If the input is known to be 0 or 1, the output is 0/-1, which is
3044 // all sign bits set.
3045 if ((Known.Zero | 1).isAllOnesValue())
3046 return TyBits;
3047
3048 // If the input is known to be positive (the sign bit is known clear),
3049 // the output of the NEG has the same number of sign bits as the
3050 // input.
3051 if (Known.isNonNegative())
3052 return Tmp2;
3053
3054 // Otherwise, we treat this like a SUB.
3055 }
3056
3057 // Sub can have at most one carry bit. Thus we know that the output
3058 // is, at worst, one more bit than the inputs.
3059 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3060 if (Tmp == 1) break;
3061 return std::min(Tmp, Tmp2) - 1;
3062
3063 case Instruction::Mul: {
3064 // The output of the Mul can be at most twice the valid bits in the
3065 // inputs.
3066 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3067 if (SignBitsOp0 == 1) break;
3068 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3069 if (SignBitsOp1 == 1) break;
3070 unsigned OutValidBits =
3071 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
3072 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
3073 }
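// Example (Mul): multiplying two i16 values known to have 13 and 12 sign
// bits (4 and 5 valid bits) needs at most 4 + 5 = 9 valid bits, so the
// product keeps at least 16 - 9 + 1 = 8 sign bits.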
3074
3075 case Instruction::PHI: {
3076 const PHINode *PN = cast<PHINode>(U);
3077 unsigned NumIncomingValues = PN->getNumIncomingValues();
3078 // Don't analyze large in-degree PHIs.
3079 if (NumIncomingValues > 4) break;
3080 // Unreachable blocks may have zero-operand PHI nodes.
3081 if (NumIncomingValues == 0) break;
3082
3083 // Take the minimum of all incoming values. This can't infinitely loop
3084 // because of our depth threshold.
3085 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
3086 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
3087 if (Tmp == 1) return Tmp;
3088 Tmp = std::min(
3089 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
3090 }
3091 return Tmp;
3092 }
3093
3094 case Instruction::Trunc:
3095 // FIXME: it's tricky to do anything useful for this, but it is an
3096 // important case for targets like X86.
3097 break;
3098
3099 case Instruction::ExtractElement:
3100 // Look through extract element. At the moment we keep this simple and
3101 // skip tracking the specific element. But at least we might find
3102 // information valid for all elements of the vector (for example if vector
3103 // is sign extended, shifted, etc).
3104 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3105
3106 case Instruction::ShuffleVector: {
3107 // Collect the minimum number of sign bits that are shared by every vector
3108 // element referenced by the shuffle.
3109 auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
3110 if (!Shuf) {
3111 // FIXME: Add support for shufflevector constant expressions.
3112 return 1;
3113 }
3114 APInt DemandedLHS, DemandedRHS;
3115 // For undef elements, we don't know anything about the common state of
3116 // the shuffle result.
3117 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
3118 return 1;
3119 Tmp = std::numeric_limits<unsigned>::max();
3120 if (!!DemandedLHS) {
3121 const Value *LHS = Shuf->getOperand(0);
3122 Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
3123 }
3124 // If we don't know anything, early out and try computeKnownBits
3125 // fall-back.
3126 if (Tmp == 1)
3127 break;
3128 if (!!DemandedRHS) {
3129 const Value *RHS = Shuf->getOperand(1);
3130 Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
3131 Tmp = std::min(Tmp, Tmp2);
3132 }
3133 // If we don't know anything, early out and try computeKnownBits
3134 // fall-back.
3135 if (Tmp == 1)
3136 break;
3137 assert(Tmp <= Ty->getScalarSizeInBits() &&
3138 "Failed to determine minimum sign bits");
3139 return Tmp;
3140 }
3141 }
3142 }
3143
3144 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3145 // use this information.
3146
3147 // If we can examine all elements of a vector constant successfully, we're
3148 // done (we can't do any better than that). If not, keep trying.
3149 if (unsigned VecSignBits =
3150 computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3151 return VecSignBits;
3152
3153 KnownBits Known(TyBits);
3154 computeKnownBits(V, DemandedElts, Known, Depth, Q);
3155
3156 // If we know that the sign bit is either zero or one, determine the number of
3157 // identical bits in the top of the input value.
3158 return std::max(FirstAnswer, Known.countMinSignBits());
3159 }
3160
3161 /// This function computes the integer multiple of Base that equals V.
3162 /// If successful, it returns true and returns the multiple in
3163 /// Multiple. If unsuccessful, it returns false. It looks
3164 /// through SExt instructions only if LookThroughSExt is true.
3165 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
3166 bool LookThroughSExt, unsigned Depth) {
3167 assert(V && "No Value?");
3168 assert(Depth <= MaxDepth && "Limit Search Depth");
3169 assert(V->getType()->isIntegerTy() && "Not integer type!");
3170
3171 Type *T = V->getType();
3172
3173 ConstantInt *CI = dyn_cast<ConstantInt>(V);
3174
3175 if (Base == 0)
3176 return false;
3177
3178 if (Base == 1) {
3179 Multiple = V;
3180 return true;
3181 }
3182
3183 ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
3184 Constant *BaseVal = ConstantInt::get(T, Base);
3185 if (CO && CO == BaseVal) {
3186 // Multiple is 1.
3187 Multiple = ConstantInt::get(T, 1);
3188 return true;
3189 }
3190
3191 if (CI && CI->getZExtValue() % Base == 0) {
3192 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
3193 return true;
3194 }
3195
3196 if (Depth == MaxDepth) return false; // Limit search depth.
3197
3198 Operator *I = dyn_cast<Operator>(V);
3199 if (!I) return false;
3200
3201 switch (I->getOpcode()) {
3202 default: break;
3203 case Instruction::SExt:
3204 if (!LookThroughSExt) return false;
3205 // otherwise fall through to ZExt
3206 LLVM_FALLTHROUGH;
3207 case Instruction::ZExt:
3208 return ComputeMultiple(I->getOperand(0), Base, Multiple,
3209 LookThroughSExt, Depth+1);
3210 case Instruction::Shl:
3211 case Instruction::Mul: {
3212 Value *Op0 = I->getOperand(0);
3213 Value *Op1 = I->getOperand(1);
3214
3215 if (I->getOpcode() == Instruction::Shl) {
3216 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
3217 if (!Op1CI) return false;
3218 // Turn Op0 << Op1 into Op0 * 2^Op1
3219 APInt Op1Int = Op1CI->getValue();
3220 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
3221 APInt API(Op1Int.getBitWidth(), 0);
3222 API.setBit(BitToSet);
3223 Op1 = ConstantInt::get(V->getContext(), API);
3224 }
3225
3226 Value *Mul0 = nullptr;
3227 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
3228 if (Constant *Op1C = dyn_cast<Constant>(Op1))
3229 if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
3230 if (Op1C->getType()->getPrimitiveSizeInBits() <
3231 MulC->getType()->getPrimitiveSizeInBits())
3232 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
3233 if (Op1C->getType()->getPrimitiveSizeInBits() >
3234 MulC->getType()->getPrimitiveSizeInBits())
3235 MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
3236
3237 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
3238 Multiple = ConstantExpr::getMul(MulC, Op1C);
3239 return true;
3240 }
3241
3242 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
3243 if (Mul0CI->getValue() == 1) {
3244 // V == Base * Op1, so return Op1
3245 Multiple = Op1;
3246 return true;
3247 }
3248 }
3249
3250 Value *Mul1 = nullptr;
3251 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
3252 if (Constant *Op0C = dyn_cast<Constant>(Op0))
3253 if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
3254 if (Op0C->getType()->getPrimitiveSizeInBits() <
3255 MulC->getType()->getPrimitiveSizeInBits())
3256 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
3257 if (Op0C->getType()->getPrimitiveSizeInBits() >
3258 MulC->getType()->getPrimitiveSizeInBits())
3259 MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
3260
3261 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
3262 Multiple = ConstantExpr::getMul(MulC, Op0C);
3263 return true;
3264 }
3265
3266 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
3267 if (Mul1CI->getValue() == 1) {
3268 // V == Base * Op0, so return Op0
3269 Multiple = Op0;
3270 return true;
3271 }
3272 }
3273 }
3274 }
3275
3276 // We could not determine if V is a multiple of Base.
3277 return false;
3278 }
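// Example (ComputeMultiple): for V = (shl i32 %x, 5) and Base = 32, the
// shift is treated as a multiply by 32; since 32 is exactly 1 * Base, the
// returned Multiple is %x itself.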
3279
3280 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3281 const TargetLibraryInfo *TLI) {
3282 const Function *F = CB.getCalledFunction();
3283 if (!F)
3284 return Intrinsic::not_intrinsic;
3285
3286 if (F->isIntrinsic())
3287 return F->getIntrinsicID();
3288
3289 // We are going to infer semantics of a library function based on mapping it
3290 // to an LLVM intrinsic. Check that the library function is available from
3291 // this callbase and in this environment.
3292 LibFunc Func;
3293 if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3294 !CB.onlyReadsMemory())
3295 return Intrinsic::not_intrinsic;
3296
3297 switch (Func) {
3298 default:
3299 break;
3300 case LibFunc_sin:
3301 case LibFunc_sinf:
3302 case LibFunc_sinl:
3303 return Intrinsic::sin;
3304 case LibFunc_cos:
3305 case LibFunc_cosf:
3306 case LibFunc_cosl:
3307 return Intrinsic::cos;
3308 case LibFunc_exp:
3309 case LibFunc_expf:
3310 case LibFunc_expl:
3311 return Intrinsic::exp;
3312 case LibFunc_exp2:
3313 case LibFunc_exp2f:
3314 case LibFunc_exp2l:
3315 return Intrinsic::exp2;
3316 case LibFunc_log:
3317 case LibFunc_logf:
3318 case LibFunc_logl:
3319 return Intrinsic::log;
3320 case LibFunc_log10:
3321 case LibFunc_log10f:
3322 case LibFunc_log10l:
3323 return Intrinsic::log10;
3324 case LibFunc_log2:
3325 case LibFunc_log2f:
3326 case LibFunc_log2l:
3327 return Intrinsic::log2;
3328 case LibFunc_fabs:
3329 case LibFunc_fabsf:
3330 case LibFunc_fabsl:
3331 return Intrinsic::fabs;
3332 case LibFunc_fmin:
3333 case LibFunc_fminf:
3334 case LibFunc_fminl:
3335 return Intrinsic::minnum;
3336 case LibFunc_fmax:
3337 case LibFunc_fmaxf:
3338 case LibFunc_fmaxl:
3339 return Intrinsic::maxnum;
3340 case LibFunc_copysign:
3341 case LibFunc_copysignf:
3342 case LibFunc_copysignl:
3343 return Intrinsic::copysign;
3344 case LibFunc_floor:
3345 case LibFunc_floorf:
3346 case LibFunc_floorl:
3347 return Intrinsic::floor;
3348 case LibFunc_ceil:
3349 case LibFunc_ceilf:
3350 case LibFunc_ceill:
3351 return Intrinsic::ceil;
3352 case LibFunc_trunc:
3353 case LibFunc_truncf:
3354 case LibFunc_truncl:
3355 return Intrinsic::trunc;
3356 case LibFunc_rint:
3357 case LibFunc_rintf:
3358 case LibFunc_rintl:
3359 return Intrinsic::rint;
3360 case LibFunc_nearbyint:
3361 case LibFunc_nearbyintf:
3362 case LibFunc_nearbyintl:
3363 return Intrinsic::nearbyint;
3364 case LibFunc_round:
3365 case LibFunc_roundf:
3366 case LibFunc_roundl:
3367 return Intrinsic::round;
3368 case LibFunc_roundeven:
3369 case LibFunc_roundevenf:
3370 case LibFunc_roundevenl:
3371 return Intrinsic::roundeven;
3372 case LibFunc_pow:
3373 case LibFunc_powf:
3374 case LibFunc_powl:
3375 return Intrinsic::pow;
3376 case LibFunc_sqrt:
3377 case LibFunc_sqrtf:
3378 case LibFunc_sqrtl:
3379 return Intrinsic::sqrt;
3380 }
3381
3382 return Intrinsic::not_intrinsic;
3383 }
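// Example (getIntrinsicForCallSite): a call to sinf() that TLI recognizes
// and that only reads memory maps to Intrinsic::sin, while a call to a
// local-linkage function of the same name is left as not_intrinsic.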
3384
3385 /// Return true if we can prove that the specified FP value is never equal to
3386 /// -0.0.
3387 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3388 /// that a value is not -0.0. It only guarantees that -0.0 may be treated
3389 /// the same as +0.0 in floating-point ops.
3390 ///
3391 /// NOTE: this function will need to be revisited when we support non-default
3392 /// rounding modes!
3393 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3394 unsigned Depth) {
3395 if (auto *CFP = dyn_cast<ConstantFP>(V))
3396 return !CFP->getValueAPF().isNegZero();
3397
3398 // Limit search depth.
3399 if (Depth == MaxDepth)
3400 return false;
3401
3402 auto *Op = dyn_cast<Operator>(V);
3403 if (!Op)
3404 return false;
3405
3406 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3407 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3408 return true;
3409
3410 // sitofp and uitofp turn into +0.0 for zero.
3411 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3412 return true;
3413
3414 if (auto *Call = dyn_cast<CallInst>(Op)) {
3415 Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3416 switch (IID) {
3417 default:
3418 break;
3419 // sqrt(-0.0) = -0.0, no other negative results are possible.
3420 case Intrinsic::sqrt:
3421 case Intrinsic::canonicalize:
3422 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3423 // fabs(x) != -0.0
3424 case Intrinsic::fabs:
3425 return true;
3426 }
3427 }
3428
3429 return false;
3430 }
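// Example (CannotBeNegativeZero): (fadd %x, +0.0) qualifies because IEEE
// addition of -0.0 and +0.0 produces +0.0 under the default rounding mode,
// whereas a plain (fmul %x, %y) does not (e.g. -1.0 * 0.0 is -0.0).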
3431
3432 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3433 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
3434 /// bit despite comparing equal.
3435 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3436 const TargetLibraryInfo *TLI,
3437 bool SignBitOnly,
3438 unsigned Depth) {
3439 // TODO: This function does not do the right thing when SignBitOnly is true
3440 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3441 // which flips the sign bits of NaNs. See
3442 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3443
3444 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3445 return !CFP->getValueAPF().isNegative() ||
3446 (!SignBitOnly && CFP->getValueAPF().isZero());
3447 }
3448
3449 // Handle vector of constants.
3450 if (auto *CV = dyn_cast<Constant>(V)) {
3451 if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3452 unsigned NumElts = CVFVTy->getNumElements();
3453 for (unsigned i = 0; i != NumElts; ++i) {
3454 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3455 if (!CFP)
3456 return false;
3457 if (CFP->getValueAPF().isNegative() &&
3458 (SignBitOnly || !CFP->getValueAPF().isZero()))
3459 return false;
3460 }
3461
3462 // All non-negative ConstantFPs.
3463 return true;
3464 }
3465 }
3466
3467 if (Depth == MaxDepth)
3468 return false; // Limit search depth.
3469
3470 const Operator *I = dyn_cast<Operator>(V);
3471 if (!I)
3472 return false;
3473
3474 switch (I->getOpcode()) {
3475 default:
3476 break;
3477 // Unsigned integers are always nonnegative.
3478 case Instruction::UIToFP:
3479 return true;
3480 case Instruction::FMul:
3481 case Instruction::FDiv:
3482 // X * X is always non-negative or a NaN.
3483 // X / X is always exactly 1.0 or a NaN.
3484 if (I->getOperand(0) == I->getOperand(1) &&
3485 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3486 return true;
3487
3488 LLVM_FALLTHROUGH;
3489 case Instruction::FAdd:
3490 case Instruction::FRem:
3491 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3492 Depth + 1) &&
3493 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3494 Depth + 1);
3495 case Instruction::Select:
3496 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3497 Depth + 1) &&
3498 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3499 Depth + 1);
3500 case Instruction::FPExt:
3501 case Instruction::FPTrunc:
3502 // Widening/narrowing never change sign.
3503 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3504 Depth + 1);
3505 case Instruction::ExtractElement:
3506 // Look through extract element. At the moment we keep this simple and skip
3507 // tracking the specific element. But at least we might find information
3508 // valid for all elements of the vector.
3509 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3510 Depth + 1);
3511 case Instruction::Call:
3512 const auto *CI = cast<CallInst>(I);
3513 Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3514 switch (IID) {
3515 default:
3516 break;
3517 case Intrinsic::maxnum: {
3518 Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3519 auto isPositiveNum = [&](Value *V) {
3520 if (SignBitOnly) {
3521 // With SignBitOnly, this is tricky because the result of
3522 // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3523 // a constant strictly greater than 0.0.
3524 const APFloat *C;
3525 return match(V, m_APFloat(C)) &&
3526 *C > APFloat::getZero(C->getSemantics());
3527 }
3528
3529 // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3530 // maxnum can't be ordered-less-than-zero.
3531 return isKnownNeverNaN(V, TLI) &&
3532 cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3533 };
3534
3535 // TODO: This could be improved. We could also check that neither operand
3536 // has its sign bit set (and at least 1 is not-NAN?).
3537 return isPositiveNum(V0) || isPositiveNum(V1);
3538 }
3539
3540 case Intrinsic::maximum:
3541 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3542 Depth + 1) ||
3543 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3544 Depth + 1);
3545 case Intrinsic::minnum:
3546 case Intrinsic::minimum:
3547 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3548 Depth + 1) &&
3549 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3550 Depth + 1);
3551 case Intrinsic::exp:
3552 case Intrinsic::exp2:
3553 case Intrinsic::fabs:
3554 return true;
3555
3556 case Intrinsic::sqrt:
3557 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
3558 if (!SignBitOnly)
3559 return true;
3560 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3561 CannotBeNegativeZero(CI->getOperand(0), TLI));
3562
3563 case Intrinsic::powi:
3564 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3565 // powi(x,n) is non-negative if n is even.
3566 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3567 return true;
3568 }
3569 // TODO: This is not correct. Given that exp is an integer, here are the
3570 // ways that pow can return a negative value:
3571 //
3572 // pow(x, exp) --> negative if exp is odd and x is negative.
3573 // pow(-0, exp) --> -inf if exp is negative odd.
3574 // pow(-0, exp) --> -0 if exp is positive odd.
3575 // pow(-inf, exp) --> -0 if exp is negative odd.
3576 // pow(-inf, exp) --> -inf if exp is positive odd.
3577 //
3578 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3579 // but we must return false if x == -0. Unfortunately we do not currently
3580 // have a way of expressing this constraint. See details in
3581 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3582 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3583 Depth + 1);
3584
3585 case Intrinsic::fma:
3586 case Intrinsic::fmuladd:
3587 // x*x+y is non-negative if y is non-negative.
3588 return I->getOperand(0) == I->getOperand(1) &&
3589 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3590 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3591 Depth + 1);
3592 }
3593 break;
3594 }
3595 return false;
3596 }
3597
3598 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3599 const TargetLibraryInfo *TLI) {
3600 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3601 }
3602
3603 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3604 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3605 }
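// The two wrappers differ only for -0.0: CannotBeOrderedLessThanZero(-0.0)
// is true because -0.0 compares equal to +0.0, while SignBitMustBeZero(-0.0)
// is false because the sign bit is set.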
3606
3607 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3608 unsigned Depth) {
3609 assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3610
3611 // If we're told that infinities won't happen, assume they won't.
3612 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3613 if (FPMathOp->hasNoInfs())
3614 return true;
3615
3616 // Handle scalar constants.
3617 if (auto *CFP = dyn_cast<ConstantFP>(V))
3618 return !CFP->isInfinity();
3619
3620 if (Depth == MaxDepth)
3621 return false;
3622
3623 if (auto *Inst = dyn_cast<Instruction>(V)) {
3624 switch (Inst->getOpcode()) {
3625 case Instruction::Select: {
3626 return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3627 isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3628 }
3629 case Instruction::UIToFP:
3630 // If the input type fits into the floating type the result is finite.
3631 return ilogb(APFloat::getLargest(
3632 Inst->getType()->getScalarType()->getFltSemantics())) >=
3633 (int)Inst->getOperand(0)->getType()->getScalarSizeInBits();
3634 default:
3635 break;
3636 }
3637 }
3638
3639 // Try to handle fixed-width vector constants.
3640 if (isa<FixedVectorType>(V->getType()) && isa<Constant>(V)) {
3641 // For vectors, verify that each element is not infinity.
3642 unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
3643 for (unsigned i = 0; i != NumElts; ++i) {
3644 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3645 if (!Elt)
3646 return false;
3647 if (isa<UndefValue>(Elt))
3648 continue;
3649 auto *CElt = dyn_cast<ConstantFP>(Elt);
3650 if (!CElt || CElt->isInfinity())
3651 return false;
3652 }
3653 // All elements were confirmed non-infinity or undefined.
3654 return true;
3655 }
3656
3657 // Was not able to prove that V never contains infinity.
3658 return false;
3659 }
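// Example (isKnownNeverInfinity): (uitofp i16 %x to float) can never yield
// infinity because ilogb of the largest float is 127, which is >= 16 (the
// width of i16), so the conversion cannot overflow.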
3660
3661 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3662 unsigned Depth) {
3663 assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3664
3665 // If we're told that NaNs won't happen, assume they won't.
3666 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3667 if (FPMathOp->hasNoNaNs())
3668 return true;
3669
3670 // Handle scalar constants.
3671 if (auto *CFP = dyn_cast<ConstantFP>(V))
3672 return !CFP->isNaN();
3673
3674 if (Depth == MaxDepth)
3675 return false;
3676
3677 if (auto *Inst = dyn_cast<Instruction>(V)) {
3678 switch (Inst->getOpcode()) {
3679 case Instruction::FAdd:
3680 case Instruction::FSub:
3681 // Adding positive and negative infinity produces NaN.
3682 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3683 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3684 (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3685 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3686
3687 case Instruction::FMul:
3688 // Zero multiplied with infinity produces NaN.
3689 // FIXME: If neither side can be zero fmul never produces NaN.
3690 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3691 isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3692 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3693 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3694
3695 case Instruction::FDiv:
3696 case Instruction::FRem:
3697 // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3698 return false;
3699
3700 case Instruction::Select: {
3701 return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3702 isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3703 }
3704 case Instruction::SIToFP:
3705 case Instruction::UIToFP:
3706 return true;
3707 case Instruction::FPTrunc:
3708 case Instruction::FPExt:
3709 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3710 default:
3711 break;
3712 }
3713 }
3714
3715 if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3716 switch (II->getIntrinsicID()) {
3717 case Intrinsic::canonicalize:
3718 case Intrinsic::fabs:
3719 case Intrinsic::copysign:
3720 case Intrinsic::exp:
3721 case Intrinsic::exp2:
3722 case Intrinsic::floor:
3723 case Intrinsic::ceil:
3724 case Intrinsic::trunc:
3725 case Intrinsic::rint:
3726 case Intrinsic::nearbyint:
3727 case Intrinsic::round:
3728 case Intrinsic::roundeven:
3729 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3730 case Intrinsic::sqrt:
3731 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3732 CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3733 case Intrinsic::minnum:
3734 case Intrinsic::maxnum:
3735 // If either operand is not NaN, the result is not NaN.
3736 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3737 isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3738 default:
3739 return false;
3740 }
3741 }
3742
3743 // Try to handle fixed width vector constants
3744 if (isa<FixedVectorType>(V->getType()) && isa<Constant>(V)) {
3745 // For vectors, verify that each element is not NaN.
3746 unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
3747 for (unsigned i = 0; i != NumElts; ++i) {
3748 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3749 if (!Elt)
3750 return false;
3751 if (isa<UndefValue>(Elt))
3752 continue;
3753 auto *CElt = dyn_cast<ConstantFP>(Elt);
3754 if (!CElt || CElt->isNaN())
3755 return false;
3756 }
3757 // All elements were confirmed not-NaN or undefined.
3758 return true;
3759 }
3760
3761 // Was not able to prove that V never contains NaN
3762 return false;
3763 }
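// Example (isKnownNeverNaN): (fadd %a, %b) is NaN-free only if both inputs
// are NaN-free and at least one of them is also known finite, since
// (+inf) + (-inf) produces NaN.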
3764
3765 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3766
3767 // All byte-wide stores are splatable, even of arbitrary variables.
3768 if (V->getType()->isIntegerTy(8))
3769 return V;
3770
3771 LLVMContext &Ctx = V->getContext();
3772
3773 // Undef don't care.
3774 auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3775 if (isa<UndefValue>(V))
3776 return UndefInt8;
3777
3778 // Return Undef for zero-sized type.
3779 if (!DL.getTypeStoreSize(V->getType()).isNonZero())
3780 return UndefInt8;
3781
3782 Constant *C = dyn_cast<Constant>(V);
3783 if (!C) {
3784 // Conceptually, we could handle things like:
3785 // %a = zext i8 %X to i16
3786 // %b = shl i16 %a, 8
3787 // %c = or i16 %a, %b
3788 // but until there is an example that actually needs this, it doesn't seem
3789 // worth worrying about.
3790 return nullptr;
3791 }
3792
3793 // Handle 'null' ConstantAggregateZero etc.
3794 if (C->isNullValue())
3795 return Constant::getNullValue(Type::getInt8Ty(Ctx));
3796
3797 // Constant floating-point values can be handled as integer values if the
3798 // corresponding integer value is "byteable". An important case is 0.0.
3799 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3800 Type *Ty = nullptr;
3801 if (CFP->getType()->isHalfTy())
3802 Ty = Type::getInt16Ty(Ctx);
3803 else if (CFP->getType()->isFloatTy())
3804 Ty = Type::getInt32Ty(Ctx);
3805 else if (CFP->getType()->isDoubleTy())
3806 Ty = Type::getInt64Ty(Ctx);
3807 // Don't handle long double formats, which have strange constraints.
3808 return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3809 : nullptr;
3810 }
3811
3812 // We can handle constant integers that are multiple of 8 bits.
3813 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3814 if (CI->getBitWidth() % 8 == 0) {
3815 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3816 if (!CI->getValue().isSplat(8))
3817 return nullptr;
3818 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3819 }
3820 }
3821
3822 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3823 if (CE->getOpcode() == Instruction::IntToPtr) {
3824 auto PS = DL.getPointerSizeInBits(
3825 cast<PointerType>(CE->getType())->getAddressSpace());
3826 return isBytewiseValue(
3827 ConstantExpr::getIntegerCast(CE->getOperand(0),
3828 Type::getIntNTy(Ctx, PS), false),
3829 DL);
3830 }
3831 }
3832
3833 auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3834 if (LHS == RHS)
3835 return LHS;
3836 if (!LHS || !RHS)
3837 return nullptr;
3838 if (LHS == UndefInt8)
3839 return RHS;
3840 if (RHS == UndefInt8)
3841 return LHS;
3842 return nullptr;
3843 };
3844
3845 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3846 Value *Val = UndefInt8;
3847 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3848 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3849 return nullptr;
3850 return Val;
3851 }
3852
3853 if (isa<ConstantAggregate>(C)) {
3854 Value *Val = UndefInt8;
3855 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3856 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3857 return nullptr;
3858 return Val;
3859 }
3860
3861 // Don't try to handle the handful of other constants.
3862 return nullptr;
3863 }
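// Example (isBytewiseValue): i32 0xABABABAB splats to the i8 byte 0xAB and
// double 0.0 splats to a zero byte, while i16 0x0102 is not byte-repeating
// and yields nullptr.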
3864
3865 // This is the recursive version of BuildSubAggregate. It takes a few different
3866 // arguments. Idxs is the index within the nested struct From that we are
3867 // looking at now (which is of type IndexedType). IdxSkip is the number of
3868 // indices from Idxs that should be left out when inserting into the resulting
3869 // struct. To is the result struct built so far, new insertvalue instructions
3870 // build on that.
3871 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3872 SmallVectorImpl<unsigned> &Idxs,
3873 unsigned IdxSkip,
3874 Instruction *InsertBefore) {
3875 StructType *STy = dyn_cast<StructType>(IndexedType);
3876 if (STy) {
3877 // Save the original To argument so we can modify it
3878 Value *OrigTo = To;
3879 // General case, the type indexed by Idxs is a struct
3880 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3881 // Process each struct element recursively
3882 Idxs.push_back(i);
3883 Value *PrevTo = To;
3884 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3885 InsertBefore);
3886 Idxs.pop_back();
3887 if (!To) {
3888 // Couldn't find any inserted value for this index? Cleanup
3889 while (PrevTo != OrigTo) {
3890 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3891 PrevTo = Del->getAggregateOperand();
3892 Del->eraseFromParent();
3893 }
3894 // Stop processing elements
3895 break;
3896 }
3897 }
3898 // If we successfully found a value for each of our subaggregates
3899 if (To)
3900 return To;
3901 }
3902 // Base case, the type indexed by Idxs is not a struct, or not all of
3903 // the struct's elements had a value that was inserted directly. In the latter
3904 // case, perhaps we can't determine each of the subelements individually, but
3905 // we might be able to find the complete struct somewhere.
3906
3907 // Find the value that is at that particular spot
3908 Value *V = FindInsertedValue(From, Idxs);
3909
3910 if (!V)
3911 return nullptr;
3912
3913 // Insert the value in the new (sub) aggregate
3914 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3915 "tmp", InsertBefore);
3916 }
3917
3918 // This helper takes a nested struct and extracts a part of it (which is again a
3919 // struct) into a new value. For example, given the struct:
3920 // { a, { b, { c, d }, e } }
3921 // and the indices "1, 1" this returns
3922 // { c, d }.
3923 //
3924 // It does this by inserting an insertvalue for each element in the resulting
3925 // struct, as opposed to just inserting a single struct. This will only work if
3926 // each of the elements of the substruct are known (ie, inserted into From by an
3927 // insertvalue instruction somewhere).
3928 //
3929 // All inserted insertvalue instructions are inserted before InsertBefore
3930 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3931 Instruction *InsertBefore) {
3932 assert(InsertBefore && "Must have someplace to insert!");
3933 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3934 idx_range);
3935 Value *To = UndefValue::get(IndexedType);
3936 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3937 unsigned IdxSkip = Idxs.size();
3938
3939 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3940 }
3941
3942 /// Given an aggregate and a sequence of indices, see if the scalar value
3943 /// indexed is already around as a register, for example if it was inserted
3944 /// directly into the aggregate.
3945 ///
3946 /// If InsertBefore is not null, this function will duplicate (modified)
3947 /// insertvalues when a part of a nested struct is extracted.
3948 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3949 Instruction *InsertBefore) {
3950 // Nothing to index? Just return V then (this is useful at the end of our
3951 // recursion).
3952 if (idx_range.empty())
3953 return V;
3954 // We have indices, so V should have an indexable type.
3955 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3956 "Not looking at a struct or array?");
3957 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3958 "Invalid indices for type?");
3959
3960 if (Constant *C = dyn_cast<Constant>(V)) {
3961 C = C->getAggregateElement(idx_range[0]);
3962 if (!C) return nullptr;
3963 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3964 }
3965
3966 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3967 // Loop the indices for the insertvalue instruction in parallel with the
3968 // requested indices
3969 const unsigned *req_idx = idx_range.begin();
3970 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3971 i != e; ++i, ++req_idx) {
3972 if (req_idx == idx_range.end()) {
3973 // We can't handle this without inserting insertvalues
3974 if (!InsertBefore)
3975 return nullptr;
3976
3977 // The requested index identifies a part of a nested aggregate. Handle
3978 // this specially. For example,
3979 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3980 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3981 // %C = extractvalue {i32, { i32, i32 } } %B, 1
3982 // This can be changed into
3983 // %A = insertvalue {i32, i32 } undef, i32 10, 0
3984 // %C = insertvalue {i32, i32 } %A, i32 11, 1
3985 // which allows the unused 0,0 element from the nested struct to be
3986 // removed.
3987 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3988 InsertBefore);
3989 }
3990
3991 // This insertvalue inserts something other than what we are looking for.
3992 // See if the (aggregate) value inserted into has the value we are
3993 // looking for, then.
3994 if (*req_idx != *i)
3995 return FindInsertedValue(I->getAggregateOperand(), idx_range,
3996 InsertBefore);
3997 }
3998 // If we end up here, the indices of the insertvalue match with those
3999 // requested (though possibly only partially). Now we recursively look at
4000 // the inserted value, passing any remaining indices.
4001 return FindInsertedValue(I->getInsertedValueOperand(),
4002 makeArrayRef(req_idx, idx_range.end()),
4003 InsertBefore);
4004 }
4005
4006 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
4007 // If we're extracting a value from an aggregate that was extracted from
4008 // something else, we can extract from that something else directly instead.
4009 // However, we will need to chain I's indices with the requested indices.
4010
4011 // Calculate the number of indices required
4012 unsigned size = I->getNumIndices() + idx_range.size();
4013 // Allocate some space to put the new indices in
4014 SmallVector<unsigned, 5> Idxs;
4015 Idxs.reserve(size);
4016 // Add indices from the extract value instruction
4017 Idxs.append(I->idx_begin(), I->idx_end());
4018
4019 // Add requested indices
4020 Idxs.append(idx_range.begin(), idx_range.end());
4021
4022 assert(Idxs.size() == size
4023 && "Number of indices added not correct?");
4024
4025 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
4026 }
4027 // Otherwise, we don't know (such as, extracting from a function return value
4028 // or load instruction)
4029 return nullptr;
4030 }
4031
4032 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
4033 unsigned CharSize) {
4034 // Make sure the GEP has exactly three arguments.
4035 if (GEP->getNumOperands() != 3)
4036 return false;
4037
4038 // Make sure the index-ee is a pointer to an array of \p CharSize
4039 // integers.
4040 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
4041 if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
4042 return false;
4043
4044 // Check to make sure that the first operand of the GEP is an integer and
4045 // has value 0 so that we are sure we're indexing into the initializer.
4046 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
4047 if (!FirstIdx || !FirstIdx->isZero())
4048 return false;
4049
4050 return true;
4051 }
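// Example (isGEPBasedOnPointerToString): with CharSize == 8, the operator
//   getelementptr [6 x i8], [6 x i8]* @str, i64 0, i64 %i
// qualifies: three operands, an i8 array element type, and a zero first
// index.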
4052
4053 bool llvm::getConstantDataArrayInfo(const Value *V,
4054 ConstantDataArraySlice &Slice,
4055 unsigned ElementSize, uint64_t Offset) {
4056 assert(V);
4057
4058 // Look through bitcast instructions and geps.
4059 V = V->stripPointerCasts();
4060
4061 // If the value is a GEP instruction or constant expression, treat it as an
4062 // offset.
4063 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
4064 // The GEP operator should be based on a pointer to string constant, and is
4065 // indexing into the string constant.
4066 if (!isGEPBasedOnPointerToString(GEP, ElementSize))
4067 return false;
4068
4069 // If the second index isn't a ConstantInt, then this is a variable index
4070 // into the array. If this occurs, we can't say anything meaningful about
4071 // the string.
4072 uint64_t StartIdx = 0;
4073 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
4074 StartIdx = CI->getZExtValue();
4075 else
4076 return false;
4077 return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
4078 StartIdx + Offset);
4079 }
4080
4081 // The GEP instruction, constant or instruction, must reference a global
4082 // variable that is a constant and is initialized. The referenced constant
4083 // initializer is the array that we'll use for optimization.
4084 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
4085 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
4086 return false;
4087
4088 const ConstantDataArray *Array;
4089 ArrayType *ArrayTy;
4090 if (GV->getInitializer()->isNullValue()) {
4091 Type *GVTy = GV->getValueType();
4092 if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
4093 // A zeroinitializer for the array; there is no ConstantDataArray.
4094 Array = nullptr;
4095 } else {
4096 const DataLayout &DL = GV->getParent()->getDataLayout();
4097 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
4098 uint64_t Length = SizeInBytes / (ElementSize / 8);
4099 if (Length <= Offset)
4100 return false;
4101
4102 Slice.Array = nullptr;
4103 Slice.Offset = 0;
4104 Slice.Length = Length - Offset;
4105 return true;
4106 }
4107 } else {
4108 // This must be a ConstantDataArray.
4109 Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
4110 if (!Array)
4111 return false;
4112 ArrayTy = Array->getType();
4113 }
4114 if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
4115 return false;
4116
4117 uint64_t NumElts = ArrayTy->getArrayNumElements();
4118 if (Offset > NumElts)
4119 return false;
4120
4121 Slice.Array = Array;
4122 Slice.Offset = Offset;
4123 Slice.Length = NumElts - Offset;
4124 return true;
4125 }
4126
4127 /// This function computes the length of a null-terminated C string pointed to
4128 /// by V. If successful, it returns true and returns the string in Str.
4129 /// If unsuccessful, it returns false.
4130 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
4131 uint64_t Offset, bool TrimAtNul) {
4132 ConstantDataArraySlice Slice;
4133 if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
4134 return false;
4135
4136 if (Slice.Array == nullptr) {
4137 if (TrimAtNul) {
4138 Str = StringRef();
4139 return true;
4140 }
4141 if (Slice.Length == 1) {
4142 Str = StringRef("", 1);
4143 return true;
4144 }
4145 // We cannot instantiate a StringRef as we do not have an appropriate string
4146 // of 0s at hand.
4147 return false;
4148 }
4149
4150 // Start out with the entire array in the StringRef.
4151 Str = Slice.Array->getAsString();
4152 // Skip over 'offset' bytes.
4153 Str = Str.substr(Slice.Offset);
4154
4155 if (TrimAtNul) {
4156 // Trim off the \0 and anything after it. If the array is not nul
4157 // terminated, we just return the whole end of string. The client may know
4158 // some other way that the string is length-bound.
4159 Str = Str.substr(0, Str.find('\0'));
4160 }
4161 return true;
4162 }
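// Example (getConstantStringInfo): for @s = constant [6 x i8] c"hello\00",
// querying the pointer @s fills Str with "hello" when TrimAtNul is set and
// with all six bytes otherwise.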
4163
4164 // These next two are very similar to the above, but also look through PHI
4165 // nodes.
4166 // TODO: See if we can integrate these two together.
4167
4168 /// If we can compute the length of the string pointed to by
4169 /// the specified pointer, return 'len+1'. If we can't, return 0.
4170 static uint64_t GetStringLengthH(const Value *V,
4171 SmallPtrSetImpl<const PHINode*> &PHIs,
4172 unsigned CharSize) {
4173 // Look through noop bitcast instructions.
4174 V = V->stripPointerCasts();
4175
4176 // If this is a PHI node, there are two cases: either we have already seen it
4177 // or we haven't.
4178 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
4179 if (!PHIs.insert(PN).second)
4180 return ~0ULL; // already in the set.
4181
4182 // If it was new, see if all the input strings are the same length.
4183 uint64_t LenSoFar = ~0ULL;
4184 for (Value *IncValue : PN->incoming_values()) {
4185 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
4186 if (Len == 0) return 0; // Unknown length -> unknown.
4187
4188 if (Len == ~0ULL) continue;
4189
4190 if (Len != LenSoFar && LenSoFar != ~0ULL)
4191 return 0; // Disagree -> unknown.
4192 LenSoFar = Len;
4193 }
4194
4195 // Success, all agree.
4196 return LenSoFar;
4197 }
4198
4199 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
4200 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4201 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4202 if (Len1 == 0) return 0;
4203 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4204 if (Len2 == 0) return 0;
4205 if (Len1 == ~0ULL) return Len2;
4206 if (Len2 == ~0ULL) return Len1;
4207 if (Len1 != Len2) return 0;
4208 return Len1;
4209 }
4210
4211 // Otherwise, see if we can read the string.
4212 ConstantDataArraySlice Slice;
4213 if (!getConstantDataArrayInfo(V, Slice, CharSize))
4214 return 0;
4215
4216 if (Slice.Array == nullptr)
4217 return 1;
4218
4219 // Search for nul characters
4220 unsigned NullIndex = 0;
4221 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4222 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4223 break;
4224 }
4225
4226 return NullIndex + 1;
4227 }
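// Example (GetStringLengthH): for @s = constant [6 x i8] c"hello\00" it
// returns 6 (five characters plus the terminating nul), matching the
// 'len+1' convention documented above.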
4228
4229 /// If we can compute the length of the string pointed to by
4230 /// the specified pointer, return 'len+1'. If we can't, return 0.
4231 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4232 if (!V->getType()->isPointerTy())
4233 return 0;
4234
4235 SmallPtrSet<const PHINode*, 32> PHIs;
4236 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4237 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
4238 // an empty string as a length.
4239 return Len == ~0ULL ? 1 : Len;
4240 }
4241
4242 const Value *
4243 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4244 bool MustPreserveNullness) {
4245 assert(Call &&
4246 "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4247 if (const Value *RV = Call->getReturnedArgOperand())
4248 return RV;
4249 // This can be used only as an aliasing property.
4250 if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4251 Call, MustPreserveNullness))
4252 return Call->getArgOperand(0);
4253 return nullptr;
4254 }
4255
4256 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4257 const CallBase *Call, bool MustPreserveNullness) {
4258 switch (Call->getIntrinsicID()) {
4259 // NOTE: we can't return true for setbounds even though the resulting pointer
4260 // aliases with the target. It might not grant access to the last few bytes!
4261 // If we return true here for setbounds, then GVN will remove some
4262 // loads that would have trapped at runtime. See cheri-intrinisics.ll test.
4263 // case Intrinsic::cheri_cap_bounds_set:
4264 // case Intrinsic::cheri_cap_bounds_set_exact:
4265 // case Intrinsic::cheri_bounded_stack_cap:
4266 case Intrinsic::launder_invariant_group:
4267 case Intrinsic::strip_invariant_group:
4268 case Intrinsic::aarch64_irg:
4269 case Intrinsic::aarch64_tagp:
4270 return true;
4271 case Intrinsic::ptrmask:
4272 return !MustPreserveNullness;
4273 default:
4274 return false;
4275 }
4276 }
4277
4278 /// \p PN defines a loop-variant pointer to an object. Check if the
4279 /// previous iteration of the loop was referring to the same object as \p PN.
4280 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4281 const LoopInfo *LI) {
4282 // Find the loop-defined value.
4283 Loop *L = LI->getLoopFor(PN->getParent());
4284 if (PN->getNumIncomingValues() != 2)
4285 return true;
4286
4287 // Find the value from previous iteration.
4288 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4289 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4290 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4291 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4292 return true;
4293
4294 // If a new pointer is loaded in the loop, the pointer references a different
4295 // object in every iteration. E.g.:
4296 // for (i)
4297 // int *p = a[i];
4298 // ...
4299 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4300 if (!L->isLoopInvariant(Load->getPointerOperand()))
4301 return false;
4302 return true;
4303 }
4304
4305 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
4306 unsigned MaxLookup) {
4307 if (!V->getType()->isPointerTy())
4308 return V;
4309 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4310 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
4311 V = GEP->getPointerOperand();
4312 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4313 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4314 V = cast<Operator>(V)->getOperand(0);
4315 if (!V->getType()->isPointerTy())
4316 return V;
4317 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
4318 if (GA->isInterposable())
4319 return V;
4320 V = GA->getAliasee();
4321 } else {
4322 if (auto *PHI = dyn_cast<PHINode>(V)) {
4323 // Look through single-arg phi nodes created by LCSSA.
4324 if (PHI->getNumIncomingValues() == 1) {
4325 V = PHI->getIncomingValue(0);
4326 continue;
4327 }
4328 } else if (auto *Call = dyn_cast<CallBase>(V)) {
4329 // CaptureTracking can know about special capturing properties of some
4330 // intrinsics like launder.invariant.group, that can't be expressed with
4331 // the attributes, but have properties like returning aliasing pointer.
4332 // Because some analysis may assume that nocaptured pointer is not
4333 // returned from some special intrinsic (because function would have to
4334 // be marked with returns attribute), it is crucial to use this function
4335 // because it should be in sync with CaptureTracking. Not using it may
4336 // cause weird miscompilations where 2 aliasing pointers are assumed to
4337 // noalias.
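        // An illustrative case is
        //   %q = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
        // where the returned %q aliases %p, making %p the underlying object.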
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      return V;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  }
  return V;
}

void llvm::GetUnderlyingObjects(const Value *V,
                                SmallVectorImpl<const Value *> &Objects,
                                const DataLayout &DL, LoopInfo *LI,
                                unsigned MaxLookup) {
  SmallPtrSet<const Value *, 4> Visited;
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    const Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObject(P, DL, MaxLookup);

    if (!Visited.insert(P).second)
      continue;

    if (auto *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (auto *PN = dyn_cast<PHINode>(P)) {
      // If this PHI changes the underlying object in every iteration of the
      // loop, don't look through it. Consider:
      //   int **A;
      //   for (i) {
      //     Prev = Curr;  // Prev = PHI (Prev_0, Curr)
      //     Curr = A[i];
      //     *Prev, *Curr;
      //
      // Prev is tracking Curr one iteration behind so they refer to different
      // underlying objects.
      if (!LI || !LI->isLoopHeader(PN->getParent()) ||
          isSameUnderlyingObjectInLoop(PN, LI))
        for (Value *IncValue : PN->incoming_values())
          Worklist.push_back(IncValue);
      continue;
    }

    Objects.push_back(P);
  } while (!Worklist.empty());
}

/// This is the function that does the work of looking through basic
/// ptrtoint+arithmetic+inttoptr sequences.
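/// For example, given the sequence
///   %i = ptrtoint i8* %obj to i64
///   %a = add i64 %i, 16
/// walking from %a steps through the add and returns %obj via the operand of
/// the ptrtoint.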
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant, a multiplied value, or a phi, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
           !isa<PHINode>(U->getOperand(1))))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (true);
}

/// This is a wrapper around GetUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
/// It returns false if an unidentified object is found in
/// GetUnderlyingObjects.
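/// E.g. (an illustrative sequence), for
///   %i = ptrtoint i8* @g to i64
///   %j = add i64 %i, 8
///   %p = inttoptr i64 %j to i8*
/// this collects @g as the underlying object of %p.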
bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
                                          SmallVectorImpl<Value *> &Objects,
                                          const DataLayout &DL) {
  SmallPtrSet<const Value *, 16> Visited;
  SmallVector<const Value *, 4> Working(1, V);
  do {
    V = Working.pop_back_val();

    SmallVector<const Value *, 4> Objs;
    GetUnderlyingObjects(V, Objs, DL);

    for (const Value *V : Objs) {
      if (!Visited.insert(V).second)
        continue;
      if (Operator::getOpcode(V) == Instruction::IntToPtr) {
        const Value *O =
            getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
        if (O->getType()->isPointerTy()) {
          Working.push_back(O);
          continue;
        }
      }
      // If GetUnderlyingObjects fails to find an identifiable object,
      // getUnderlyingObjectsForCodeGen also fails for safety.
      if (!isIdentifiedObject(V)) {
        Objects.clear();
        return false;
      }
      Objects.push_back(const_cast<Value *>(V));
    }
  } while (!Working.empty());
  return true;
}

/// Return true if the only users of this pointer are lifetime markers.
bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
  for (const User *U : V->users()) {
    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II) return false;

    if (!II->isLifetimeStartOrEnd())
      return false;
  }
  return true;
}

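/// Return true if speculatively executing this (otherwise unordered) load is
/// unsafe, e.g. because hoisting it could introduce a data race or a
/// sanitizer report that the original program did not have.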
bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
  if (!LI.isUnordered())
    return true;
  const Function &F = *LI.getFunction();
  // Speculative load may create a race that did not exist in the source.
  return F.hasFnAttribute(Attribute::SanitizeThread) ||
         // Speculative load may load data from dirty regions.
         F.hasFnAttribute(Attribute::SanitizeAddress) ||
         F.hasFnAttribute(Attribute::SanitizeHWAddress);
}

bool llvm::isSafeToSpeculativelyExecute(const Value *V,
                                        const Instruction *CtxI,
                                        const DominatorTree *DT) {
  const Operator *Inst = dyn_cast<Operator>(V);
  if (!Inst)
    return false;

  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
    if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
      if (C->canTrap())
        return false;

  switch (Inst->getOpcode()) {
  default:
    return true;
  case Instruction::UDiv:
  case Instruction::URem: {
    // x / y is undefined if y == 0.
    const APInt *V;
    if (match(Inst->getOperand(1), m_APInt(V)))
      return *V != 0;
    return false;
  }
  case Instruction::SDiv:
  case Instruction::SRem: {
    // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
    const APInt *Numerator, *Denominator;
    if (!match(Inst->getOperand(1), m_APInt(Denominator)))
      return false;
    // We cannot hoist this division if the denominator is 0.
    if (*Denominator == 0)
      return false;
    // It's safe to hoist if the denominator is not 0 or -1.
    if (*Denominator != -1)
      return true;
    // At this point we know that the denominator is -1. It is safe to hoist as
    // long as we know that the numerator is not INT_MIN.
    if (match(Inst->getOperand(0), m_APInt(Numerator)))
      return !Numerator->isMinSignedValue();
    // The numerator *might* be MinSignedValue.
    return false;
  }
  case Instruction::Load: {
    const LoadInst *LI = cast<LoadInst>(Inst);
    if (mustSuppressSpeculation(*LI))
      return false;
    const DataLayout &DL = LI->getModule()->getDataLayout();
    return isDereferenceableAndAlignedPointer(
        LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
        DL, CtxI, DT);
  }
  case Instruction::Call: {
    auto *CI = cast<const CallInst>(Inst);
    const Function *Callee = CI->getCalledFunction();

    // The called function could have undefined behavior or side-effects, even
    // if marked readnone nounwind.
    return Callee && Callee->isSpeculatable();
  }
  case Instruction::VAArg:
  case Instruction::Alloca:
  case Instruction::Invoke:
  case Instruction::CallBr:
  case Instruction::PHI:
  case Instruction::Store:
  case Instruction::Ret:
  case Instruction::Br:
  case Instruction::IndirectBr:
  case Instruction::Switch:
  case Instruction::Unreachable:
  case Instruction::Fence:
  case Instruction::AtomicRMW:
  case Instruction::AtomicCmpXchg:
  case Instruction::LandingPad:
  case Instruction::Resume:
  case Instruction::CatchSwitch:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
  case Instruction::CleanupPad:
  case Instruction::CleanupRet:
    return false; // Misc instructions which have effects
  }
}

bool llvm::mayBeMemoryDependent(const Instruction &I) {
  return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
}

/// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
  switch (OR) {
  case ConstantRange::OverflowResult::MayOverflow:
    return OverflowResult::MayOverflow;
  case ConstantRange::OverflowResult::AlwaysOverflowsLow:
    return OverflowResult::AlwaysOverflowsLow;
  case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
    return OverflowResult::AlwaysOverflowsHigh;
  case ConstantRange::OverflowResult::NeverOverflows:
    return OverflowResult::NeverOverflows;
  }
  llvm_unreachable("Unknown OverflowResult");
}

/// Combine constant ranges from computeConstantRange() and computeKnownBits().
static ConstantRange computeConstantRangeIncludingKnownBits(
    const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
    OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
  KnownBits Known = computeKnownBits(
      V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
  ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
  ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
  ConstantRange::PreferredRangeType RangeType =
      ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
  return CR1.intersectWith(CR2, RangeType);
}

OverflowResult llvm::computeOverflowForUnsignedMul(
    const Value *LHS, const Value *RHS, const DataLayout &DL,
    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
    bool UseInstrInfo) {
  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
                                        nullptr, UseInstrInfo);
  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
                                        nullptr, UseInstrInfo);
  ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
  ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
  return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
}

OverflowResult
llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                  const DataLayout &DL, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  // Multiplying n * m significant bits yields a result of n + m significant
  // bits. If the total number of significant bits does not exceed the
  // result bit width (minus 1), there is no overflow.
  // This means if we have enough leading sign bits in the operands
  // we can guarantee that the result does not overflow.
  // Ref: "Hacker's Delight" by Henry Warren
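  // For example, two i8 operands that each have at least five sign bits lie
  // in [-8, 7], so |product| <= 64, which always fits in an i8.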
  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();

  // Note that underestimating the number of sign bits gives a more
  // conservative answer.
  unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
                      ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);

  // First handle the easy case: if we have enough sign bits there's
  // definitely no overflow.
  if (SignBits > BitWidth + 1)
    return OverflowResult::NeverOverflows;

  // There are two ambiguous cases where there can be no overflow:
  //   SignBits == BitWidth + 1    and
  //   SignBits == BitWidth
  // The second case is difficult to check, therefore we only handle the
  // first case.
  if (SignBits == BitWidth + 1) {
    // It overflows only when both arguments are negative and the true
    // product is exactly the minimum negative number.
    // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
    // For simplicity we just check if at least one side is not negative.
    KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
                                          nullptr, UseInstrInfo);
    KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
                                          nullptr, UseInstrInfo);
    if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
      return OverflowResult::NeverOverflows;
  }
  return OverflowResult::MayOverflow;
}

OverflowResult llvm::computeOverflowForUnsignedAdd(
    const Value *LHS, const Value *RHS, const DataLayout &DL,
    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
    bool UseInstrInfo) {
  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
      LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
      nullptr, UseInstrInfo);
  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
      RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
      nullptr, UseInstrInfo);
  return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
}

static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
                                                  const Value *RHS,
                                                  const AddOperator *Add,
                                                  const DataLayout &DL,
                                                  AssumptionCache *AC,
                                                  const Instruction *CxtI,
                                                  const DominatorTree *DT) {
  if (Add && Add->hasNoSignedWrap()) {
    return OverflowResult::NeverOverflows;
  }

  // If LHS and RHS each have at least two sign bits, the addition will look
  // like
  //
  //   XX..... +
  //   YY.....
  //
  // If the carry into the most significant position is 0, X and Y can't both
  // be 1 and therefore the carry out of the addition is also 0.
  //
  // If the carry into the most significant position is 1, X and Y can't both
  // be 0 and therefore the carry out of the addition is also 1.
  //
  // Since the carry into the most significant position is always equal to
  // the carry out of the addition, there is no signed overflow.
  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
      ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
    return OverflowResult::NeverOverflows;

  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
      LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
      RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
  OverflowResult OR =
      mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
  if (OR != OverflowResult::MayOverflow)
    return OR;

  // The remaining code needs Add to be available. Return early if it is not.
  if (!Add)
    return OverflowResult::MayOverflow;

  // If the sign of Add is the same as at least one of the operands, this add
  // CANNOT overflow. If this can be determined from the known bits of the
  // operands the above signedAddMayOverflow() check will have already done so.
  // The only other way to improve on the known bits is from an assumption, so
  // call computeKnownBitsFromAssume() directly.
  bool LHSOrRHSKnownNonNegative =
      (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
  bool LHSOrRHSKnownNegative =
      (LHSRange.isAllNegative() || RHSRange.isAllNegative());
  if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
    KnownBits AddKnown(LHSRange.getBitWidth());
    computeKnownBitsFromAssume(
        Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
    if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
        (AddKnown.isNegative() && LHSOrRHSKnownNegative))
      return OverflowResult::NeverOverflows;
  }

  return OverflowResult::MayOverflow;
}

OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
                                                   const Value *RHS,
                                                   const DataLayout &DL,
                                                   AssumptionCache *AC,
                                                   const Instruction *CxtI,
                                                   const DominatorTree *DT) {
  // Checking for conditions implied by dominating conditions may be expensive.
  // Limit it to usub_with_overflow calls for now.
  if (match(CxtI,
            m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
    if (auto C =
            isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
      if (*C)
        return OverflowResult::NeverOverflows;
      return OverflowResult::AlwaysOverflowsLow;
    }
  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
      LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
      RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
  return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
}

OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
                                                 const Value *RHS,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  // If LHS and RHS each have at least two sign bits, the subtraction
  // cannot overflow.
  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
      ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
    return OverflowResult::NeverOverflows;

  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
      LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
      RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
  return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
}

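/// Check that every value user of the with.overflow result is guarded by a
/// branch on the extracted overflow bit. An illustrative IR shape (names are
/// ours, not from the source):
///   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
///   %val = extractvalue { i32, i1 } %res, 0
///   %ovf = extractvalue { i32, i1 } %res, 1
///   br i1 %ovf, label %trap, label %cont  ; %val may only be used in %cont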
bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                                     const DominatorTree &DT) {
  SmallVector<const BranchInst *, 2> GuardingBranches;
  SmallVector<const ExtractValueInst *, 2> Results;

  for (const User *U : WO->users()) {
    if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
      assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");

      if (EVI->getIndices()[0] == 0)
        Results.push_back(EVI);
      else {
        assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");

        for (const auto *U : EVI->users())
          if (const auto *B = dyn_cast<BranchInst>(U)) {
            assert(B->isConditional() && "How else is it using an i1?");
            GuardingBranches.push_back(B);
          }
      }
    } else {
      // We are using the aggregate directly in a way we don't want to analyze
      // here (storing it to a global, say).
      return false;
    }
  }

  auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
    BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
    if (!NoWrapEdge.isSingleEdge())
      return false;

    // Check if all users of the add are provably no-wrap.
    for (const auto *Result : Results) {
      // If the extractvalue itself is not executed on overflow, then we don't
      // need to check each use separately, since domination is transitive.
      if (DT.dominates(NoWrapEdge, Result->getParent()))
        continue;

      for (auto &RU : Result->uses())
        if (!DT.dominates(NoWrapEdge, RU))
          return false;
    }

    return true;
  };

  return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
}

bool llvm::canCreatePoison(const Instruction *I) {
  // See whether I has flags that may create poison.
  if (isa<OverflowingBinaryOperator>(I) &&
      (I->hasNoSignedWrap() || I->hasNoUnsignedWrap()))
    return true;
  if (isa<PossiblyExactOperator>(I) && I->isExact())
    return true;
  if (auto *FP = dyn_cast<FPMathOperator>(I)) {
    auto FMF = FP->getFastMathFlags();
    if (FMF.noNaNs() || FMF.noInfs())
      return true;
  }
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->isInBounds())
      return true;

  unsigned Opcode = I->getOpcode();

  // Check whether the opcode is a poison-generating operation.
  switch (Opcode) {
  case Instruction::Shl:
  case Instruction::AShr:
  case Instruction::LShr: {
    // Shifts return poison if the shift amount is larger than the bitwidth.
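    // For example, 'shl i32 %x, 33' is poison because the shift amount is not
    // smaller than the 32-bit width.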
    if (auto *C = dyn_cast<Constant>(I->getOperand(1))) {
      SmallVector<Constant *, 4> ShiftAmounts;
      if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
        unsigned NumElts = FVTy->getNumElements();
        for (unsigned i = 0; i < NumElts; ++i)
          ShiftAmounts.push_back(C->getAggregateElement(i));
      } else if (isa<ScalableVectorType>(C->getType()))
        return true; // Can't tell, just return true to be safe
      else
        ShiftAmounts.push_back(C);

      bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) {
        auto *CI = dyn_cast<ConstantInt>(C);
        return CI && CI->getZExtValue() < C->getType()->getIntegerBitWidth();
      });
      return !Safe;
    }
    return true;
  }
  case Instruction::FPToSI:
  case Instruction::FPToUI:
    // fptosi/ui yields poison if the resulting value does not fit in the
    // destination type.
    return true;
  case Instruction::Call:
  case Instruction::CallBr:
  case Instruction::Invoke:
    // Function calls can return a poison value even if args are non-poison
    // values.
    return true;
  case Instruction::InsertElement:
  case Instruction::ExtractElement: {
    // If the index exceeds the length of the vector, it returns poison.
    auto *VTy = cast<VectorType>(I->getOperand(0)->getType());
    unsigned IdxOp = I->getOpcode() == Instruction::InsertElement ? 2 : 1;
    auto *Idx = dyn_cast<ConstantInt>(I->getOperand(IdxOp));
    if (!Idx || Idx->getZExtValue() >= VTy->getElementCount().Min)
      return true;
    return false;
  }
  case Instruction::FNeg:
  case Instruction::PHI:
  case Instruction::Select:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::ShuffleVector:
  case Instruction::ExtractValue:
  case Instruction::InsertValue:
  case Instruction::Freeze:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::GetElementPtr:
    return false;
  default:
    if (isa<CastInst>(I))
      return false;
    else if (isa<BinaryOperator>(I))
      return false;
    // Be conservative and return true.
    return true;
  }
}

bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V,
                                            const Instruction *CtxI,
                                            const DominatorTree *DT,
                                            unsigned Depth) {
  if (Depth >= MaxDepth)
    return false;

  // If the value is a freeze instruction, then it can never
  // be undef or poison.
  if (isa<FreezeInst>(V))
    return true;
  // TODO: Some instructions are guaranteed to return neither undef
  // nor poison if their arguments are not poison/undef.

  if (auto *C = dyn_cast<Constant>(V)) {
    // TODO: We can analyze ConstExpr by opcode to determine if there is any
    // possibility of poison.
    if (isa<UndefValue>(C) || isa<ConstantExpr>(C))
      return false;

    if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
        isa<ConstantPointerNull>(C) || isa<Function>(C))
      return true;

    if (C->getType()->isVectorTy())
      return !C->containsUndefElement() && !C->containsConstantExpression();

    // TODO: Recursively analyze aggregates or other constants.
    return false;
  }

  // Strip cast operations from a pointer value.
  // Note that stripPointerCastsSameRepresentation can strip off getelementptr
  // inbounds with zero offset. To guarantee that the result isn't poison, the
  // stripped pointer is checked: it has to point into an allocated object or
  // be null, so that `inbounds` getelementptrs with a zero offset could not
  // produce poison.
  // It can strip off addrspacecasts that do not change the bit representation
  // as well. We believe that such an addrspacecast is equivalent to a no-op.
  auto *StrippedV = V->stripPointerCastsSameRepresentation();
  if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
      isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
    return true;

  auto OpCheck = [&](const Value *V) {
    return isGuaranteedNotToBeUndefOrPoison(V, CtxI, DT, Depth + 1);
  };

  if (auto *I = dyn_cast<Instruction>(V)) {
    switch (I->getOpcode()) {
    case Instruction::GetElementPtr: {
      auto *GEPI = dyn_cast<GetElementPtrInst>(I);
      if (!GEPI->isInBounds() && llvm::all_of(GEPI->operands(), OpCheck))
        return true;
      break;
    }
    case Instruction::FCmp: {
      auto *FI = dyn_cast<FCmpInst>(I);
      if (FI->getFastMathFlags().none() &&
          llvm::all_of(FI->operands(), OpCheck))
        return true;
      break;
    }
    case Instruction::BitCast:
    case Instruction::PHI:
    case Instruction::ICmp:
      if (llvm::all_of(I->operands(), OpCheck))
        return true;
      break;
    default:
      break;
    }

    if (programUndefinedIfPoison(I) && I->getType()->isIntegerTy(1))
      // Note: once we have an agreement that poison is a value-wise concept,
      // we can remove the isIntegerTy(1) constraint.
      return true;
  }

  // CtxI may be null or a cloned instruction.
  if (!CtxI || !CtxI->getParent() || !DT)
    return false;

  auto *DNode = DT->getNode(CtxI->getParent());
  if (!DNode)
    // Unreachable block
    return false;

  // If V is used as a branch condition before reaching CtxI, V cannot be
  // undef or poison.
  //   br V, BB1, BB2
  // BB1:
  //   CtxI ; V cannot be undef or poison here
  auto *Dominator = DNode->getIDom();
  while (Dominator) {
    auto *TI = Dominator->getBlock()->getTerminator();

    if (auto BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional() && BI->getCondition() == V)
        return true;
    } else if (auto SI = dyn_cast<SwitchInst>(TI)) {
      if (SI->getCondition() == V)
        return true;
    }

    Dominator = Dominator->getIDom();
  }

  return false;
}

OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
                                       Add, DL, AC, CxtI, DT);
}

OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
                                                 const Value *RHS,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
}

bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
  // Note: An atomic operation isn't guaranteed to return in a reasonable
  // amount of time because it's possible for another thread to interfere
  // with it for an arbitrary length of time, but programs aren't allowed to
  // rely on that.

  // If there is no successor, then execution can't transfer to it.
  if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
    return !CRI->unwindsToCaller();
  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
    return !CatchSwitch->unwindsToCaller();
  if (isa<ResumeInst>(I))
    return false;
  if (isa<ReturnInst>(I))
    return false;
  if (isa<UnreachableInst>(I))
    return false;

  // Calls can throw, or contain an infinite loop, or kill the process.
  if (const auto *CB = dyn_cast<CallBase>(I)) {
    // Call sites that throw have implicit non-local control flow.
    if (!CB->doesNotThrow())
      return false;

    // A function which doesn't throw and has the "willreturn" attribute will
    // always return.
    if (CB->hasFnAttr(Attribute::WillReturn))
      return true;

    // Non-throwing call sites can loop infinitely, call exit/pthread_exit
    // etc. and thus not return. However, LLVM already assumes that
    //
    //  - Thread exiting actions are modeled as writes to memory invisible to
    //    the program.
    //
    //  - Loops that don't have side effects (side effects are volatile/atomic
    //    stores and IO) always terminate (see http://llvm.org/PR965).
    //    Furthermore IO itself is also modeled as writes to memory invisible
    //    to the program.
    //
    // We rely on those assumptions here, and use the memory effects of the
    // call target as a proxy for checking that it always returns.

    // FIXME: This isn't aggressive enough; a call which only writes to a
    // global is guaranteed to return.
    return CB->onlyReadsMemory() || CB->onlyAccessesArgMemory();
  }

  // Other instructions return normally.
  return true;
}

bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
  // TODO: This is slightly conservative for invoke instructions since exiting
  // via an exception *is* normal control flow for them.
  for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
    if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
      return false;
  return true;
}

bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                                  const Loop *L) {
  // The loop header is guaranteed to be executed for every iteration.
  //
  // FIXME: Relax this constraint to cover all basic blocks that are
  // guaranteed to be executed at every iteration.
  if (I->getParent() != L->getHeader()) return false;

  for (const Instruction &LI : *L->getHeader()) {
    if (&LI == I) return true;
    if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
  }
  llvm_unreachable("Instruction not contained in its own parent basic block.");
}

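/// Return true if this instruction always yields poison whenever any of its
/// operands is poison (e.g. arithmetic, casts, icmp/fcmp, getelementptr), as
/// opposed to instructions such as select or phi that can block poison
/// propagation.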
bool llvm::propagatesPoison(const Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Freeze:
  case Instruction::Select:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Invoke:
    return false;
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::GetElementPtr:
    return true;
  default:
    if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
      return true;

    // Be conservative and return false.
    return false;
  }
}

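/// Return the operand of \p I that must not be poison for \p I to have
/// well-defined behavior (e.g. the pointer operand of a memory access, or the
/// divisor of a division), or null when no single operand has to be
/// non-poison.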
const Value *llvm::getGuaranteedNonPoisonOp(const Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Store:
    return cast<StoreInst>(I)->getPointerOperand();

  case Instruction::Load:
    return cast<LoadInst>(I)->getPointerOperand();

  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(I)->getPointerOperand();

  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(I)->getPointerOperand();

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    return I->getOperand(1);

  case Instruction::Call:
    if (auto *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::assume:
        return II->getArgOperand(0);
      default:
        return nullptr;
      }
    }
    return nullptr;

  default:
    return nullptr;
  }
}

bool llvm::mustTriggerUB(const Instruction *I,
                         const SmallSet<const Value *, 16>& KnownPoison) {
  auto *NotPoison = getGuaranteedNonPoisonOp(I);
  return (NotPoison && KnownPoison.count(NotPoison));
}

bool llvm::programUndefinedIfPoison(const Instruction *PoisonI) {
  // We currently only look for uses of poison values within the same basic
  // block, as that makes it easier to guarantee that the uses will be
  // executed given that PoisonI is executed.
  //
  // FIXME: Expand this to consider uses beyond the same basic block. To do
  // this, look out for the distinction between post-dominance and strong
  // post-dominance.
  const BasicBlock *BB = PoisonI->getParent();

  // Set of instructions that we have proved will yield poison if PoisonI
  // does.
  SmallSet<const Value *, 16> YieldsPoison;
  SmallSet<const BasicBlock *, 4> Visited;
  YieldsPoison.insert(PoisonI);
  Visited.insert(PoisonI->getParent());

  BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();

  unsigned Iter = 0;
  while (Iter++ < MaxDepth) {
    for (auto &I : make_range(Begin, End)) {
      if (&I != PoisonI) {
        if (mustTriggerUB(&I, YieldsPoison))
          return true;
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          return false;
      }

      // Mark poison that propagates from I through uses of I.
      if (YieldsPoison.count(&I)) {
        for (const User *User : I.users()) {
          const Instruction *UserI = cast<Instruction>(User);
          if (propagatesPoison(UserI))
            YieldsPoison.insert(User);
        }
      }
    }

    if (auto *NextBB = BB->getSingleSuccessor()) {
      if (Visited.insert(NextBB).second) {
        BB = NextBB;
        Begin = BB->getFirstNonPHI()->getIterator();
        End = BB->end();
        continue;
      }
    }

    break;
  }
  return false;
}

static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
  if (FMF.noNaNs())
    return true;

  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isNaN();

  if (auto *C = dyn_cast<ConstantDataVector>(V)) {
    if (!C->getElementType()->isFloatingPointTy())
      return false;
    for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
      if (C->getElementAsAPFloat(I).isNaN())
        return false;
    }
    return true;
  }

  if (isa<ConstantAggregateZero>(V))
    return true;

  return false;
}

static bool isKnownNonZero(const Value *V) {
  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isZero();

  if (auto *C = dyn_cast<ConstantDataVector>(V)) {
    if (!C->getElementType()->isFloatingPointTy())
      return false;
    for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
      if (C->getElementAsAPFloat(I).isZero())
        return false;
    }
    return true;
  }

  return false;
}

/// Match the clamp pattern for float types without caring about NaNs or
/// signed zeros. Given a non-min/max outer cmp/select from the clamp pattern,
/// this function recognizes if it can be substituted by a "canonical" min/max
/// pattern.
static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
                                               Value *CmpLHS, Value *CmpRHS,
                                               Value *TrueVal, Value *FalseVal,
                                               Value *&LHS, Value *&RHS) {
  // Try to match
  //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
  //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
  // and return a description of the outer Max/Min.

  // First, check if the select has inverse order:
  if (CmpRHS == FalseVal) {
    std::swap(TrueVal, FalseVal);
    Pred = CmpInst::getInversePredicate(Pred);
  }

  // Assume success now. If there's no match, callers should not use these
  // anyway.
  LHS = TrueVal;
  RHS = FalseVal;

  const APFloat *FC1;
  if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
    return {SPF_UNKNOWN, SPNB_NA, false};

  const APFloat *FC2;
  switch (Pred) {
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_ULT:
  case CmpInst::FCMP_ULE:
    if (match(FalseVal,
              m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
                          m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
        *FC1 < *FC2)
      return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
    break;
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_OGE:
  case CmpInst::FCMP_UGT:
  case CmpInst::FCMP_UGE:
    if (match(FalseVal,
              m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
                          m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
        *FC1 > *FC2)
      return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
    break;
  default:
    break;
  }

  return {SPF_UNKNOWN, SPNB_NA, false};
}

/// Recognize variations of:
///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
                                      Value *CmpLHS, Value *CmpRHS,
                                      Value *TrueVal, Value *FalseVal) {
  // Swap the select operands and predicate to match the patterns below.
  if (CmpRHS != TrueVal) {
    Pred = ICmpInst::getSwappedPredicate(Pred);
    std::swap(TrueVal, FalseVal);
  }
  const APInt *C1;
  if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
    const APInt *C2;
    // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
    if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
      return {SPF_SMAX, SPNB_NA, false};

    // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
    if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
      return {SPF_SMIN, SPNB_NA, false};

    // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
    if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
      return {SPF_UMAX, SPNB_NA, false};

    // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
    if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
      return {SPF_UMIN, SPNB_NA, false};
  }
  return {SPF_UNKNOWN, SPNB_NA, false};
}

/// Recognize variations of:
///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
                                               Value *CmpLHS, Value *CmpRHS,
                                               Value *TVal, Value *FVal,
                                               unsigned Depth) {
  // TODO: Allow FP min/max with nnan/nsz.
  assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");

  Value *A = nullptr, *B = nullptr;
  SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
  if (!SelectPatternResult::isMinOrMax(L.Flavor))
    return {SPF_UNKNOWN, SPNB_NA, false};

  Value *C = nullptr, *D = nullptr;
  SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
  if (L.Flavor != R.Flavor)
    return {SPF_UNKNOWN, SPNB_NA, false};

  // We have something like: x Pred y ? min(a, b) : min(c, d).
  // Try to match the compare to the min/max operations of the select operands.
  // First, make sure we have the right compare predicate.
  switch (L.Flavor) {
  case SPF_SMIN:
    if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  case SPF_SMAX:
    if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  case SPF_UMIN:
    if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  case SPF_UMAX:
    if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  default:
    return {SPF_UNKNOWN, SPNB_NA, false};
  }

  // If there is a common operand in the already matched min/max and the other
  // min/max operands match the compare operands (either directly or inverted),
  // then this is min/max of the same flavor.

  // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
  // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
  if (D == B) {
    if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
                                         match(A, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }
  // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
  // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
  if (C == B) {
    if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
                                         match(A, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }
  // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
  // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
  if (D == A) {
    if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
                                         match(B, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }
  // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
  // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
  if (C == A) {
    if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
                                         match(B, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }

  return {SPF_UNKNOWN, SPNB_NA, false};
}

/// If the input value is the result of a 'not' op, constant integer, or vector
/// splat of a constant integer, return the bitwise-not source value.
/// TODO: This could be extended to handle non-splat vector integer constants.
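/// E.g. for 'xor i8 %x, -1' this returns %x, and for the constant i8 15 it
/// returns i8 -16 (its bitwise complement).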
static Value *getNotValue(Value *V) {
  Value *NotV;
  if (match(V, m_Not(m_Value(NotV))))
    return NotV;

  const APInt *C;
  if (match(V, m_APInt(C)))
    return ConstantInt::get(V->getType(), ~(*C));

  return nullptr;
}

/// Match non-obvious integer minimum and maximum sequences.
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
                                       Value *CmpLHS, Value *CmpRHS,
                                       Value *TrueVal, Value *FalseVal,
                                       Value *&LHS, Value *&RHS,
                                       unsigned Depth) {
  // Assume success. If there's no match, callers should not use these anyway.
  LHS = TrueVal;
  RHS = FalseVal;

  SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
  if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
    return SPR;

  SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
  if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
    return SPR;

  // Look through 'not' ops to find disguised min/max.
  // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
  // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
  if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
    switch (Pred) {
    case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
    case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
    case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
    case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
    default: break;
    }
  }

  // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
  // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
  if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
    switch (Pred) {
    case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
    case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
    case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
    case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
    default: break;
    }
  }

  if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Z = X -nsw Y
  // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
  // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
  if (match(TrueVal, m_Zero()) &&
      match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};

  // Z = X -nsw Y
  // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
  // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
  if (match(FalseVal, m_Zero()) &&
      match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};

  const APInt *C1;
  if (!match(CmpRHS, m_APInt(C1)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  // An unsigned min/max can be written with a signed compare.
  const APInt *C2;
  if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
      (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
    // Is the sign bit set?
    // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
    // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
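    // For example, with i8: (X <s 0) ? X : 127 is umax(X, 127), because the
    // negative values of X are exactly the unsigned values above 127.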
5544 if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
5545 C2->isMaxSignedValue())
5546 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5547
5548 // Is the sign bit clear?
5549 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
5550 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
5551 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
5552 C2->isMinSignedValue())
5553 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5554 }
5555
5556 return {SPF_UNKNOWN, SPNB_NA, false};
5557 }
5558
isKnownNegation(const Value * X,const Value * Y,bool NeedNSW)5559 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
5560 assert(X && Y && "Invalid operand");
5561
5562 // X = sub (0, Y) || X = sub nsw (0, Y)
5563 if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
5564 (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
5565 return true;
5566
5567 // Y = sub (0, X) || Y = sub nsw (0, X)
5568 if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
5569 (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
5570 return true;
5571
5572 // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
5573 Value *A, *B;
5574 return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
5575 match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
5576 (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
5577 match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
5578 }
5579
matchSelectPattern(CmpInst::Predicate Pred,FastMathFlags FMF,Value * CmpLHS,Value * CmpRHS,Value * TrueVal,Value * FalseVal,Value * & LHS,Value * & RHS,unsigned Depth)5580 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
5581 FastMathFlags FMF,
5582 Value *CmpLHS, Value *CmpRHS,
5583 Value *TrueVal, Value *FalseVal,
5584 Value *&LHS, Value *&RHS,
5585 unsigned Depth) {
5586 if (CmpInst::isFPPredicate(Pred)) {
5587 // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
5588 // 0.0 operand, set the compare's 0.0 operands to that same value for the
5589 // purpose of identifying min/max. Disregard vector constants with undefined
5590 // elements because those can not be back-propagated for analysis.
5591 Value *OutputZeroVal = nullptr;
5592 if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
5593 !cast<Constant>(TrueVal)->containsUndefElement())
5594 OutputZeroVal = TrueVal;
5595 else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
5596 !cast<Constant>(FalseVal)->containsUndefElement())
5597 OutputZeroVal = FalseVal;
5598
5599 if (OutputZeroVal) {
5600 if (match(CmpLHS, m_AnyZeroFP()))
5601 CmpLHS = OutputZeroVal;
5602 if (match(CmpRHS, m_AnyZeroFP()))
5603 CmpRHS = OutputZeroVal;
5604 }
5605 }
5606
5607 LHS = CmpLHS;
5608 RHS = CmpRHS;
5609
5610 // Signed zero may return inconsistent results between implementations.
5611 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
5612 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
5613 // Therefore, we behave conservatively and only proceed if at least one of the
5614 // operands is known to not be zero or if we don't care about signed zero.
5615 switch (Pred) {
5616 default: break;
5617 // FIXME: Include OGT/OLT/UGT/ULT.
5618 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
5619 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
5620 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5621 !isKnownNonZero(CmpRHS))
5622 return {SPF_UNKNOWN, SPNB_NA, false};
5623 }
5624
5625 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
5626 bool Ordered = false;
5627
5628 // When given one NaN and one non-NaN input:
5629 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
5630 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
5631 // ordered comparison fails), which could be NaN or non-NaN.
5632 // so here we discover exactly what NaN behavior is required/accepted.
5633 if (CmpInst::isFPPredicate(Pred)) {
5634 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
5635 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
5636
5637 if (LHSSafe && RHSSafe) {
5638 // Both operands are known non-NaN.
5639 NaNBehavior = SPNB_RETURNS_ANY;
5640 } else if (CmpInst::isOrdered(Pred)) {
5641 // An ordered comparison will return false when given a NaN, so it
5642 // returns the RHS.
5643 Ordered = true;
5644 if (LHSSafe)
5645 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
5646 NaNBehavior = SPNB_RETURNS_NAN;
5647 else if (RHSSafe)
5648 NaNBehavior = SPNB_RETURNS_OTHER;
5649 else
5650 // Completely unsafe.
5651 return {SPF_UNKNOWN, SPNB_NA, false};
5652 } else {
5653 Ordered = false;
5654 // An unordered comparison will return true when given a NaN, so it
5655 // returns the LHS.
5656 if (LHSSafe)
5657 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
5658 NaNBehavior = SPNB_RETURNS_OTHER;
5659 else if (RHSSafe)
5660 NaNBehavior = SPNB_RETURNS_NAN;
5661 else
5662 // Completely unsafe.
5663 return {SPF_UNKNOWN, SPNB_NA, false};
5664 }
5665 }
5666
5667 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
5668 std::swap(CmpLHS, CmpRHS);
5669 Pred = CmpInst::getSwappedPredicate(Pred);
5670 if (NaNBehavior == SPNB_RETURNS_NAN)
5671 NaNBehavior = SPNB_RETURNS_OTHER;
5672 else if (NaNBehavior == SPNB_RETURNS_OTHER)
5673 NaNBehavior = SPNB_RETURNS_NAN;
5674 Ordered = !Ordered;
5675 }
5676
5677 // ([if]cmp X, Y) ? X : Y
5678 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
5679 switch (Pred) {
5680 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
5681 case ICmpInst::ICMP_UGT:
5682 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
5683 case ICmpInst::ICMP_SGT:
5684 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
5685 case ICmpInst::ICMP_ULT:
5686 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
5687 case ICmpInst::ICMP_SLT:
5688 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
5689 case FCmpInst::FCMP_UGT:
5690 case FCmpInst::FCMP_UGE:
5691 case FCmpInst::FCMP_OGT:
5692 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
5693 case FCmpInst::FCMP_ULT:
5694 case FCmpInst::FCMP_ULE:
5695 case FCmpInst::FCMP_OLT:
5696 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
5697 }
5698 }

  if (isKnownNegation(TrueVal, FalseVal)) {
    // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
    // match against either LHS or sext(LHS).
    auto MaybeSExtCmpLHS =
        m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
    auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
    auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
    if (match(TrueVal, MaybeSExtCmpLHS)) {
      // Set the return values. If the compare uses the negated value (-X >s 0),
      // swap the return values because the negated value is always 'RHS'.
      LHS = TrueVal;
      RHS = FalseVal;
      if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
        std::swap(LHS, RHS);

      // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
      // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
      if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
        return {SPF_ABS, SPNB_NA, false};

      // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
      if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
        return {SPF_ABS, SPNB_NA, false};

      // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
      // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
      if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
        return {SPF_NABS, SPNB_NA, false};
    } else if (match(FalseVal, MaybeSExtCmpLHS)) {
      // Set the return values. If the compare uses the negated value (-X >s 0),
      // swap the return values because the negated value is always 'RHS'.
      LHS = FalseVal;
      RHS = TrueVal;
      if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
        std::swap(LHS, RHS);

      // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
      // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
      if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
        return {SPF_NABS, SPNB_NA, false};

      // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
      // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
      if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
        return {SPF_ABS, SPNB_NA, false};
    }
  }
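  // e.g. with "%n = sub i32 0, %x", the IR
  //   %c = icmp sgt i32 %x, -1
  //   %s = select i1 %c, i32 %x, i32 %n
  // matches the first case above and is classified as SPF_ABS.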

  if (CmpInst::isIntPredicate(Pred))
    return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);

  // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
  // may return either -0.0 or 0.0, so an fcmp/select pair has stricter
  // semantics than minNum. Be conservative in such cases.
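  // e.g. "select (fcmp olt double 0.0, -0.0), double 0.0, double -0.0" must
  // produce -0.0 because the compare is false, while minnum(0.0, -0.0) may
  // return either operand.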
  if (NaNBehavior != SPNB_RETURNS_ANY ||
      (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
       !isKnownNonZero(CmpRHS)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
}

/// Helps to match a select pattern in case of a type mismatch.
///
/// The function handles the case when the type of the true and false values of
/// a select instruction differs from the type of the cmp instruction operands
/// because of a cast instruction. The function checks if it is legal to move
/// the cast operation after "select". If yes, it returns the new second value
/// of "select" (with the assumption that the cast is moved):
/// 1. As the operand of the cast instruction when both values of "select" are
///    the same cast instruction.
/// 2. As a restored constant (by applying the reverse cast operation) when the
///    first value of the "select" is a cast operation and the second value is
///    a constant.
/// NOTE: We return only the new second value because the first value can be
/// accessed as the operand of the cast instruction.
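///
/// For example (case 1), given
///   %cmp = icmp slt i32 %a, %b
///   %sel = select i1 %cmp, i64 %x, i64 %y
/// where %x = sext i32 %a to i64 and %y = sext i32 %b to i64, the function
/// returns %b so the caller can match the select against the narrow values
/// %a and %b.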
static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
                              Instruction::CastOps *CastOp) {
  auto *Cast1 = dyn_cast<CastInst>(V1);
  if (!Cast1)
    return nullptr;

  *CastOp = Cast1->getOpcode();
  Type *SrcTy = Cast1->getSrcTy();
  if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
    // If V1 and V2 are both the same cast from the same type, look through V1.
    if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
      return Cast2->getOperand(0);
    return nullptr;
  }

  auto *C = dyn_cast<Constant>(V2);
  if (!C)
    return nullptr;

  Constant *CastedTo = nullptr;
  switch (*CastOp) {
  case Instruction::ZExt:
    if (CmpI->isUnsigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy);
    break;
  case Instruction::SExt:
    if (CmpI->isSigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
    break;
  case Instruction::Trunc:
    Constant *CmpConst;
    if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
        CmpConst->getType() == SrcTy) {
      // Here we have the following case:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %tr = trunc iN %x to iK
      //   %narrowsel = select i1 %cond, iK %tr, iK C
      //
      // We can always move trunc after select operation:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %widesel = select i1 %cond, iN %x, iN CmpConst
      //   %tr = trunc iN %widesel to iK
      //
      // Note that C could be extended in any way because we don't care about
      // upper bits after truncation. It can't be an abs pattern, because that
      // would look like:
      //
      //   select i1 %cond, x, -x.
      //
      // So only a min/max pattern can be matched. Such a match requires the
      // widened C to equal CmpConst, which is why we set the widened C to
      // CmpConst here; the condition trunc(CmpConst) == C is checked below.
      CastedTo = CmpConst;
    } else {
      CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
    }
    break;
  case Instruction::FPTrunc:
    CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
    break;
  case Instruction::FPExt:
    CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
    break;
  case Instruction::FPToUI:
    CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
    break;
  case Instruction::FPToSI:
    CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
    break;
  case Instruction::UIToFP:
    CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
    break;
  case Instruction::SIToFP:
    CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
    break;
  default:
    break;
  }

  if (!CastedTo)
    return nullptr;

  // Make sure the cast doesn't lose any information.
  Constant *CastedBack =
      ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
  if (CastedBack != C)
    return nullptr;

  return CastedTo;
}

SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                             Instruction::CastOps *CastOp,
                                             unsigned Depth) {
  if (Depth >= MaxDepth)
    return {SPF_UNKNOWN, SPNB_NA, false};

  SelectInst *SI = dyn_cast<SelectInst>(V);
  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};

  Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();

  return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
                                            CastOp, Depth);
}

SelectPatternResult llvm::matchDecomposedSelectPattern(
    CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
    Instruction::CastOps *CastOp, unsigned Depth) {
  CmpInst::Predicate Pred = CmpI->getPredicate();
  Value *CmpLHS = CmpI->getOperand(0);
  Value *CmpRHS = CmpI->getOperand(1);
  FastMathFlags FMF;
  if (isa<FPMathOperator>(CmpI))
    FMF = CmpI->getFastMathFlags();

  // Bail out early.
  if (CmpI->isEquality())
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Deal with type mismatches.
  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
    if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  cast<CastInst>(TrueVal)->getOperand(0), C,
                                  LHS, RHS, Depth);
    }
    if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  C, cast<CastInst>(FalseVal)->getOperand(0),
                                  LHS, RHS, Depth);
    }
  }
  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
                              LHS, RHS, Depth);
}

CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
  if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
  if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
  if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
  if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
  if (SPF == SPF_FMINNUM)
    return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
  if (SPF == SPF_FMAXNUM)
    return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
  llvm_unreachable("unhandled!");
}

SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
  if (SPF == SPF_SMIN) return SPF_SMAX;
  if (SPF == SPF_UMIN) return SPF_UMAX;
  if (SPF == SPF_SMAX) return SPF_SMIN;
  if (SPF == SPF_UMAX) return SPF_UMIN;
  llvm_unreachable("unhandled!");
}

CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
  return getMinMaxPred(getInverseMinMaxFlavor(SPF));
}

/// Return true if "icmp Pred LHS RHS" is always true.
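/// e.g. "icmp ule %x, (add nuw %x, C)" is always true for any C (see the
/// ICMP_ULE case below).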
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
                            const Value *RHS, const DataLayout &DL,
                            unsigned Depth) {
  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
    return true;

  switch (Pred) {
  default:
    return false;

  case CmpInst::ICMP_SLE: {
    const APInt *C;

    // LHS s<= LHS +_{nsw} C if C >= 0
    if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
      return !C->isNegative();
    return false;
  }

  case CmpInst::ICMP_ULE: {
    const APInt *C;

    // LHS u<= LHS +_{nuw} C for any C
    if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
      return true;

    // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
    auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
                                       const Value *&X,
                                       const APInt *&CA, const APInt *&CB) {
      if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
          match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
        return true;

      // If X & C == 0 then (X | C) == X +_{nuw} C
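      // e.g. if the low two bits of X are known zero, then A = (X | 1) and
      // B = (X | 2) behave as X +_{nuw} 1 and X +_{nuw} 2, so A u<= B follows
      // from 1 u<= 2.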
      if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
          match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
        KnownBits Known(CA->getBitWidth());
        computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
                         /*CxtI*/ nullptr, /*DT*/ nullptr);
        if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
          return true;
      }

      return false;
    };

    const Value *X;
    const APInt *CLHS, *CRHS;
    if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
      return CLHS->ule(*CRHS);

    return false;
  }
  }
}

/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true. Otherwise, return None.
static Optional<bool>
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
                      const Value *ARHS, const Value *BLHS, const Value *BRHS,
                      const DataLayout &DL, unsigned Depth) {
  switch (Pred) {
  default:
    return None;

  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SLE:
    if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
      return true;
    return None;

  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
      return true;
    return None;
  }
}

/// Return true if the operands of the two compares match. IsSwappedOps is true
/// when the operands match, but are swapped.
static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
                          const Value *BLHS, const Value *BRHS,
                          bool &IsSwappedOps) {
  bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
  IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
  return IsMatchingOps || IsSwappedOps;
}

/// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
/// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
/// Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
                                                    CmpInst::Predicate BPred,
                                                    bool AreSwappedOps) {
  // Canonicalize the predicate as if the operands were not commuted.
  if (AreSwappedOps)
    BPred = ICmpInst::getSwappedPredicate(BPred);

  if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
    return true;
  if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
    return false;

  return None;
}

/// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
/// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
/// Otherwise, return None if we can't infer anything.
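/// e.g. "icmp ult X, 8" implies "icmp ult X, 16" is true: the exact region
/// [0, 8) of the first compare lies entirely inside the allowed region
/// [0, 16) of the second, so the difference computed below is empty.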
static Optional<bool>
isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
                                 const ConstantInt *C1,
                                 CmpInst::Predicate BPred,
                                 const ConstantInt *C2) {
  ConstantRange DomCR =
      ConstantRange::makeExactICmpRegion(APred, C1->getValue());
  ConstantRange CR =
      ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
  ConstantRange Intersection = DomCR.intersectWith(CR);
  ConstantRange Difference = DomCR.difference(CR);
  if (Intersection.isEmptySet())
    return false;
  if (Difference.isEmptySet())
    return true;
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
                                         CmpInst::Predicate BPred,
                                         const Value *BLHS, const Value *BRHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  Value *ALHS = LHS->getOperand(0);
  Value *ARHS = LHS->getOperand(1);

  // The rest of the logic assumes the LHS condition is true. If that's not the
  // case, invert the predicate to make it so.
  CmpInst::Predicate APred =
      LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();

  // Can we infer anything when the two compares have matching operands?
  bool AreSwappedOps;
  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
    if (Optional<bool> Implication = isImpliedCondMatchingOperands(
            APred, BPred, AreSwappedOps))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  // Can we infer anything when the LHS operands match and the RHS operands are
  // constants (not necessarily matching)?
  if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
    if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
            APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  if (APred == BPred)
    return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything. We expect the
/// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
static Optional<bool>
isImpliedCondAndOr(const BinaryOperator *LHS, CmpInst::Predicate RHSPred,
                   const Value *RHSOp0, const Value *RHSOp1,
                   const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
  // The LHS must be an 'or' or an 'and' instruction.
  assert((LHS->getOpcode() == Instruction::And ||
          LHS->getOpcode() == Instruction::Or) &&
         "Expected LHS to be 'and' or 'or'.");

  assert(Depth <= MaxDepth && "Hit recursion limit");

  // If the result of an 'or' is false, then we know both legs of the 'or' are
  // false. Similarly, if the result of an 'and' is true, then we know both
  // legs of the 'and' are true.
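  // e.g. if "%c = and i1 %a, %b" is known to be true, then %a and %b are both
  // true, so it suffices for either one of them to imply the RHS condition.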
  Value *ALHS, *ARHS;
  if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
      (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
    // FIXME: Make this non-recursive.
    if (Optional<bool> Implication = isImpliedCondition(
            ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
      return Implication;
    if (Optional<bool> Implication = isImpliedCondition(
            ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
      return Implication;
    return None;
  }
  return None;
}

Optional<bool>
llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
                         const Value *RHSOp0, const Value *RHSOp1,
                         const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
  // Bail out when we hit the limit.
  if (Depth == MaxDepth)
    return None;

  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
  // example.
  if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
    return None;

  Type *OpTy = LHS->getType();
  assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");

  // FIXME: Extend the code below to handle vectors.
  if (OpTy->isVectorTy())
    return None;

  assert(OpTy->isIntegerTy(1) && "implied by above");

  // Both LHS and RHS are icmps.
  const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
  if (LHSCmp)
    return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
                              Depth);

  // The LHS should be an 'or' or an 'and' instruction. We expect the RHS to
  // be an icmp. FIXME: Add support for and/or on the RHS.
  const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
  if (LHSBO) {
    if ((LHSBO->getOpcode() == Instruction::And ||
         LHSBO->getOpcode() == Instruction::Or))
      return isImpliedCondAndOr(LHSBO, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
                                Depth);
  }
  return None;
}

Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
                                        const DataLayout &DL, bool LHSIsTrue,
                                        unsigned Depth) {
  // LHS ==> RHS by definition
  if (LHS == RHS)
    return LHSIsTrue;

  const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
  if (RHSCmp)
    return isImpliedCondition(LHS, RHSCmp->getPredicate(),
                              RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
                              LHSIsTrue, Depth);
  return None;
}

// Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
// condition dominating ContextI, or nullptr if no such condition is found.
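// e.g. given "br i1 %c, label %then, label %else" in the single predecessor
// of ContextI's block, a query from inside %then yields {%c, true} and one
// from inside %else yields {%c, false}.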
static std::pair<Value *, bool>
getDomPredecessorCondition(const Instruction *ContextI) {
  if (!ContextI || !ContextI->getParent())
    return {nullptr, false};

  // TODO: This is a poor/cheap way to determine dominance. Should we use a
  // dominator tree (e.g., from a SimplifyQuery) instead?
  const BasicBlock *ContextBB = ContextI->getParent();
  const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
  if (!PredBB)
    return {nullptr, false};

  // We need a conditional branch in the predecessor.
  Value *PredCond;
  BasicBlock *TrueBB, *FalseBB;
  if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
    return {nullptr, false};

  // The branch should get simplified. Don't bother simplifying this condition.
  if (TrueBB == FalseBB)
    return {nullptr, false};

  assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
         "Predecessor block does not point to successor?");

  // Is this condition implied by the predecessor condition?
  return {PredCond, TrueBB == ContextBB};
}

Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
                                             const Instruction *ContextI,
                                             const DataLayout &DL) {
  assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
  auto PredCond = getDomPredecessorCondition(ContextI);
  if (PredCond.first)
    return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
  return None;
}

Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
                                             const Value *LHS, const Value *RHS,
                                             const Instruction *ContextI,
                                             const DataLayout &DL) {
  auto PredCond = getDomPredecessorCondition(ContextI);
  if (PredCond.first)
    return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
                              PredCond.second);
  return None;
}

static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
                              APInt &Upper, const InstrInfoQuery &IIQ) {
  unsigned Width = Lower.getBitWidth();
  const APInt *C;
  switch (BO.getOpcode()) {
  case Instruction::Add:
    if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
      // FIXME: If we have both nuw and nsw, we should reduce the range further.
      if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
        // 'add nuw x, C' produces [C, UINT_MAX].
        Lower = *C;
      } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
        if (C->isNegative()) {
          // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
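          // e.g. for i8, 'add nsw x, -1' produces [-128, 126].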
          Lower = APInt::getSignedMinValue(Width);
          Upper = APInt::getSignedMaxValue(Width) + *C + 1;
        } else {
          // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
          Lower = APInt::getSignedMinValue(Width) + *C;
          Upper = APInt::getSignedMaxValue(Width) + 1;
        }
      }
    }
    break;

  case Instruction::And:
    if (match(BO.getOperand(1), m_APInt(C)))
      // 'and x, C' produces [0, C].
      Upper = *C + 1;
    break;

  case Instruction::Or:
    if (match(BO.getOperand(1), m_APInt(C)))
      // 'or x, C' produces [C, UINT_MAX].
      Lower = *C;
    break;

  case Instruction::AShr:
    if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
      // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
      Lower = APInt::getSignedMinValue(Width).ashr(*C);
      Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      unsigned ShiftAmount = Width - 1;
      if (!C->isNullValue() && IIQ.isExact(&BO))
        ShiftAmount = C->countTrailingZeros();
      if (C->isNegative()) {
        // 'ashr C, x' produces [C, C >> (Width-1)]
        Lower = *C;
        Upper = C->ashr(ShiftAmount) + 1;
      } else {
        // 'ashr C, x' produces [C >> (Width-1), C]
        Lower = C->ashr(ShiftAmount);
        Upper = *C + 1;
      }
    }
    break;

  case Instruction::LShr:
    if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
      // 'lshr x, C' produces [0, UINT_MAX >> C].
      Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      // 'lshr C, x' produces [C >> (Width-1), C].
      unsigned ShiftAmount = Width - 1;
      if (!C->isNullValue() && IIQ.isExact(&BO))
        ShiftAmount = C->countTrailingZeros();
      Lower = C->lshr(ShiftAmount);
      Upper = *C + 1;
    }
    break;

  case Instruction::Shl:
    if (match(BO.getOperand(0), m_APInt(C))) {
      if (IIQ.hasNoUnsignedWrap(&BO)) {
        // 'shl nuw C, x' produces [C, C << CLZ(C)]
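        // e.g. for i8, 'shl nuw 3, x' produces [3, 192].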
        Lower = *C;
        Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
      } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
        if (C->isNegative()) {
          // 'shl nsw C, x' produces [C << CLO(C)-1, C]
          unsigned ShiftAmount = C->countLeadingOnes() - 1;
          Lower = C->shl(ShiftAmount);
          Upper = *C + 1;
        } else {
          // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
          unsigned ShiftAmount = C->countLeadingZeros() - 1;
          Lower = *C;
          Upper = C->shl(ShiftAmount) + 1;
        }
      }
    }
    break;

  case Instruction::SDiv:
    if (match(BO.getOperand(1), m_APInt(C))) {
      APInt IntMin = APInt::getSignedMinValue(Width);
      APInt IntMax = APInt::getSignedMaxValue(Width);
      if (C->isAllOnesValue()) {
        // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
        Lower = IntMin + 1;
        Upper = IntMax + 1;
      } else if (C->countLeadingZeros() < Width - 1) {
        // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
        // where C != -1 and C != 0 and C != 1
        Lower = IntMin.sdiv(*C);
        Upper = IntMax.sdiv(*C);
        if (Lower.sgt(Upper))
          std::swap(Lower, Upper);
        Upper = Upper + 1;
        assert(Upper != Lower && "Upper part of range has wrapped!");
      }
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      if (C->isMinSignedValue()) {
        // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
        Lower = *C;
        Upper = Lower.lshr(1) + 1;
      } else {
        // 'sdiv C, x' produces [-|C|, |C|].
        Upper = C->abs() + 1;
        Lower = (-Upper) + 1;
      }
    }
    break;

  case Instruction::UDiv:
    if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
      // 'udiv x, C' produces [0, UINT_MAX / C].
      Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      // 'udiv C, x' produces [0, C].
      Upper = *C + 1;
    }
    break;

  case Instruction::SRem:
    if (match(BO.getOperand(1), m_APInt(C))) {
      // 'srem x, C' produces (-|C|, |C|).
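      // e.g. for i8, 'srem x, 10' produces [-9, 9].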
      Upper = C->abs();
      Lower = (-Upper) + 1;
    }
    break;

  case Instruction::URem:
    if (match(BO.getOperand(1), m_APInt(C)))
      // 'urem x, C' produces [0, C).
      Upper = *C;
    break;

  default:
    break;
  }
}

static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
                                  APInt &Upper) {
  unsigned Width = Lower.getBitWidth();
  const APInt *C;
  switch (II.getIntrinsicID()) {
  case Intrinsic::uadd_sat:
    // uadd.sat(x, C) produces [C, UINT_MAX].
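    // e.g. for i8, uadd.sat(x, 100) produces [100, 255].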
    if (match(II.getOperand(0), m_APInt(C)) ||
        match(II.getOperand(1), m_APInt(C)))
      Lower = *C;
    break;
  case Intrinsic::sadd_sat:
    if (match(II.getOperand(0), m_APInt(C)) ||
        match(II.getOperand(1), m_APInt(C))) {
      if (C->isNegative()) {
        // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
        Lower = APInt::getSignedMinValue(Width);
        Upper = APInt::getSignedMaxValue(Width) + *C + 1;
      } else {
        // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
        Lower = APInt::getSignedMinValue(Width) + *C;
        Upper = APInt::getSignedMaxValue(Width) + 1;
      }
    }
    break;
  case Intrinsic::usub_sat:
    // usub.sat(C, x) produces [0, C].
    if (match(II.getOperand(0), m_APInt(C)))
      Upper = *C + 1;
    // usub.sat(x, C) produces [0, UINT_MAX - C].
    else if (match(II.getOperand(1), m_APInt(C)))
      Upper = APInt::getMaxValue(Width) - *C + 1;
    break;
  case Intrinsic::ssub_sat:
    if (match(II.getOperand(0), m_APInt(C))) {
      if (C->isNegative()) {
        // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
        Lower = APInt::getSignedMinValue(Width);
        Upper = *C - APInt::getSignedMinValue(Width) + 1;
      } else {
        // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
        Lower = *C - APInt::getSignedMaxValue(Width);
        Upper = APInt::getSignedMaxValue(Width) + 1;
      }
    } else if (match(II.getOperand(1), m_APInt(C))) {
      if (C->isNegative()) {
        // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
        Lower = APInt::getSignedMinValue(Width) - *C;
        Upper = APInt::getSignedMaxValue(Width) + 1;
      } else {
        // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
        Lower = APInt::getSignedMinValue(Width);
        Upper = APInt::getSignedMaxValue(Width) - *C + 1;
      }
    }
    break;
  default:
    break;
  }
}

static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
                                      APInt &Upper, const InstrInfoQuery &IIQ) {
  const Value *LHS = nullptr, *RHS = nullptr;
  SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
  if (R.Flavor == SPF_UNKNOWN)
    return;

  unsigned BitWidth = SI.getType()->getScalarSizeInBits();

  if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
    // If the negation part of the abs (in RHS) has the NSW flag,
    // then the result of abs(X) is [0..SIGNED_MAX],
    // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
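    // e.g. for i8, abs with nsw gives [0, 127]; without nsw, -128 is also a
    // possible result, because negating -128 wraps back to -128.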
    Lower = APInt::getNullValue(BitWidth);
    if (match(RHS, m_Neg(m_Specific(LHS))) &&
        IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
      Upper = APInt::getSignedMaxValue(BitWidth) + 1;
    else
      Upper = APInt::getSignedMinValue(BitWidth) + 1;
    return;
  }

  if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
    // The result of -abs(X) is <= 0.
    Lower = APInt::getSignedMinValue(BitWidth);
    Upper = APInt(BitWidth, 1);
    return;
  }

  const APInt *C;
  if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
    return;

  switch (R.Flavor) {
  case SPF_UMIN:
    Upper = *C + 1;
    break;
  case SPF_UMAX:
    Lower = *C;
    break;
  case SPF_SMIN:
    Lower = APInt::getSignedMinValue(BitWidth);
    Upper = *C + 1;
    break;
  case SPF_SMAX:
    Lower = *C;
    Upper = APInt::getSignedMaxValue(BitWidth) + 1;
    break;
  default:
    break;
  }
}

ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo,
                                         AssumptionCache *AC,
                                         const Instruction *CtxI,
                                         unsigned Depth) {
  assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");

  if (Depth == MaxDepth)
    return ConstantRange::getFull(V->getType()->getScalarSizeInBits());

  const APInt *C;
  if (match(V, m_APInt(C)))
    return ConstantRange(*C);

  InstrInfoQuery IIQ(UseInstrInfo);
  unsigned BitWidth = V->getType()->getScalarSizeInBits();
  APInt Lower = APInt(BitWidth, 0);
  APInt Upper = APInt(BitWidth, 0);
  if (auto *BO = dyn_cast<BinaryOperator>(V))
    setLimitsForBinOp(*BO, Lower, Upper, IIQ);
  else if (auto *II = dyn_cast<IntrinsicInst>(V))
    setLimitsForIntrinsic(*II, Lower, Upper);
  else if (auto *SI = dyn_cast<SelectInst>(V))
    setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);

  ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);

  if (auto *I = dyn_cast<Instruction>(V))
    if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
      CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));

  if (CtxI && AC) {
    // Try to restrict the range based on information from assumptions.
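    // e.g. a dominating "llvm.assume(icmp ult %v, 100)" narrows CR to the
    // satisfying region [0, 100).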
    for (auto &AssumeVH : AC->assumptionsFor(V)) {
      if (!AssumeVH)
        continue;
      CallInst *I = cast<CallInst>(AssumeVH);
      assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
             "Got assumption for the wrong function!");
      assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
             "must be an assume intrinsic");

      if (!isValidAssumeForContext(I, CtxI, nullptr))
        continue;
      Value *Arg = I->getArgOperand(0);
      ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
      // Currently we just use information from comparisons.
      if (!Cmp || Cmp->getOperand(0) != V)
        continue;
      ConstantRange RHS = computeConstantRange(Cmp->getOperand(1), UseInstrInfo,
                                               AC, I, Depth + 1);
      CR = CR.intersectWith(
          ConstantRange::makeSatisfyingICmpRegion(Cmp->getPredicate(), RHS));
    }
  }

  return CR;
}

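/// Compute the constant byte offset implied by GEP indices [Idx, end), or
/// None if an index is non-constant or a scalable type is indexed.
/// e.g. for "gep [4 x i32], [4 x i32]* %p, i64 0, i64 2" with Idx == 1, the
/// result is 0 * 16 + 2 * 4 = 8 bytes.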
static Optional<int64_t>
getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return None;
    if (OpC->isZero())
      continue; // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or fixed-length
    // vector. Multiply the index by the ElementSize.
    TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
    if (Size.isScalable())
      return None;
    Offset += Size.getFixedSize() * OpC->getSExtValue();
  }

  return Offset;
}

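/// If Ptr2 can be expressed as Ptr1 plus a constant byte offset, return that
/// offset (in bytes); e.g. for "%q = getelementptr i8, i8* %p, i64 4",
/// isPointerOffset(%p, %q, DL) returns 4.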
Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
                                        const DataLayout &DL) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();

  // Handle the trivial case first.
  if (Ptr1 == Ptr2) {
    return 0;
  }

  const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  // If one pointer is a GEP see if the GEP is a constant offset from the base,
  // as in "P" and "gep P, 1".
  // Also do this iteratively to handle the following case:
  //   Ptr_t1 = GEP Ptr1, c1
  //   Ptr_t2 = GEP Ptr_t1, c2
  //   Ptr2 = GEP Ptr_t2, c3
  // where we will return c1+c2+c3.
  // TODO: Handle the case when both Ptr1 and Ptr2 are GEPs of some common base
  // -- replace getOffsetFromBase with getOffsetAndBase, check that the bases
  // are the same, and return the difference between the offsets.
  auto getOffsetFromBase = [&DL](const GEPOperator *GEP,
                                 const Value *Ptr) -> Optional<int64_t> {
    const GEPOperator *GEP_T = GEP;
    int64_t OffsetVal = 0;
    bool HasSameBase = false;
    while (GEP_T) {
      auto Offset = getOffsetFromIndex(GEP_T, 1, DL);
      if (!Offset)
        return None;
      OffsetVal += *Offset;
      auto Op0 = GEP_T->getOperand(0)->stripPointerCasts();
      if (Op0 == Ptr) {
        HasSameBase = true;
        break;
      }
      GEP_T = dyn_cast<GEPOperator>(Op0);
    }
    if (!HasSameBase)
      return None;
    return OffsetVal;
  };

  if (GEP1) {
    auto Offset = getOffsetFromBase(GEP1, Ptr2);
    if (Offset)
      return -*Offset;
  }
  if (GEP2) {
    auto Offset = getOffsetFromBase(GEP2, Ptr1);
    if (Offset)
      return Offset;
  }

  // Otherwise, we only handle the case where Ptr1 and Ptr2 are both GEPs with
  // an identical base. After that base, they may have some number of common
  // (and potentially variable) indices, followed by a constant offset that
  // determines their offset from each other. We handle no other cases.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return None;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL);
  auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);
  if (!Offset1 || !Offset2)
    return None;
  return *Offset2 - *Offset1;
}