1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains routines that help analyze properties that chains of
10 // computations have.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Analysis/ValueTracking.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/AssumptionCache.h"
28 #include "llvm/Analysis/GuardUtils.h"
29 #include "llvm/Analysis/InstructionSimplify.h"
30 #include "llvm/Analysis/Loads.h"
31 #include "llvm/Analysis/LoopInfo.h"
32 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
33 #include "llvm/Analysis/TargetLibraryInfo.h"
34 #include "llvm/IR/Argument.h"
35 #include "llvm/IR/Attributes.h"
36 #include "llvm/IR/BasicBlock.h"
37 #include "llvm/IR/CallSite.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/ConstantRange.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/DiagnosticInfo.h"
43 #include "llvm/IR/Dominators.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/GetElementPtrTypeIterator.h"
46 #include "llvm/IR/GlobalAlias.h"
47 #include "llvm/IR/GlobalValue.h"
48 #include "llvm/IR/GlobalVariable.h"
49 #include "llvm/IR/InstrTypes.h"
50 #include "llvm/IR/Instruction.h"
51 #include "llvm/IR/Instructions.h"
52 #include "llvm/IR/IntrinsicInst.h"
53 #include "llvm/IR/Intrinsics.h"
54 #include "llvm/IR/IntrinsicsAArch64.h"
55 #include "llvm/IR/IntrinsicsX86.h"
56 #include "llvm/IR/LLVMContext.h"
57 #include "llvm/IR/Metadata.h"
58 #include "llvm/IR/Module.h"
59 #include "llvm/IR/Operator.h"
60 #include "llvm/IR/PatternMatch.h"
61 #include "llvm/IR/Type.h"
62 #include "llvm/IR/User.h"
63 #include "llvm/IR/Value.h"
64 #include "llvm/Support/Casting.h"
65 #include "llvm/Support/CommandLine.h"
66 #include "llvm/Support/Compiler.h"
67 #include "llvm/Support/ErrorHandling.h"
68 #include "llvm/Support/KnownBits.h"
69 #include "llvm/Support/MathExtras.h"
70 #include <algorithm>
71 #include <array>
72 #include <cassert>
73 #include <cstdint>
74 #include <iterator>
75 #include <utility>
76 
77 using namespace llvm;
78 using namespace llvm::PatternMatch;
79 
80 const unsigned MaxDepth = 6;
81 
82 // Controls the number of uses of the value searched for possible
83 // dominating comparisons.
84 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
85                                               cl::Hidden, cl::init(20));
86 
87 /// Returns the bitwidth of the given scalar or pointer type. For vector types,
88 /// returns the element type's bitwidth.
89 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
90   if (unsigned BitWidth = Ty->getScalarSizeInBits())
91     return BitWidth;
92 
93   return DL.getPointerTypeSizeInBits(Ty);
94 }
95 
96 namespace {
97 
98 // Simplifying using an assume can only be done in a particular control-flow
99 // context (the context instruction provides that context). If an assume and
100 // the context instruction are not in the same block then the DT helps in
101 // figuring out if we can use it.
102 struct Query {
103   const DataLayout &DL;
104   AssumptionCache *AC;
105   const Instruction *CxtI;
106   const DominatorTree *DT;
107 
108   // Unlike the other analyses, this may be a nullptr because not all clients
109   // provide it currently.
110   OptimizationRemarkEmitter *ORE;
111 
112   /// Set of assumptions that should be excluded from further queries.
113   /// This is because of the potential for mutual recursion to cause
114   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
115   /// classic case of this is assume(x = y), which will attempt to determine
116   /// bits in x from bits in y, which will attempt to determine bits in y from
117   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
118   /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
119   /// (all of which can call computeKnownBits), and so on.
120   std::array<const Value *, MaxDepth> Excluded;
121 
122   /// If true, it is safe to use metadata during simplification.
123   InstrInfoQuery IIQ;
124 
125   unsigned NumExcluded = 0;
126 
127   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
128         const DominatorTree *DT, bool UseInstrInfo,
129         OptimizationRemarkEmitter *ORE = nullptr)
130       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
131 
132   Query(const Query &Q, const Value *NewExcl)
133       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
134         NumExcluded(Q.NumExcluded) {
135     Excluded = Q.Excluded;
136     Excluded[NumExcluded++] = NewExcl;
137     assert(NumExcluded <= Excluded.size());
138   }
139 
140   bool isExcluded(const Value *Value) const {
141     if (NumExcluded == 0)
142       return false;
143     auto End = Excluded.begin() + NumExcluded;
144     return std::find(Excluded.begin(), End, Value) != End;
145   }
146 };
147 
148 } // end anonymous namespace
149 
150 // Given the provided Value and, potentially, a context instruction, return
151 // the preferred context instruction (if any).
152 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
153   // If we've been provided with a context instruction, then use that (provided
154   // it has been inserted).
155   if (CxtI && CxtI->getParent())
156     return CxtI;
157 
158   // If the value is really an already-inserted instruction, then use that.
159   CxtI = dyn_cast<Instruction>(V);
160   if (CxtI && CxtI->getParent())
161     return CxtI;
162 
163   return nullptr;
164 }
165 
166 static void computeKnownBits(const Value *V, KnownBits &Known,
167                              unsigned Depth, const Query &Q);
168 
169 void llvm::computeKnownBits(const Value *V, KnownBits &Known,
170                             const DataLayout &DL, unsigned Depth,
171                             AssumptionCache *AC, const Instruction *CxtI,
172                             const DominatorTree *DT,
173                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
174   ::computeKnownBits(V, Known, Depth,
175                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
176 }
177 
178 static KnownBits computeKnownBits(const Value *V, unsigned Depth,
179                                   const Query &Q);
180 
181 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
182                                  unsigned Depth, AssumptionCache *AC,
183                                  const Instruction *CxtI,
184                                  const DominatorTree *DT,
185                                  OptimizationRemarkEmitter *ORE,
186                                  bool UseInstrInfo) {
187   return ::computeKnownBits(
188       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
189 }
190 
191 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
192                                const DataLayout &DL, AssumptionCache *AC,
193                                const Instruction *CxtI, const DominatorTree *DT,
194                                bool UseInstrInfo) {
195   assert(LHS->getType() == RHS->getType() &&
196          "LHS and RHS should have the same type");
197   assert(LHS->getType()->isIntOrIntVectorTy() &&
198          "LHS and RHS should be integers");
199   // Look for an inverted mask: (X & ~M) op (Y & M).
200   Value *M;
201   if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
202       match(RHS, m_c_And(m_Specific(M), m_Value())))
203     return true;
204   if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
205       match(LHS, m_c_And(m_Specific(M), m_Value())))
206     return true;
207   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
208   KnownBits LHSKnown(IT->getBitWidth());
209   KnownBits RHSKnown(IT->getBitWidth());
210   computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
211   computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
212   return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
213 }
214 
215 bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
216   for (const User *U : CxtI->users()) {
217     if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
218       if (IC->isEquality())
219         if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
220           if (C->isNullValue())
221             continue;
222     return false;
223   }
224   return true;
225 }
226 
227 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
228                                    const Query &Q);
229 
230 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
231                                   bool OrZero, unsigned Depth,
232                                   AssumptionCache *AC, const Instruction *CxtI,
233                                   const DominatorTree *DT, bool UseInstrInfo) {
234   return ::isKnownToBeAPowerOfTwo(
235       V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
236 }
237 
238 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
239 
240 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
241                           AssumptionCache *AC, const Instruction *CxtI,
242                           const DominatorTree *DT, bool UseInstrInfo) {
243   return ::isKnownNonZero(V, Depth,
244                           Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
245 }
246 
247 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
248                               unsigned Depth, AssumptionCache *AC,
249                               const Instruction *CxtI, const DominatorTree *DT,
250                               bool UseInstrInfo) {
251   KnownBits Known =
252       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
253   return Known.isNonNegative();
254 }
255 
256 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
257                            AssumptionCache *AC, const Instruction *CxtI,
258                            const DominatorTree *DT, bool UseInstrInfo) {
259   if (auto *CI = dyn_cast<ConstantInt>(V))
260     return CI->getValue().isStrictlyPositive();
261 
262   // TODO: We'd doing two recursive queries here.  We should factor this such
263   // that only a single query is needed.
264   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
265          isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
266 }
267 
268 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
269                            AssumptionCache *AC, const Instruction *CxtI,
270                            const DominatorTree *DT, bool UseInstrInfo) {
271   KnownBits Known =
272       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
273   return Known.isNegative();
274 }
275 
276 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
277 
278 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
279                            const DataLayout &DL, AssumptionCache *AC,
280                            const Instruction *CxtI, const DominatorTree *DT,
281                            bool UseInstrInfo) {
282   return ::isKnownNonEqual(V1, V2,
283                            Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
284                                  UseInstrInfo, /*ORE=*/nullptr));
285 }
286 
287 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
288                               const Query &Q);
289 
290 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
291                              const DataLayout &DL, unsigned Depth,
292                              AssumptionCache *AC, const Instruction *CxtI,
293                              const DominatorTree *DT, bool UseInstrInfo) {
294   return ::MaskedValueIsZero(
295       V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
296 }
297 
298 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
299                                    const Query &Q);
300 
301 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
302                                   unsigned Depth, AssumptionCache *AC,
303                                   const Instruction *CxtI,
304                                   const DominatorTree *DT, bool UseInstrInfo) {
305   return ::ComputeNumSignBits(
306       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
307 }
308 
309 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
310                                    bool NSW,
311                                    KnownBits &KnownOut, KnownBits &Known2,
312                                    unsigned Depth, const Query &Q) {
313   unsigned BitWidth = KnownOut.getBitWidth();
314 
315   // If an initial sequence of bits in the result is not needed, the
316   // corresponding bits in the operands are not needed.
317   KnownBits LHSKnown(BitWidth);
318   computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
319   computeKnownBits(Op1, Known2, Depth + 1, Q);
320 
321   KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
322 }
323 
324 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
325                                 KnownBits &Known, KnownBits &Known2,
326                                 unsigned Depth, const Query &Q) {
327   unsigned BitWidth = Known.getBitWidth();
328   computeKnownBits(Op1, Known, Depth + 1, Q);
329   computeKnownBits(Op0, Known2, Depth + 1, Q);
330 
331   bool isKnownNegative = false;
332   bool isKnownNonNegative = false;
333   // If the multiplication is known not to overflow, compute the sign bit.
334   if (NSW) {
335     if (Op0 == Op1) {
336       // The product of a number with itself is non-negative.
337       isKnownNonNegative = true;
338     } else {
339       bool isKnownNonNegativeOp1 = Known.isNonNegative();
340       bool isKnownNonNegativeOp0 = Known2.isNonNegative();
341       bool isKnownNegativeOp1 = Known.isNegative();
342       bool isKnownNegativeOp0 = Known2.isNegative();
343       // The product of two numbers with the same sign is non-negative.
344       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
345         (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
346       // The product of a negative number and a non-negative number is either
347       // negative or zero.
348       if (!isKnownNonNegative)
349         isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
350                            isKnownNonZero(Op0, Depth, Q)) ||
351                           (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
352                            isKnownNonZero(Op1, Depth, Q));
353     }
354   }
355 
356   assert(!Known.hasConflict() && !Known2.hasConflict());
357   // Compute a conservative estimate for high known-0 bits.
358   unsigned LeadZ =  std::max(Known.countMinLeadingZeros() +
359                              Known2.countMinLeadingZeros(),
360                              BitWidth) - BitWidth;
361   LeadZ = std::min(LeadZ, BitWidth);
362 
363   // The result of the bottom bits of an integer multiply can be
364   // inferred by looking at the bottom bits of both operands and
365   // multiplying them together.
366   // We can infer at least the minimum number of known trailing bits
367   // of both operands. Depending on number of trailing zeros, we can
368   // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
369   // a and b are divisible by m and n respectively.
370   // We then calculate how many of those bits are inferrable and set
371   // the output. For example, the i8 mul:
372   //  a = XXXX1100 (12)
373   //  b = XXXX1110 (14)
374   // We know the bottom 3 bits are zero since the first can be divided by
375   // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
376   // Applying the multiplication to the trimmed arguments gets:
377   //    XX11 (3)
378   //    X111 (7)
379   // -------
380   //    XX11
381   //   XX11
382   //  XX11
383   // XX11
384   // -------
385   // XXXXX01
386   // Which allows us to infer the 2 LSBs. Since we're multiplying the result
387   // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
388   // The proof for this can be described as:
389   // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
390   //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
391   //                    umin(countTrailingZeros(C2), C6) +
392   //                    umin(C5 - umin(countTrailingZeros(C1), C5),
393   //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
394   // %aa = shl i8 %a, C5
395   // %bb = shl i8 %b, C6
396   // %aaa = or i8 %aa, C1
397   // %bbb = or i8 %bb, C2
398   // %mul = mul i8 %aaa, %bbb
399   // %mask = and i8 %mul, C7
400   //   =>
401   // %mask = i8 ((C1*C2)&C7)
402   // Where C5, C6 describe the known bits of %a, %b
403   // C1, C2 describe the known bottom bits of %a, %b.
404   // C7 describes the mask of the known bits of the result.
405   APInt Bottom0 = Known.One;
406   APInt Bottom1 = Known2.One;
407 
408   // How many times we'd be able to divide each argument by 2 (shr by 1).
409   // This gives us the number of trailing zeros on the multiplication result.
410   unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
411   unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
412   unsigned TrailZero0 = Known.countMinTrailingZeros();
413   unsigned TrailZero1 = Known2.countMinTrailingZeros();
414   unsigned TrailZ = TrailZero0 + TrailZero1;
415 
416   // Figure out the fewest known-bits operand.
417   unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
418                                       TrailBitsKnown1 - TrailZero1);
419   unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);
420 
421   APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
422                       Bottom1.getLoBits(TrailBitsKnown1);
423 
424   Known.resetAll();
425   Known.Zero.setHighBits(LeadZ);
426   Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
427   Known.One |= BottomKnown.getLoBits(ResultBitsKnown);
428 
429   // Only make use of no-wrap flags if we failed to compute the sign bit
430   // directly.  This matters if the multiplication always overflows, in
431   // which case we prefer to follow the result of the direct computation,
432   // though as the program is invoking undefined behaviour we can choose
433   // whatever we like here.
434   if (isKnownNonNegative && !Known.isNegative())
435     Known.makeNonNegative();
436   else if (isKnownNegative && !Known.isNonNegative())
437     Known.makeNegative();
438 }
439 
440 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
441                                              KnownBits &Known) {
442   unsigned BitWidth = Known.getBitWidth();
443   unsigned NumRanges = Ranges.getNumOperands() / 2;
444   assert(NumRanges >= 1);
445 
446   Known.Zero.setAllBits();
447   Known.One.setAllBits();
448 
449   for (unsigned i = 0; i < NumRanges; ++i) {
450     ConstantInt *Lower =
451         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
452     ConstantInt *Upper =
453         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
454     ConstantRange Range(Lower->getValue(), Upper->getValue());
455 
456     // The first CommonPrefixBits of all values in Range are equal.
457     unsigned CommonPrefixBits =
458         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
459 
460     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
461     Known.One &= Range.getUnsignedMax() & Mask;
462     Known.Zero &= ~Range.getUnsignedMax() & Mask;
463   }
464 }
465 
466 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
467   SmallVector<const Value *, 16> WorkSet(1, I);
468   SmallPtrSet<const Value *, 32> Visited;
469   SmallPtrSet<const Value *, 16> EphValues;
470 
471   // The instruction defining an assumption's condition itself is always
472   // considered ephemeral to that assumption (even if it has other
473   // non-ephemeral users). See r246696's test case for an example.
474   if (is_contained(I->operands(), E))
475     return true;
476 
477   while (!WorkSet.empty()) {
478     const Value *V = WorkSet.pop_back_val();
479     if (!Visited.insert(V).second)
480       continue;
481 
482     // If all uses of this value are ephemeral, then so is this value.
483     if (llvm::all_of(V->users(), [&](const User *U) {
484                                    return EphValues.count(U);
485                                  })) {
486       if (V == E)
487         return true;
488 
489       if (V == I || isSafeToSpeculativelyExecute(V)) {
490        EphValues.insert(V);
491        if (const User *U = dyn_cast<User>(V))
492          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
493               J != JE; ++J)
494            WorkSet.push_back(*J);
495       }
496     }
497   }
498 
499   return false;
500 }
501 
502 // Is this an intrinsic that cannot be speculated but also cannot trap?
503 bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
504   if (const CallInst *CI = dyn_cast<CallInst>(I))
505     if (Function *F = CI->getCalledFunction())
506       switch (F->getIntrinsicID()) {
507       default: break;
508       // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
509       case Intrinsic::assume:
510       case Intrinsic::sideeffect:
511       case Intrinsic::dbg_declare:
512       case Intrinsic::dbg_value:
513       case Intrinsic::dbg_label:
514       case Intrinsic::invariant_start:
515       case Intrinsic::invariant_end:
516       case Intrinsic::lifetime_start:
517       case Intrinsic::lifetime_end:
518       case Intrinsic::objectsize:
519       case Intrinsic::ptr_annotation:
520       case Intrinsic::var_annotation:
521         return true;
522       }
523 
524   return false;
525 }
526 
527 bool llvm::isValidAssumeForContext(const Instruction *Inv,
528                                    const Instruction *CxtI,
529                                    const DominatorTree *DT) {
530   // There are two restrictions on the use of an assume:
531   //  1. The assume must dominate the context (or the control flow must
532   //     reach the assume whenever it reaches the context).
533   //  2. The context must not be in the assume's set of ephemeral values
534   //     (otherwise we will use the assume to prove that the condition
535   //     feeding the assume is trivially true, thus causing the removal of
536   //     the assume).
537 
538   if (DT) {
539     if (DT->dominates(Inv, CxtI))
540       return true;
541   } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
542     // We don't have a DT, but this trivially dominates.
543     return true;
544   }
545 
546   // With or without a DT, the only remaining case we will check is if the
547   // instructions are in the same BB.  Give up if that is not the case.
548   if (Inv->getParent() != CxtI->getParent())
549     return false;
550 
551   // If we have a dom tree, then we now know that the assume doesn't dominate
552   // the other instruction.  If we don't have a dom tree then we can check if
553   // the assume is first in the BB.
554   if (!DT) {
555     // Search forward from the assume until we reach the context (or the end
556     // of the block); the common case is that the assume will come first.
557     for (auto I = std::next(BasicBlock::const_iterator(Inv)),
558          IE = Inv->getParent()->end(); I != IE; ++I)
559       if (&*I == CxtI)
560         return true;
561   }
562 
563   // Don't let an assume affect itself - this would cause the problems
564   // `isEphemeralValueOf` is trying to prevent, and it would also make
565   // the loop below go out of bounds.
566   if (Inv == CxtI)
567     return false;
568 
569   // The context comes first, but they're both in the same block.
570   // Make sure there is nothing in between that might interrupt
571   // the control flow, not even CxtI itself.
572   for (BasicBlock::const_iterator I(CxtI), IE(Inv); I != IE; ++I)
573     if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
574       return false;
575 
576   return !isEphemeralValueOf(Inv, CxtI);
577 }
578 
579 static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
580   // Use of assumptions is context-sensitive. If we don't have a context, we
581   // cannot use them!
582   if (!Q.AC || !Q.CxtI)
583     return false;
584 
585   // Note that the patterns below need to be kept in sync with the code
586   // in AssumptionCache::updateAffectedValues.
587 
588   auto CmpExcludesZero = [V](ICmpInst *Cmp) {
589     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
590 
591     Value *RHS;
592     CmpInst::Predicate Pred;
593     if (!match(Cmp, m_c_ICmp(Pred, m_V, m_Value(RHS))))
594       return false;
595     // Canonicalize 'v' to be on the LHS of the comparison.
596     if (Cmp->getOperand(1) != RHS)
597       Pred = CmpInst::getSwappedPredicate(Pred);
598 
599     // assume(v u> y) -> assume(v != 0)
600     if (Pred == ICmpInst::ICMP_UGT)
601       return true;
602 
603     // assume(v != 0)
604     // We special-case this one to ensure that we handle `assume(v != null)`.
605     if (Pred == ICmpInst::ICMP_NE)
606       return match(RHS, m_Zero());
607 
608     // All other predicates - rely on generic ConstantRange handling.
609     ConstantInt *CI;
610     if (!match(RHS, m_ConstantInt(CI)))
611       return false;
612     ConstantRange RHSRange(CI->getValue());
613     ConstantRange TrueValues =
614         ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
615     return !TrueValues.contains(APInt::getNullValue(CI->getBitWidth()));
616   };
617 
618   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
619     if (!AssumeVH)
620       continue;
621     CallInst *I = cast<CallInst>(AssumeVH);
622     assert(I->getFunction() == Q.CxtI->getFunction() &&
623            "Got assumption for the wrong function!");
624     if (Q.isExcluded(I))
625       continue;
626 
627     // Warning: This loop can end up being somewhat performance sensitive.
628     // We're running this loop for once for each value queried resulting in a
629     // runtime of ~O(#assumes * #values).
630 
631     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
632            "must be an assume intrinsic");
633 
634     Value *Arg = I->getArgOperand(0);
635     ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
636     if (!Cmp)
637       continue;
638 
639     if (CmpExcludesZero(Cmp) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
640       return true;
641   }
642 
643   return false;
644 }
645 
646 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
647                                        unsigned Depth, const Query &Q) {
648   // Use of assumptions is context-sensitive. If we don't have a context, we
649   // cannot use them!
650   if (!Q.AC || !Q.CxtI)
651     return;
652 
653   unsigned BitWidth = Known.getBitWidth();
654 
655   // Note that the patterns below need to be kept in sync with the code
656   // in AssumptionCache::updateAffectedValues.
657 
658   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
659     if (!AssumeVH)
660       continue;
661     CallInst *I = cast<CallInst>(AssumeVH);
662     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
663            "Got assumption for the wrong function!");
664     if (Q.isExcluded(I))
665       continue;
666 
667     // Warning: This loop can end up being somewhat performance sensitive.
668     // We're running this loop for once for each value queried resulting in a
669     // runtime of ~O(#assumes * #values).
670 
671     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
672            "must be an assume intrinsic");
673 
674     Value *Arg = I->getArgOperand(0);
675 
676     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
677       assert(BitWidth == 1 && "assume operand is not i1?");
678       Known.setAllOnes();
679       return;
680     }
681     if (match(Arg, m_Not(m_Specific(V))) &&
682         isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
683       assert(BitWidth == 1 && "assume operand is not i1?");
684       Known.setAllZero();
685       return;
686     }
687 
688     // The remaining tests are all recursive, so bail out if we hit the limit.
689     if (Depth == MaxDepth)
690       continue;
691 
692     ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
693     if (!Cmp)
694       continue;
695 
696     Value *A, *B;
697     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
698 
699     CmpInst::Predicate Pred;
700     uint64_t C;
701     switch (Cmp->getPredicate()) {
702     default:
703       break;
704     case ICmpInst::ICMP_EQ:
705       // assume(v = a)
706       if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
707           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
708         KnownBits RHSKnown(BitWidth);
709         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
710         Known.Zero |= RHSKnown.Zero;
711         Known.One  |= RHSKnown.One;
712       // assume(v & b = a)
713       } else if (match(Cmp,
714                        m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
715                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
716         KnownBits RHSKnown(BitWidth);
717         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
718         KnownBits MaskKnown(BitWidth);
719         computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
720 
721         // For those bits in the mask that are known to be one, we can propagate
722         // known bits from the RHS to V.
723         Known.Zero |= RHSKnown.Zero & MaskKnown.One;
724         Known.One  |= RHSKnown.One  & MaskKnown.One;
725       // assume(~(v & b) = a)
726       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
727                                      m_Value(A))) &&
728                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
729         KnownBits RHSKnown(BitWidth);
730         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
731         KnownBits MaskKnown(BitWidth);
732         computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
733 
734         // For those bits in the mask that are known to be one, we can propagate
735         // inverted known bits from the RHS to V.
736         Known.Zero |= RHSKnown.One  & MaskKnown.One;
737         Known.One  |= RHSKnown.Zero & MaskKnown.One;
738       // assume(v | b = a)
739       } else if (match(Cmp,
740                        m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
741                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
742         KnownBits RHSKnown(BitWidth);
743         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
744         KnownBits BKnown(BitWidth);
745         computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
746 
747         // For those bits in B that are known to be zero, we can propagate known
748         // bits from the RHS to V.
749         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
750         Known.One  |= RHSKnown.One  & BKnown.Zero;
751       // assume(~(v | b) = a)
752       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
753                                      m_Value(A))) &&
754                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
755         KnownBits RHSKnown(BitWidth);
756         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
757         KnownBits BKnown(BitWidth);
758         computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
759 
760         // For those bits in B that are known to be zero, we can propagate
761         // inverted known bits from the RHS to V.
762         Known.Zero |= RHSKnown.One  & BKnown.Zero;
763         Known.One  |= RHSKnown.Zero & BKnown.Zero;
764       // assume(v ^ b = a)
765       } else if (match(Cmp,
766                        m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
767                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
768         KnownBits RHSKnown(BitWidth);
769         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
770         KnownBits BKnown(BitWidth);
771         computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
772 
773         // For those bits in B that are known to be zero, we can propagate known
774         // bits from the RHS to V. For those bits in B that are known to be one,
775         // we can propagate inverted known bits from the RHS to V.
776         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
777         Known.One  |= RHSKnown.One  & BKnown.Zero;
778         Known.Zero |= RHSKnown.One  & BKnown.One;
779         Known.One  |= RHSKnown.Zero & BKnown.One;
780       // assume(~(v ^ b) = a)
781       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
782                                      m_Value(A))) &&
783                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
784         KnownBits RHSKnown(BitWidth);
785         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
786         KnownBits BKnown(BitWidth);
787         computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
788 
789         // For those bits in B that are known to be zero, we can propagate
790         // inverted known bits from the RHS to V. For those bits in B that are
791         // known to be one, we can propagate known bits from the RHS to V.
792         Known.Zero |= RHSKnown.One  & BKnown.Zero;
793         Known.One  |= RHSKnown.Zero & BKnown.Zero;
794         Known.Zero |= RHSKnown.Zero & BKnown.One;
795         Known.One  |= RHSKnown.One  & BKnown.One;
796       // assume(v << c = a)
797       } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
798                                      m_Value(A))) &&
799                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
800         KnownBits RHSKnown(BitWidth);
801         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
802         // For those bits in RHS that are known, we can propagate them to known
803         // bits in V shifted to the right by C.
804         RHSKnown.Zero.lshrInPlace(C);
805         Known.Zero |= RHSKnown.Zero;
806         RHSKnown.One.lshrInPlace(C);
807         Known.One  |= RHSKnown.One;
808       // assume(~(v << c) = a)
809       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
810                                      m_Value(A))) &&
811                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
812         KnownBits RHSKnown(BitWidth);
813         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
814         // For those bits in RHS that are known, we can propagate them inverted
815         // to known bits in V shifted to the right by C.
816         RHSKnown.One.lshrInPlace(C);
817         Known.Zero |= RHSKnown.One;
818         RHSKnown.Zero.lshrInPlace(C);
819         Known.One  |= RHSKnown.Zero;
820       // assume(v >> c = a)
821       } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
822                                      m_Value(A))) &&
823                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
824         KnownBits RHSKnown(BitWidth);
825         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
826         // For those bits in RHS that are known, we can propagate them to known
827         // bits in V shifted to the right by C.
828         Known.Zero |= RHSKnown.Zero << C;
829         Known.One  |= RHSKnown.One  << C;
830       // assume(~(v >> c) = a)
831       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
832                                      m_Value(A))) &&
833                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
834         KnownBits RHSKnown(BitWidth);
835         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
836         // For those bits in RHS that are known, we can propagate them inverted
837         // to known bits in V shifted to the right by C.
838         Known.Zero |= RHSKnown.One  << C;
839         Known.One  |= RHSKnown.Zero << C;
840       }
841       break;
842     case ICmpInst::ICMP_SGE:
843       // assume(v >=_s c) where c is non-negative
844       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
845           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
846         KnownBits RHSKnown(BitWidth);
847         computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));
848 
849         if (RHSKnown.isNonNegative()) {
850           // We know that the sign bit is zero.
851           Known.makeNonNegative();
852         }
853       }
854       break;
855     case ICmpInst::ICMP_SGT:
856       // assume(v >_s c) where c is at least -1.
857       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
858           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
859         KnownBits RHSKnown(BitWidth);
860         computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));
861 
862         if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
863           // We know that the sign bit is zero.
864           Known.makeNonNegative();
865         }
866       }
867       break;
868     case ICmpInst::ICMP_SLE:
869       // assume(v <=_s c) where c is negative
870       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
871           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
872         KnownBits RHSKnown(BitWidth);
873         computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));
874 
875         if (RHSKnown.isNegative()) {
876           // We know that the sign bit is one.
877           Known.makeNegative();
878         }
879       }
880       break;
881     case ICmpInst::ICMP_SLT:
882       // assume(v <_s c) where c is non-positive
883       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
884           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
885         KnownBits RHSKnown(BitWidth);
886         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
887 
888         if (RHSKnown.isZero() || RHSKnown.isNegative()) {
889           // We know that the sign bit is one.
890           Known.makeNegative();
891         }
892       }
893       break;
894     case ICmpInst::ICMP_ULE:
895       // assume(v <=_u c)
896       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
897           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
898         KnownBits RHSKnown(BitWidth);
899         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
900 
901         // Whatever high bits in c are zero are known to be zero.
902         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
903       }
904       break;
905     case ICmpInst::ICMP_ULT:
906       // assume(v <_u c)
907       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
908           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
909         KnownBits RHSKnown(BitWidth);
910         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
911 
912         // If the RHS is known zero, then this assumption must be wrong (nothing
913         // is unsigned less than zero). Signal a conflict and get out of here.
914         if (RHSKnown.isZero()) {
915           Known.Zero.setAllBits();
916           Known.One.setAllBits();
917           break;
918         }
919 
920         // Whatever high bits in c are zero are known to be zero (if c is a power
921         // of 2, then one more).
922         if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
923           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
924         else
925           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
926       }
927       break;
928     }
929   }
930 
931   // If assumptions conflict with each other or previous known bits, then we
932   // have a logical fallacy. It's possible that the assumption is not reachable,
933   // so this isn't a real bug. On the other hand, the program may have undefined
934   // behavior, or we might have a bug in the compiler. We can't assert/crash, so
935   // clear out the known bits, try to warn the user, and hope for the best.
936   if (Known.Zero.intersects(Known.One)) {
937     Known.resetAll();
938 
939     if (Q.ORE)
940       Q.ORE->emit([&]() {
941         auto *CxtI = const_cast<Instruction *>(Q.CxtI);
942         return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
943                                           CxtI)
944                << "Detected conflicting code assumptions. Program may "
945                   "have undefined behavior, or compiler may have "
946                   "internal error.";
947       });
948   }
949 }
950 
951 /// Compute known bits from a shift operator, including those with a
952 /// non-constant shift amount. Known is the output of this function. Known2 is a
953 /// pre-allocated temporary with the same bit width as Known. KZF and KOF are
954 /// operator-specific functions that, given the known-zero or known-one bits
955 /// respectively, and a shift amount, compute the implied known-zero or
956 /// known-one bits of the shift operator's result respectively for that shift
957 /// amount. The results from calling KZF and KOF are conservatively combined for
958 /// all permitted shift amounts.
959 static void computeKnownBitsFromShiftOperator(
960     const Operator *I, KnownBits &Known, KnownBits &Known2,
961     unsigned Depth, const Query &Q,
962     function_ref<APInt(const APInt &, unsigned)> KZF,
963     function_ref<APInt(const APInt &, unsigned)> KOF) {
964   unsigned BitWidth = Known.getBitWidth();
965 
966   if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
967     unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);
968 
969     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
970     Known.Zero = KZF(Known.Zero, ShiftAmt);
971     Known.One  = KOF(Known.One, ShiftAmt);
972     // If the known bits conflict, this must be an overflowing left shift, so
973     // the shift result is poison. We can return anything we want. Choose 0 for
974     // the best folding opportunity.
975     if (Known.hasConflict())
976       Known.setAllZero();
977 
978     return;
979   }
980 
981   computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
982 
983   // If the shift amount could be greater than or equal to the bit-width of the
984   // LHS, the value could be poison, but bail out because the check below is
985   // expensive. TODO: Should we just carry on?
986   if (Known.getMaxValue().uge(BitWidth)) {
987     Known.resetAll();
988     return;
989   }
990 
991   // Note: We cannot use Known.Zero.getLimitedValue() here, because if
992   // BitWidth > 64 and any upper bits are known, we'll end up returning the
993   // limit value (which implies all bits are known).
994   uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
995   uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
996 
997   // It would be more-clearly correct to use the two temporaries for this
998   // calculation. Reusing the APInts here to prevent unnecessary allocations.
999   Known.resetAll();
1000 
1001   // If we know the shifter operand is nonzero, we can sometimes infer more
1002   // known bits. However this is expensive to compute, so be lazy about it and
1003   // only compute it when absolutely necessary.
1004   Optional<bool> ShifterOperandIsNonZero;
1005 
1006   // Early exit if we can't constrain any well-defined shift amount.
1007   if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
1008       !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
1009     ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
1010     if (!*ShifterOperandIsNonZero)
1011       return;
1012   }
1013 
1014   computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1015 
1016   Known.Zero.setAllBits();
1017   Known.One.setAllBits();
1018   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
1019     // Combine the shifted known input bits only for those shift amounts
1020     // compatible with its known constraints.
1021     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
1022       continue;
1023     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
1024       continue;
1025     // If we know the shifter is nonzero, we may be able to infer more known
1026     // bits. This check is sunk down as far as possible to avoid the expensive
1027     // call to isKnownNonZero if the cheaper checks above fail.
1028     if (ShiftAmt == 0) {
1029       if (!ShifterOperandIsNonZero.hasValue())
1030         ShifterOperandIsNonZero =
1031             isKnownNonZero(I->getOperand(1), Depth + 1, Q);
1032       if (*ShifterOperandIsNonZero)
1033         continue;
1034     }
1035 
1036     Known.Zero &= KZF(Known2.Zero, ShiftAmt);
1037     Known.One  &= KOF(Known2.One, ShiftAmt);
1038   }
1039 
1040   // If the known bits conflict, the result is poison. Return a 0 and hope the
1041   // caller can further optimize that.
1042   if (Known.hasConflict())
1043     Known.setAllZero();
1044 }
1045 
1046 static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
1047                                          unsigned Depth, const Query &Q) {
1048   unsigned BitWidth = Known.getBitWidth();
1049 
1050   KnownBits Known2(Known);
1051   switch (I->getOpcode()) {
1052   default: break;
1053   case Instruction::Load:
1054     if (MDNode *MD =
1055             Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
1056       computeKnownBitsFromRangeMetadata(*MD, Known);
1057     break;
1058   case Instruction::And: {
1059     // If either the LHS or the RHS are Zero, the result is zero.
1060     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
1061     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1062 
1063     // Output known-1 bits are only known if set in both the LHS & RHS.
1064     Known.One &= Known2.One;
1065     // Output known-0 are known to be clear if zero in either the LHS | RHS.
1066     Known.Zero |= Known2.Zero;
1067 
1068     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
1069     // here we handle the more general case of adding any odd number by
1070     // matching the form add(x, add(x, y)) where y is odd.
1071     // TODO: This could be generalized to clearing any bit set in y where the
1072     // following bit is known to be unset in y.
1073     Value *X = nullptr, *Y = nullptr;
1074     if (!Known.Zero[0] && !Known.One[0] &&
1075         match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
1076       Known2.resetAll();
1077       computeKnownBits(Y, Known2, Depth + 1, Q);
1078       if (Known2.countMinTrailingOnes() > 0)
1079         Known.Zero.setBit(0);
1080     }
1081     break;
1082   }
1083   case Instruction::Or:
1084     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
1085     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1086 
1087     // Output known-0 bits are only known if clear in both the LHS & RHS.
1088     Known.Zero &= Known2.Zero;
1089     // Output known-1 are known to be set if set in either the LHS | RHS.
1090     Known.One |= Known2.One;
1091     break;
1092   case Instruction::Xor: {
1093     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
1094     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1095 
1096     // Output known-0 bits are known if clear or set in both the LHS & RHS.
1097     APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
1098     // Output known-1 are known to be set if set in only one of the LHS, RHS.
1099     Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
1100     Known.Zero = std::move(KnownZeroOut);
1101     break;
1102   }
1103   case Instruction::Mul: {
1104     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1105     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
1106                         Known2, Depth, Q);
1107     break;
1108   }
1109   case Instruction::UDiv: {
1110     // For the purposes of computing leading zeros we can conservatively
1111     // treat a udiv as a logical right shift by the power of 2 known to
1112     // be less than the denominator.
1113     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1114     unsigned LeadZ = Known2.countMinLeadingZeros();
1115 
1116     Known2.resetAll();
1117     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1118     unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
1119     if (RHSMaxLeadingZeros != BitWidth)
1120       LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
1121 
1122     Known.Zero.setHighBits(LeadZ);
1123     break;
1124   }
1125   case Instruction::Select: {
1126     const Value *LHS = nullptr, *RHS = nullptr;
1127     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1128     if (SelectPatternResult::isMinOrMax(SPF)) {
1129       computeKnownBits(RHS, Known, Depth + 1, Q);
1130       computeKnownBits(LHS, Known2, Depth + 1, Q);
1131     } else {
1132       computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1133       computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1134     }
1135 
1136     unsigned MaxHighOnes = 0;
1137     unsigned MaxHighZeros = 0;
1138     if (SPF == SPF_SMAX) {
1139       // If both sides are negative, the result is negative.
1140       if (Known.isNegative() && Known2.isNegative())
1141         // We can derive a lower bound on the result by taking the max of the
1142         // leading one bits.
1143         MaxHighOnes =
1144             std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1145       // If either side is non-negative, the result is non-negative.
1146       else if (Known.isNonNegative() || Known2.isNonNegative())
1147         MaxHighZeros = 1;
1148     } else if (SPF == SPF_SMIN) {
1149       // If both sides are non-negative, the result is non-negative.
1150       if (Known.isNonNegative() && Known2.isNonNegative())
1151         // We can derive an upper bound on the result by taking the max of the
1152         // leading zero bits.
1153         MaxHighZeros = std::max(Known.countMinLeadingZeros(),
1154                                 Known2.countMinLeadingZeros());
1155       // If either side is negative, the result is negative.
1156       else if (Known.isNegative() || Known2.isNegative())
1157         MaxHighOnes = 1;
1158     } else if (SPF == SPF_UMAX) {
1159       // We can derive a lower bound on the result by taking the max of the
1160       // leading one bits.
1161       MaxHighOnes =
1162           std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1163     } else if (SPF == SPF_UMIN) {
1164       // We can derive an upper bound on the result by taking the max of the
1165       // leading zero bits.
1166       MaxHighZeros =
1167           std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1168     } else if (SPF == SPF_ABS) {
1169       // RHS from matchSelectPattern returns the negation part of abs pattern.
1170       // If the negate has an NSW flag we can assume the sign bit of the result
1171       // will be 0 because that makes abs(INT_MIN) undefined.
1172       if (match(RHS, m_Neg(m_Specific(LHS))) &&
1173           Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
1174         MaxHighZeros = 1;
1175     }
1176 
1177     // Only known if known in both the LHS and RHS.
1178     Known.One &= Known2.One;
1179     Known.Zero &= Known2.Zero;
1180     if (MaxHighOnes > 0)
1181       Known.One.setHighBits(MaxHighOnes);
1182     if (MaxHighZeros > 0)
1183       Known.Zero.setHighBits(MaxHighZeros);
1184     break;
1185   }
1186   case Instruction::FPTrunc:
1187   case Instruction::FPExt:
1188   case Instruction::FPToUI:
1189   case Instruction::FPToSI:
1190   case Instruction::SIToFP:
1191   case Instruction::UIToFP:
1192     break; // Can't work with floating point.
1193   case Instruction::PtrToInt:
1194   case Instruction::IntToPtr:
1195     // Fall through and handle them the same as zext/trunc.
1196     LLVM_FALLTHROUGH;
1197   case Instruction::ZExt:
1198   case Instruction::Trunc: {
1199     Type *SrcTy = I->getOperand(0)->getType();
1200 
1201     unsigned SrcBitWidth;
1202     // Note that we handle pointer operands here because of inttoptr/ptrtoint
1203     // which fall through here.
1204     Type *ScalarTy = SrcTy->getScalarType();
1205     SrcBitWidth = ScalarTy->isPointerTy() ?
1206       Q.DL.getPointerTypeSizeInBits(ScalarTy) :
1207       Q.DL.getTypeSizeInBits(ScalarTy);
1208 
1209     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1210     Known = Known.zextOrTrunc(SrcBitWidth, false);
1211     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1212     Known = Known.zextOrTrunc(BitWidth, true /* ExtendedBitsAreKnownZero */);
1213     break;
1214   }
1215   case Instruction::BitCast: {
1216     Type *SrcTy = I->getOperand(0)->getType();
1217     if (SrcTy->isIntOrPtrTy() &&
1218         // TODO: For now, not handling conversions like:
1219         // (bitcast i64 %x to <2 x i32>)
1220         !I->getType()->isVectorTy()) {
1221       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1222       break;
1223     }
1224     break;
1225   }
1226   case Instruction::SExt: {
1227     // Compute the bits in the result that are not present in the input.
1228     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1229 
1230     Known = Known.trunc(SrcBitWidth);
1231     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1232     // If the sign bit of the input is known set or clear, then we know the
1233     // top bits of the result.
1234     Known = Known.sext(BitWidth);
1235     break;
1236   }
1237   case Instruction::Shl: {
1238     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
1239     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1240     auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
1241       APInt KZResult = KnownZero << ShiftAmt;
1242       KZResult.setLowBits(ShiftAmt); // Low bits known 0.
1243       // If this shift has "nsw" keyword, then the result is either a poison
1244       // value or has the same sign bit as the first operand.
1245       if (NSW && KnownZero.isSignBitSet())
1246         KZResult.setSignBit();
1247       return KZResult;
1248     };
1249 
1250     auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
1251       APInt KOResult = KnownOne << ShiftAmt;
1252       if (NSW && KnownOne.isSignBitSet())
1253         KOResult.setSignBit();
1254       return KOResult;
1255     };
1256 
1257     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1258     break;
1259   }
1260   case Instruction::LShr: {
1261     // (lshr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1262     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1263       APInt KZResult = KnownZero.lshr(ShiftAmt);
1264       // High bits known zero.
1265       KZResult.setHighBits(ShiftAmt);
1266       return KZResult;
1267     };
1268 
1269     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1270       return KnownOne.lshr(ShiftAmt);
1271     };
1272 
1273     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1274     break;
1275   }
1276   case Instruction::AShr: {
1277     // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1278     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1279       return KnownZero.ashr(ShiftAmt);
1280     };
1281 
1282     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1283       return KnownOne.ashr(ShiftAmt);
1284     };
1285 
1286     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1287     break;
1288   }
1289   case Instruction::Sub: {
1290     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1291     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1292                            Known, Known2, Depth, Q);
1293     break;
1294   }
1295   case Instruction::Add: {
1296     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1297     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1298                            Known, Known2, Depth, Q);
1299     break;
1300   }
1301   case Instruction::SRem:
1302     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1303       APInt RA = Rem->getValue().abs();
1304       if (RA.isPowerOf2()) {
1305         APInt LowBits = RA - 1;
1306         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1307 
1308         // The low bits of the first operand are unchanged by the srem.
1309         Known.Zero = Known2.Zero & LowBits;
1310         Known.One = Known2.One & LowBits;
1311 
1312         // If the first operand is non-negative or has all low bits zero, then
1313         // the upper bits are all zero.
1314         if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
1315           Known.Zero |= ~LowBits;
1316 
1317         // If the first operand is negative and not all low bits are zero, then
1318         // the upper bits are all one.
1319         if (Known2.isNegative() && LowBits.intersects(Known2.One))
1320           Known.One |= ~LowBits;
1321 
1322         assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1323         break;
1324       }
1325     }
1326 
1327     // The sign bit is the LHS's sign bit, except when the result of the
1328     // remainder is zero.
1329     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1330     // If it's known zero, our sign bit is also zero.
1331     if (Known2.isNonNegative())
1332       Known.makeNonNegative();
1333 
1334     break;
1335   case Instruction::URem: {
1336     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1337       const APInt &RA = Rem->getValue();
1338       if (RA.isPowerOf2()) {
1339         APInt LowBits = (RA - 1);
1340         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1341         Known.Zero |= ~LowBits;
1342         Known.One &= LowBits;
1343         break;
1344       }
1345     }
1346 
1347     // Since the result is less than or equal to either operand, any leading
1348     // zero bits in either operand must also exist in the result.
1349     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1350     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1351 
1352     unsigned Leaders =
1353         std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1354     Known.resetAll();
1355     Known.Zero.setHighBits(Leaders);
1356     break;
1357   }
1358 
1359   case Instruction::Alloca: {
1360     const AllocaInst *AI = cast<AllocaInst>(I);
1361     unsigned Align = AI->getAlignment();
1362     if (Align == 0)
1363       Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
1364 
1365     if (Align > 0)
1366       Known.Zero.setLowBits(countTrailingZeros(Align));
1367     break;
1368   }
1369   case Instruction::GetElementPtr: {
1370     // Analyze all of the subscripts of this getelementptr instruction
1371     // to determine if we can prove known low zero bits.
1372     KnownBits LocalKnown(BitWidth);
1373     computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
1374     unsigned TrailZ = LocalKnown.countMinTrailingZeros();
1375 
1376     gep_type_iterator GTI = gep_type_begin(I);
1377     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1378       Value *Index = I->getOperand(i);
1379       if (StructType *STy = GTI.getStructTypeOrNull()) {
1380         // Handle struct member offset arithmetic.
1381 
1382         // Handle case when index is vector zeroinitializer
1383         Constant *CIndex = cast<Constant>(Index);
1384         if (CIndex->isZeroValue())
1385           continue;
1386 
1387         if (CIndex->getType()->isVectorTy())
1388           Index = CIndex->getSplatValue();
1389 
1390         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1391         const StructLayout *SL = Q.DL.getStructLayout(STy);
1392         uint64_t Offset = SL->getElementOffset(Idx);
1393         TrailZ = std::min<unsigned>(TrailZ,
1394                                     countTrailingZeros(Offset));
1395       } else {
1396         // Handle array index arithmetic.
1397         Type *IndexedTy = GTI.getIndexedType();
1398         if (!IndexedTy->isSized()) {
1399           TrailZ = 0;
1400           break;
1401         }
1402         unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1403         uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1404         LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
1405         computeKnownBits(Index, LocalKnown, Depth + 1, Q);
1406         TrailZ = std::min(TrailZ,
1407                           unsigned(countTrailingZeros(TypeSize) +
1408                                    LocalKnown.countMinTrailingZeros()));
1409       }
1410     }
1411 
1412     Known.Zero.setLowBits(TrailZ);
1413     break;
1414   }
1415   case Instruction::PHI: {
1416     const PHINode *P = cast<PHINode>(I);
1417     // Handle the case of a simple two-predecessor recurrence PHI.
1418     // There's a lot more that could theoretically be done here, but
1419     // this is sufficient to catch some interesting cases.
1420     if (P->getNumIncomingValues() == 2) {
1421       for (unsigned i = 0; i != 2; ++i) {
1422         Value *L = P->getIncomingValue(i);
1423         Value *R = P->getIncomingValue(!i);
1424         Instruction *RInst = P->getIncomingBlock(!i)->getTerminator();
1425         Instruction *LInst = P->getIncomingBlock(i)->getTerminator();
1426         Operator *LU = dyn_cast<Operator>(L);
1427         if (!LU)
1428           continue;
1429         unsigned Opcode = LU->getOpcode();
1430         // Check for operations that have the property that if
1431         // both their operands have low zero bits, the result
1432         // will have low zero bits.
1433         if (Opcode == Instruction::Add ||
1434             Opcode == Instruction::Sub ||
1435             Opcode == Instruction::And ||
1436             Opcode == Instruction::Or ||
1437             Opcode == Instruction::Mul) {
1438           Value *LL = LU->getOperand(0);
1439           Value *LR = LU->getOperand(1);
1440           // Find a recurrence.
1441           if (LL == I)
1442             L = LR;
1443           else if (LR == I)
1444             L = LL;
1445           else
1446             continue; // Check for recurrence with L and R flipped.
1447 
1448           // Change the context instruction to the "edge" that flows into the
1449           // phi. This is important because that is where the value is actually
1450           // "evaluated" even though it is used later somewhere else. (see also
1451           // D69571).
1452           Query RecQ = Q;
1453 
1454           // Ok, we have a PHI of the form L op= R. Check for low
1455           // zero bits.
1456           RecQ.CxtI = RInst;
1457           computeKnownBits(R, Known2, Depth + 1, RecQ);
1458 
1459           // We need to take the minimum number of known bits
1460           KnownBits Known3(Known);
1461           RecQ.CxtI = LInst;
1462           computeKnownBits(L, Known3, Depth + 1, RecQ);
1463 
1464           Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1465                                          Known3.countMinTrailingZeros()));
1466 
1467           auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1468           if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1469             // If initial value of recurrence is nonnegative, and we are adding
1470             // a nonnegative number with nsw, the result can only be nonnegative
1471             // or poison value regardless of the number of times we execute the
1472             // add in phi recurrence. If initial value is negative and we are
1473             // adding a negative number with nsw, the result can only be
1474             // negative or poison value. Similar arguments apply to sub and mul.
1475             //
1476             // (add non-negative, non-negative) --> non-negative
1477             // (add negative, negative) --> negative
1478             if (Opcode == Instruction::Add) {
1479               if (Known2.isNonNegative() && Known3.isNonNegative())
1480                 Known.makeNonNegative();
1481               else if (Known2.isNegative() && Known3.isNegative())
1482                 Known.makeNegative();
1483             }
1484 
1485             // (sub nsw non-negative, negative) --> non-negative
1486             // (sub nsw negative, non-negative) --> negative
1487             else if (Opcode == Instruction::Sub && LL == I) {
1488               if (Known2.isNonNegative() && Known3.isNegative())
1489                 Known.makeNonNegative();
1490               else if (Known2.isNegative() && Known3.isNonNegative())
1491                 Known.makeNegative();
1492             }
1493 
1494             // (mul nsw non-negative, non-negative) --> non-negative
1495             else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1496                      Known3.isNonNegative())
1497               Known.makeNonNegative();
1498           }
1499 
1500           break;
1501         }
1502       }
1503     }
1504 
1505     // Unreachable blocks may have zero-operand PHI nodes.
1506     if (P->getNumIncomingValues() == 0)
1507       break;
1508 
1509     // Otherwise take the unions of the known bit sets of the operands,
1510     // taking conservative care to avoid excessive recursion.
1511     if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
1512       // Skip if every incoming value references to ourself.
1513       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1514         break;
1515 
1516       Known.Zero.setAllBits();
1517       Known.One.setAllBits();
1518       for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1519         Value *IncValue = P->getIncomingValue(u);
1520         // Skip direct self references.
1521         if (IncValue == P) continue;
1522 
1523         // Change the context instruction to the "edge" that flows into the
1524         // phi. This is important because that is where the value is actually
1525         // "evaluated" even though it is used later somewhere else. (see also
1526         // D69571).
1527         Query RecQ = Q;
1528         RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1529 
1530         Known2 = KnownBits(BitWidth);
1531         // Recurse, but cap the recursion to one level, because we don't
1532         // want to waste time spinning around in loops.
1533         computeKnownBits(IncValue, Known2, MaxDepth - 1, RecQ);
1534         Known.Zero &= Known2.Zero;
1535         Known.One &= Known2.One;
1536         // If all bits have been ruled out, there's no need to check
1537         // more operands.
1538         if (!Known.Zero && !Known.One)
1539           break;
1540       }
1541     }
1542     break;
1543   }
1544   case Instruction::Call:
1545   case Instruction::Invoke:
1546     // If range metadata is attached to this call, set known bits from that,
1547     // and then intersect with known bits based on other properties of the
1548     // function.
1549     if (MDNode *MD =
1550             Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1551       computeKnownBitsFromRangeMetadata(*MD, Known);
1552     if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
1553       computeKnownBits(RV, Known2, Depth + 1, Q);
1554       Known.Zero |= Known2.Zero;
1555       Known.One |= Known2.One;
1556     }
1557     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1558       switch (II->getIntrinsicID()) {
1559       default: break;
1560       case Intrinsic::bitreverse:
1561         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1562         Known.Zero |= Known2.Zero.reverseBits();
1563         Known.One |= Known2.One.reverseBits();
1564         break;
1565       case Intrinsic::bswap:
1566         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1567         Known.Zero |= Known2.Zero.byteSwap();
1568         Known.One |= Known2.One.byteSwap();
1569         break;
1570       case Intrinsic::ctlz: {
1571         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1572         // If we have a known 1, its position is our upper bound.
1573         unsigned PossibleLZ = Known2.One.countLeadingZeros();
1574         // If this call is undefined for 0, the result will be less than 2^n.
1575         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1576           PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1577         unsigned LowBits = Log2_32(PossibleLZ)+1;
1578         Known.Zero.setBitsFrom(LowBits);
1579         break;
1580       }
1581       case Intrinsic::cttz: {
1582         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1583         // If we have a known 1, its position is our upper bound.
1584         unsigned PossibleTZ = Known2.One.countTrailingZeros();
1585         // If this call is undefined for 0, the result will be less than 2^n.
1586         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1587           PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1588         unsigned LowBits = Log2_32(PossibleTZ)+1;
1589         Known.Zero.setBitsFrom(LowBits);
1590         break;
1591       }
1592       case Intrinsic::ctpop: {
1593         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1594         // We can bound the space the count needs.  Also, bits known to be zero
1595         // can't contribute to the population.
1596         unsigned BitsPossiblySet = Known2.countMaxPopulation();
1597         unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1598         Known.Zero.setBitsFrom(LowBits);
1599         // TODO: we could bound KnownOne using the lower bound on the number
1600         // of bits which might be set provided by popcnt KnownOne2.
1601         break;
1602       }
1603       case Intrinsic::fshr:
1604       case Intrinsic::fshl: {
1605         const APInt *SA;
1606         if (!match(I->getOperand(2), m_APInt(SA)))
1607           break;
1608 
1609         // Normalize to funnel shift left.
1610         uint64_t ShiftAmt = SA->urem(BitWidth);
1611         if (II->getIntrinsicID() == Intrinsic::fshr)
1612           ShiftAmt = BitWidth - ShiftAmt;
1613 
1614         KnownBits Known3(Known);
1615         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1616         computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1617 
1618         Known.Zero =
1619             Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1620         Known.One =
1621             Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1622         break;
1623       }
1624       case Intrinsic::uadd_sat:
1625       case Intrinsic::usub_sat: {
1626         bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1627         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1628         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1629 
1630         // Add: Leading ones of either operand are preserved.
1631         // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1632         // as leading zeros in the result.
1633         unsigned LeadingKnown;
1634         if (IsAdd)
1635           LeadingKnown = std::max(Known.countMinLeadingOnes(),
1636                                   Known2.countMinLeadingOnes());
1637         else
1638           LeadingKnown = std::max(Known.countMinLeadingZeros(),
1639                                   Known2.countMinLeadingOnes());
1640 
1641         Known = KnownBits::computeForAddSub(
1642             IsAdd, /* NSW */ false, Known, Known2);
1643 
1644         // We select between the operation result and all-ones/zero
1645         // respectively, so we can preserve known ones/zeros.
1646         if (IsAdd) {
1647           Known.One.setHighBits(LeadingKnown);
1648           Known.Zero.clearAllBits();
1649         } else {
1650           Known.Zero.setHighBits(LeadingKnown);
1651           Known.One.clearAllBits();
1652         }
1653         break;
1654       }
1655       case Intrinsic::x86_sse42_crc32_64_64:
1656         Known.Zero.setBitsFrom(32);
1657         break;
1658       }
1659     }
1660     break;
1661   case Instruction::ExtractElement:
1662     // Look through extract element. At the moment we keep this simple and skip
1663     // tracking the specific element. But at least we might find information
1664     // valid for all elements of the vector (for example if vector is sign
1665     // extended, shifted, etc).
1666     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1667     break;
1668   case Instruction::ExtractValue:
1669     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1670       const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1671       if (EVI->getNumIndices() != 1) break;
1672       if (EVI->getIndices()[0] == 0) {
1673         switch (II->getIntrinsicID()) {
1674         default: break;
1675         case Intrinsic::uadd_with_overflow:
1676         case Intrinsic::sadd_with_overflow:
1677           computeKnownBitsAddSub(true, II->getArgOperand(0),
1678                                  II->getArgOperand(1), false, Known, Known2,
1679                                  Depth, Q);
1680           break;
1681         case Intrinsic::usub_with_overflow:
1682         case Intrinsic::ssub_with_overflow:
1683           computeKnownBitsAddSub(false, II->getArgOperand(0),
1684                                  II->getArgOperand(1), false, Known, Known2,
1685                                  Depth, Q);
1686           break;
1687         case Intrinsic::umul_with_overflow:
1688         case Intrinsic::smul_with_overflow:
1689           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1690                               Known, Known2, Depth, Q);
1691           break;
1692         }
1693       }
1694     }
1695   }
1696 }
1697 
1698 /// Determine which bits of V are known to be either zero or one and return
1699 /// them.
1700 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1701   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1702   computeKnownBits(V, Known, Depth, Q);
1703   return Known;
1704 }
1705 
1706 /// Determine which bits of V are known to be either zero or one and return
1707 /// them in the Known bit set.
1708 ///
1709 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1710 /// we cannot optimize based on the assumption that it is zero without changing
1711 /// it to be an explicit zero.  If we don't change it to zero, other code could
1712 /// optimized based on the contradictory assumption that it is non-zero.
1713 /// Because instcombine aggressively folds operations with undef args anyway,
1714 /// this won't lose us code quality.
1715 ///
1716 /// This function is defined on values with integer type, values with pointer
1717 /// type, and vectors of integers.  In the case
1718 /// where V is a vector, known zero, and known one values are the
1719 /// same width as the vector element, and the bit is set only if it is true
1720 /// for all of the elements in the vector.
1721 void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
1722                       const Query &Q) {
1723   assert(V && "No Value?");
1724   assert(Depth <= MaxDepth && "Limit Search Depth");
1725   unsigned BitWidth = Known.getBitWidth();
1726 
1727   assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
1728           V->getType()->isPtrOrPtrVectorTy()) &&
1729          "Not integer or pointer type!");
1730 
1731   Type *ScalarTy = V->getType()->getScalarType();
1732   unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
1733     Q.DL.getPointerTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
1734   assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
1735   (void)BitWidth;
1736   (void)ExpectedWidth;
1737 
1738   const APInt *C;
1739   if (match(V, m_APInt(C))) {
1740     // We know all of the bits for a scalar constant or a splat vector constant!
1741     Known.One = *C;
1742     Known.Zero = ~Known.One;
1743     return;
1744   }
1745   // Null and aggregate-zero are all-zeros.
1746   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1747     Known.setAllZero();
1748     return;
1749   }
1750   // Handle a constant vector by taking the intersection of the known bits of
1751   // each element.
1752   if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1753     // We know that CDS must be a vector of integers. Take the intersection of
1754     // each element.
1755     Known.Zero.setAllBits(); Known.One.setAllBits();
1756     for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1757       APInt Elt = CDS->getElementAsAPInt(i);
1758       Known.Zero &= ~Elt;
1759       Known.One &= Elt;
1760     }
1761     return;
1762   }
1763 
1764   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1765     // We know that CV must be a vector of integers. Take the intersection of
1766     // each element.
1767     Known.Zero.setAllBits(); Known.One.setAllBits();
1768     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1769       Constant *Element = CV->getAggregateElement(i);
1770       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1771       if (!ElementCI) {
1772         Known.resetAll();
1773         return;
1774       }
1775       const APInt &Elt = ElementCI->getValue();
1776       Known.Zero &= ~Elt;
1777       Known.One &= Elt;
1778     }
1779     return;
1780   }
1781 
1782   // Start out not knowing anything.
1783   Known.resetAll();
1784 
1785   // We can't imply anything about undefs.
1786   if (isa<UndefValue>(V))
1787     return;
1788 
1789   // There's no point in looking through other users of ConstantData for
1790   // assumptions.  Confirm that we've handled them all.
1791   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1792 
1793   // Limit search depth.
1794   // All recursive calls that increase depth must come after this.
1795   if (Depth == MaxDepth)
1796     return;
1797 
1798   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1799   // the bits of its aliasee.
1800   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1801     if (!GA->isInterposable())
1802       computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1803     return;
1804   }
1805 
1806   if (const Operator *I = dyn_cast<Operator>(V))
1807     computeKnownBitsFromOperator(I, Known, Depth, Q);
1808 
1809   // Aligned pointers have trailing zeros - refine Known.Zero set
1810   if (V->getType()->isPointerTy()) {
1811     const MaybeAlign Align = V->getPointerAlignment(Q.DL);
1812     if (Align)
1813       Known.Zero.setLowBits(countTrailingZeros(Align->value()));
1814   }
1815 
1816   // computeKnownBitsFromAssume strictly refines Known.
1817   // Therefore, we run them after computeKnownBitsFromOperator.
1818 
1819   // Check whether a nearby assume intrinsic can determine some known bits.
1820   computeKnownBitsFromAssume(V, Known, Depth, Q);
1821 
1822   assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1823 }
1824 
1825 /// Return true if the given value is known to have exactly one
1826 /// bit set when defined. For vectors return true if every element is known to
1827 /// be a power of two when defined. Supports values with integer or pointer
1828 /// types and vectors of integers.
1829 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1830                             const Query &Q) {
1831   assert(Depth <= MaxDepth && "Limit Search Depth");
1832 
1833   // Attempt to match against constants.
1834   if (OrZero && match(V, m_Power2OrZero()))
1835       return true;
1836   if (match(V, m_Power2()))
1837       return true;
1838 
1839   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1840   // it is shifted off the end then the result is undefined.
1841   if (match(V, m_Shl(m_One(), m_Value())))
1842     return true;
1843 
1844   // (signmask) >>l X is clearly a power of two if the one is not shifted off
1845   // the bottom.  If it is shifted off the bottom then the result is undefined.
1846   if (match(V, m_LShr(m_SignMask(), m_Value())))
1847     return true;
1848 
1849   // The remaining tests are all recursive, so bail out if we hit the limit.
1850   if (Depth++ == MaxDepth)
1851     return false;
1852 
1853   Value *X = nullptr, *Y = nullptr;
1854   // A shift left or a logical shift right of a power of two is a power of two
1855   // or zero.
1856   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1857                  match(V, m_LShr(m_Value(X), m_Value()))))
1858     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1859 
1860   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1861     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1862 
1863   if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1864     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1865            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1866 
1867   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1868     // A power of two and'd with anything is a power of two or zero.
1869     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1870         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1871       return true;
1872     // X & (-X) is always a power of two or zero.
1873     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1874       return true;
1875     return false;
1876   }
1877 
1878   // Adding a power-of-two or zero to the same power-of-two or zero yields
1879   // either the original power-of-two, a larger power-of-two or zero.
1880   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1881     const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1882     if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
1883         Q.IIQ.hasNoSignedWrap(VOBO)) {
1884       if (match(X, m_And(m_Specific(Y), m_Value())) ||
1885           match(X, m_And(m_Value(), m_Specific(Y))))
1886         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1887           return true;
1888       if (match(Y, m_And(m_Specific(X), m_Value())) ||
1889           match(Y, m_And(m_Value(), m_Specific(X))))
1890         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1891           return true;
1892 
1893       unsigned BitWidth = V->getType()->getScalarSizeInBits();
1894       KnownBits LHSBits(BitWidth);
1895       computeKnownBits(X, LHSBits, Depth, Q);
1896 
1897       KnownBits RHSBits(BitWidth);
1898       computeKnownBits(Y, RHSBits, Depth, Q);
1899       // If i8 V is a power of two or zero:
1900       //  ZeroBits: 1 1 1 0 1 1 1 1
1901       // ~ZeroBits: 0 0 0 1 0 0 0 0
1902       if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
1903         // If OrZero isn't set, we cannot give back a zero result.
1904         // Make sure either the LHS or RHS has a bit set.
1905         if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
1906           return true;
1907     }
1908   }
1909 
1910   // An exact divide or right shift can only shift off zero bits, so the result
1911   // is a power of two only if the first operand is a power of two and not
1912   // copying a sign bit (sdiv int_min, 2).
1913   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1914       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1915     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1916                                   Depth, Q);
1917   }
1918 
1919   return false;
1920 }
1921 
1922 /// Test whether a GEP's result is known to be non-null.
1923 ///
1924 /// Uses properties inherent in a GEP to try to determine whether it is known
1925 /// to be non-null.
1926 ///
1927 /// Currently this routine does not support vector GEPs.
1928 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1929                               const Query &Q) {
1930   const Function *F = nullptr;
1931   if (const Instruction *I = dyn_cast<Instruction>(GEP))
1932     F = I->getFunction();
1933 
1934   if (!GEP->isInBounds() ||
1935       NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
1936     return false;
1937 
1938   // FIXME: Support vector-GEPs.
1939   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1940 
1941   // If the base pointer is non-null, we cannot walk to a null address with an
1942   // inbounds GEP in address space zero.
1943   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1944     return true;
1945 
1946   // Walk the GEP operands and see if any operand introduces a non-zero offset.
1947   // If so, then the GEP cannot produce a null pointer, as doing so would
1948   // inherently violate the inbounds contract within address space zero.
1949   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1950        GTI != GTE; ++GTI) {
1951     // Struct types are easy -- they must always be indexed by a constant.
1952     if (StructType *STy = GTI.getStructTypeOrNull()) {
1953       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1954       unsigned ElementIdx = OpC->getZExtValue();
1955       const StructLayout *SL = Q.DL.getStructLayout(STy);
1956       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1957       if (ElementOffset > 0)
1958         return true;
1959       continue;
1960     }
1961 
1962     // If we have a zero-sized type, the index doesn't matter. Keep looping.
1963     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1964       continue;
1965 
1966     // Fast path the constant operand case both for efficiency and so we don't
1967     // increment Depth when just zipping down an all-constant GEP.
1968     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1969       if (!OpC->isZero())
1970         return true;
1971       continue;
1972     }
1973 
1974     // We post-increment Depth here because while isKnownNonZero increments it
1975     // as well, when we pop back up that increment won't persist. We don't want
1976     // to recurse 10k times just because we have 10k GEP operands. We don't
1977     // bail completely out because we want to handle constant GEPs regardless
1978     // of depth.
1979     if (Depth++ >= MaxDepth)
1980       continue;
1981 
1982     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1983       return true;
1984   }
1985 
1986   return false;
1987 }
1988 
1989 static bool isKnownNonNullFromDominatingCondition(const Value *V,
1990                                                   const Instruction *CtxI,
1991                                                   const DominatorTree *DT) {
1992   if (isa<Constant>(V))
1993     return false;
1994 
1995   if (!CtxI || !DT)
1996     return false;
1997 
1998   unsigned NumUsesExplored = 0;
1999   for (auto *U : V->users()) {
2000     // Avoid massive lists
2001     if (NumUsesExplored >= DomConditionsMaxUses)
2002       break;
2003     NumUsesExplored++;
2004 
2005     // If the value is used as an argument to a call or invoke, then argument
2006     // attributes may provide an answer about null-ness.
2007     if (auto CS = ImmutableCallSite(U))
2008       if (auto *CalledFunc = CS.getCalledFunction())
2009         for (const Argument &Arg : CalledFunc->args())
2010           if (CS.getArgOperand(Arg.getArgNo()) == V &&
2011               Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
2012             return true;
2013 
2014     // If the value is used as a load/store, then the pointer must be non null.
2015     if (V == getLoadStorePointerOperand(U)) {
2016       const Instruction *I = cast<Instruction>(U);
2017       if (!NullPointerIsDefined(I->getFunction(),
2018                                 V->getType()->getPointerAddressSpace()) &&
2019           DT->dominates(I, CtxI))
2020         return true;
2021     }
2022 
2023     // Consider only compare instructions uniquely controlling a branch
2024     CmpInst::Predicate Pred;
2025     if (!match(const_cast<User *>(U),
2026                m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
2027         (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
2028       continue;
2029 
2030     SmallVector<const User *, 4> WorkList;
2031     SmallPtrSet<const User *, 4> Visited;
2032     for (auto *CmpU : U->users()) {
2033       assert(WorkList.empty() && "Should be!");
2034       if (Visited.insert(CmpU).second)
2035         WorkList.push_back(CmpU);
2036 
2037       while (!WorkList.empty()) {
2038         auto *Curr = WorkList.pop_back_val();
2039 
2040         // If a user is an AND, add all its users to the work list. We only
2041         // propagate "pred != null" condition through AND because it is only
2042         // correct to assume that all conditions of AND are met in true branch.
2043         // TODO: Support similar logic of OR and EQ predicate?
2044         if (Pred == ICmpInst::ICMP_NE)
2045           if (auto *BO = dyn_cast<BinaryOperator>(Curr))
2046             if (BO->getOpcode() == Instruction::And) {
2047               for (auto *BOU : BO->users())
2048                 if (Visited.insert(BOU).second)
2049                   WorkList.push_back(BOU);
2050               continue;
2051             }
2052 
2053         if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2054           assert(BI->isConditional() && "uses a comparison!");
2055 
2056           BasicBlock *NonNullSuccessor =
2057               BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
2058           BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2059           if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2060             return true;
2061         } else if (Pred == ICmpInst::ICMP_NE && isGuard(Curr) &&
2062                    DT->dominates(cast<Instruction>(Curr), CtxI)) {
2063           return true;
2064         }
2065       }
2066     }
2067   }
2068 
2069   return false;
2070 }
2071 
2072 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2073 /// ensure that the value it's attached to is never Value?  'RangeType' is
2074 /// is the type of the value described by the range.
2075 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2076   const unsigned NumRanges = Ranges->getNumOperands() / 2;
2077   assert(NumRanges >= 1);
2078   for (unsigned i = 0; i < NumRanges; ++i) {
2079     ConstantInt *Lower =
2080         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2081     ConstantInt *Upper =
2082         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2083     ConstantRange Range(Lower->getValue(), Upper->getValue());
2084     if (Range.contains(Value))
2085       return false;
2086   }
2087   return true;
2088 }
2089 
2090 /// Return true if the given value is known to be non-zero when defined. For
2091 /// vectors, return true if every element is known to be non-zero when
2092 /// defined. For pointers, if the context instruction and dominator tree are
2093 /// specified, perform context-sensitive analysis and return true if the
2094 /// pointer couldn't possibly be null at the specified instruction.
2095 /// Supports values with integer or pointer type and vectors of integers.
2096 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
2097   if (auto *C = dyn_cast<Constant>(V)) {
2098     if (C->isNullValue())
2099       return false;
2100     if (isa<ConstantInt>(C))
2101       // Must be non-zero due to null test above.
2102       return true;
2103 
2104     if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2105       // See the comment for IntToPtr/PtrToInt instructions below.
2106       if (CE->getOpcode() == Instruction::IntToPtr ||
2107           CE->getOpcode() == Instruction::PtrToInt)
2108         if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType()) <=
2109             Q.DL.getTypeSizeInBits(CE->getType()))
2110           return isKnownNonZero(CE->getOperand(0), Depth, Q);
2111     }
2112 
2113     // For constant vectors, check that all elements are undefined or known
2114     // non-zero to determine that the whole vector is known non-zero.
2115     if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
2116       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2117         Constant *Elt = C->getAggregateElement(i);
2118         if (!Elt || Elt->isNullValue())
2119           return false;
2120         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2121           return false;
2122       }
2123       return true;
2124     }
2125 
2126     // A global variable in address space 0 is non null unless extern weak
2127     // or an absolute symbol reference. Other address spaces may have null as a
2128     // valid address for a global, so we can't assume anything.
2129     if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2130       if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2131           GV->getType()->getAddressSpace() == 0)
2132         return true;
2133     } else
2134       return false;
2135   }
2136 
2137   if (auto *I = dyn_cast<Instruction>(V)) {
2138     if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2139       // If the possible ranges don't contain zero, then the value is
2140       // definitely non-zero.
2141       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2142         const APInt ZeroValue(Ty->getBitWidth(), 0);
2143         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2144           return true;
2145       }
2146     }
2147   }
2148 
2149   if (isKnownNonZeroFromAssume(V, Q))
2150     return true;
2151 
2152   // Some of the tests below are recursive, so bail out if we hit the limit.
2153   if (Depth++ >= MaxDepth)
2154     return false;
2155 
2156   // Check for pointer simplifications.
2157   if (V->getType()->isPointerTy()) {
2158     // Alloca never returns null, malloc might.
2159     if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2160       return true;
2161 
2162     // A byval, inalloca, or nonnull argument is never null.
2163     if (const Argument *A = dyn_cast<Argument>(V))
2164       if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
2165         return true;
2166 
2167     // A Load tagged with nonnull metadata is never null.
2168     if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2169       if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2170         return true;
2171 
2172     if (const auto *Call = dyn_cast<CallBase>(V)) {
2173       if (Call->isReturnNonNull())
2174         return true;
2175       if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2176         return isKnownNonZero(RP, Depth, Q);
2177     }
2178   }
2179 
2180   if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2181     return true;
2182 
2183   // Check for recursive pointer simplifications.
2184   if (V->getType()->isPointerTy()) {
2185     // Look through bitcast operations, GEPs, and int2ptr instructions as they
2186     // do not alter the value, or at least not the nullness property of the
2187     // value, e.g., int2ptr is allowed to zero/sign extend the value.
2188     //
2189     // Note that we have to take special care to avoid looking through
2190     // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2191     // as casts that can alter the value, e.g., AddrSpaceCasts.
2192     if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2193       if (isGEPKnownNonNull(GEP, Depth, Q))
2194         return true;
2195 
2196     if (auto *BCO = dyn_cast<BitCastOperator>(V))
2197       return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2198 
2199     if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2200       if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()) <=
2201           Q.DL.getTypeSizeInBits(I2P->getDestTy()))
2202         return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2203   }
2204 
2205   // Similar to int2ptr above, we can look through ptr2int here if the cast
2206   // is a no-op or an extend and not a truncate.
2207   if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2208     if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()) <=
2209         Q.DL.getTypeSizeInBits(P2I->getDestTy()))
2210       return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2211 
2212   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2213 
2214   // X | Y != 0 if X != 0 or Y != 0.
2215   Value *X = nullptr, *Y = nullptr;
2216   if (match(V, m_Or(m_Value(X), m_Value(Y))))
2217     return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
2218 
2219   // ext X != 0 if X != 0.
2220   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2221     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2222 
2223   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
2224   // if the lowest bit is shifted off the end.
2225   if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2226     // shl nuw can't remove any non-zero bits.
2227     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2228     if (Q.IIQ.hasNoUnsignedWrap(BO))
2229       return isKnownNonZero(X, Depth, Q);
2230 
2231     KnownBits Known(BitWidth);
2232     computeKnownBits(X, Known, Depth, Q);
2233     if (Known.One[0])
2234       return true;
2235   }
2236   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
2237   // defined if the sign bit is shifted off the end.
2238   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2239     // shr exact can only shift out zero bits.
2240     const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2241     if (BO->isExact())
2242       return isKnownNonZero(X, Depth, Q);
2243 
2244     KnownBits Known = computeKnownBits(X, Depth, Q);
2245     if (Known.isNegative())
2246       return true;
2247 
2248     // If the shifter operand is a constant, and all of the bits shifted
2249     // out are known to be zero, and X is known non-zero then at least one
2250     // non-zero bit must remain.
2251     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2252       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2253       // Is there a known one in the portion not shifted out?
2254       if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2255         return true;
2256       // Are all the bits to be shifted out known zero?
2257       if (Known.countMinTrailingZeros() >= ShiftVal)
2258         return isKnownNonZero(X, Depth, Q);
2259     }
2260   }
2261   // div exact can only produce a zero if the dividend is zero.
2262   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2263     return isKnownNonZero(X, Depth, Q);
2264   }
2265   // X + Y.
2266   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2267     KnownBits XKnown = computeKnownBits(X, Depth, Q);
2268     KnownBits YKnown = computeKnownBits(Y, Depth, Q);
2269 
2270     // If X and Y are both non-negative (as signed values) then their sum is not
2271     // zero unless both X and Y are zero.
2272     if (XKnown.isNonNegative() && YKnown.isNonNegative())
2273       if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
2274         return true;
2275 
2276     // If X and Y are both negative (as signed values) then their sum is not
2277     // zero unless both X and Y equal INT_MIN.
2278     if (XKnown.isNegative() && YKnown.isNegative()) {
2279       APInt Mask = APInt::getSignedMaxValue(BitWidth);
2280       // The sign bit of X is set.  If some other bit is set then X is not equal
2281       // to INT_MIN.
2282       if (XKnown.One.intersects(Mask))
2283         return true;
2284       // The sign bit of Y is set.  If some other bit is set then Y is not equal
2285       // to INT_MIN.
2286       if (YKnown.One.intersects(Mask))
2287         return true;
2288     }
2289 
2290     // The sum of a non-negative number and a power of two is not zero.
2291     if (XKnown.isNonNegative() &&
2292         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2293       return true;
2294     if (YKnown.isNonNegative() &&
2295         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2296       return true;
2297   }
2298   // X * Y.
2299   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2300     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2301     // If X and Y are non-zero then so is X * Y as long as the multiplication
2302     // does not overflow.
2303     if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2304         isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
2305       return true;
2306   }
2307   // (C ? X : Y) != 0 if X != 0 and Y != 0.
2308   else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2309     if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
2310         isKnownNonZero(SI->getFalseValue(), Depth, Q))
2311       return true;
2312   }
2313   // PHI
2314   else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2315     // Try and detect a recurrence that monotonically increases from a
2316     // starting value, as these are common as induction variables.
2317     if (PN->getNumIncomingValues() == 2) {
2318       Value *Start = PN->getIncomingValue(0);
2319       Value *Induction = PN->getIncomingValue(1);
2320       if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2321         std::swap(Start, Induction);
2322       if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2323         if (!C->isZero() && !C->isNegative()) {
2324           ConstantInt *X;
2325           if (Q.IIQ.UseInstrInfo &&
2326               (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2327                match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2328               !X->isNegative())
2329             return true;
2330         }
2331       }
2332     }
2333     // Check if all incoming values are non-zero constant.
2334     bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
2335       return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
2336     });
2337     if (AllNonZeroConstants)
2338       return true;
2339   }
2340 
2341   KnownBits Known(BitWidth);
2342   computeKnownBits(V, Known, Depth, Q);
2343   return Known.One != 0;
2344 }
2345 
2346 /// Return true if V2 == V1 + X, where X is known non-zero.
2347 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2348   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2349   if (!BO || BO->getOpcode() != Instruction::Add)
2350     return false;
2351   Value *Op = nullptr;
2352   if (V2 == BO->getOperand(0))
2353     Op = BO->getOperand(1);
2354   else if (V2 == BO->getOperand(1))
2355     Op = BO->getOperand(0);
2356   else
2357     return false;
2358   return isKnownNonZero(Op, 0, Q);
2359 }
2360 
2361 /// Return true if it is known that V1 != V2.
2362 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2363   if (V1 == V2)
2364     return false;
2365   if (V1->getType() != V2->getType())
2366     // We can't look through casts yet.
2367     return false;
2368   if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2369     return true;
2370 
2371   if (V1->getType()->isIntOrIntVectorTy()) {
2372     // Are any known bits in V1 contradictory to known bits in V2? If V1
2373     // has a known zero where V2 has a known one, they must not be equal.
2374     KnownBits Known1 = computeKnownBits(V1, 0, Q);
2375     KnownBits Known2 = computeKnownBits(V2, 0, Q);
2376 
2377     if (Known1.Zero.intersects(Known2.One) ||
2378         Known2.Zero.intersects(Known1.One))
2379       return true;
2380   }
2381   return false;
2382 }
2383 
2384 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
2385 /// simplify operations downstream. Mask is known to be zero for bits that V
2386 /// cannot have.
2387 ///
2388 /// This function is defined on values with integer type, values with pointer
2389 /// type, and vectors of integers.  In the case
2390 /// where V is a vector, the mask, known zero, and known one values are the
2391 /// same width as the vector element, and the bit is set only if it is true
2392 /// for all of the elements in the vector.
2393 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2394                        const Query &Q) {
2395   KnownBits Known(Mask.getBitWidth());
2396   computeKnownBits(V, Known, Depth, Q);
2397   return Mask.isSubsetOf(Known.Zero);
2398 }
2399 
2400 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2401 // Returns the input and lower/upper bounds.
2402 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2403                                 const APInt *&CLow, const APInt *&CHigh) {
2404   assert(isa<Operator>(Select) &&
2405          cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2406          "Input should be a Select!");
2407 
2408   const Value *LHS = nullptr, *RHS = nullptr;
2409   SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2410   if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2411     return false;
2412 
2413   if (!match(RHS, m_APInt(CLow)))
2414     return false;
2415 
2416   const Value *LHS2 = nullptr, *RHS2 = nullptr;
2417   SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2418   if (getInverseMinMaxFlavor(SPF) != SPF2)
2419     return false;
2420 
2421   if (!match(RHS2, m_APInt(CHigh)))
2422     return false;
2423 
2424   if (SPF == SPF_SMIN)
2425     std::swap(CLow, CHigh);
2426 
2427   In = LHS2;
2428   return CLow->sle(*CHigh);
2429 }
2430 
2431 /// For vector constants, loop over the elements and find the constant with the
2432 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2433 /// or if any element was not analyzed; otherwise, return the count for the
2434 /// element with the minimum number of sign bits.
2435 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2436                                                  unsigned TyBits) {
2437   const auto *CV = dyn_cast<Constant>(V);
2438   if (!CV || !CV->getType()->isVectorTy())
2439     return 0;
2440 
2441   unsigned MinSignBits = TyBits;
2442   unsigned NumElts = CV->getType()->getVectorNumElements();
2443   for (unsigned i = 0; i != NumElts; ++i) {
2444     // If we find a non-ConstantInt, bail out.
2445     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2446     if (!Elt)
2447       return 0;
2448 
2449     MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2450   }
2451 
2452   return MinSignBits;
2453 }
2454 
2455 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2456                                        const Query &Q);
2457 
2458 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2459                                    const Query &Q) {
2460   unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2461   assert(Result > 0 && "At least one sign bit needs to be present!");
2462   return Result;
2463 }
2464 
2465 /// Return the number of times the sign bit of the register is replicated into
2466 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2467 /// (itself), but other cases can give us information. For example, immediately
2468 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2469 /// other, so we return 3. For vectors, return the number of sign bits for the
2470 /// vector element with the minimum number of known sign bits.
2471 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2472                                        const Query &Q) {
2473   assert(Depth <= MaxDepth && "Limit Search Depth");
2474 
2475   // We return the minimum number of sign bits that are guaranteed to be present
2476   // in V, so for undef we have to conservatively return 1.  We don't have the
2477   // same behavior for poison though -- that's a FIXME today.
2478 
2479   Type *ScalarTy = V->getType()->getScalarType();
2480   unsigned TyBits = ScalarTy->isPointerTy() ?
2481     Q.DL.getPointerTypeSizeInBits(ScalarTy) :
2482     Q.DL.getTypeSizeInBits(ScalarTy);
2483 
2484   unsigned Tmp, Tmp2;
2485   unsigned FirstAnswer = 1;
2486 
2487   // Note that ConstantInt is handled by the general computeKnownBits case
2488   // below.
2489 
2490   if (Depth == MaxDepth)
2491     return 1;  // Limit search depth.
2492 
2493   if (auto *U = dyn_cast<Operator>(V)) {
2494     switch (Operator::getOpcode(V)) {
2495     default: break;
2496     case Instruction::SExt:
2497       Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2498       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2499 
2500     case Instruction::SDiv: {
2501       const APInt *Denominator;
2502       // sdiv X, C -> adds log(C) sign bits.
2503       if (match(U->getOperand(1), m_APInt(Denominator))) {
2504 
2505         // Ignore non-positive denominator.
2506         if (!Denominator->isStrictlyPositive())
2507           break;
2508 
2509         // Calculate the incoming numerator bits.
2510         unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2511 
2512         // Add floor(log(C)) bits to the numerator bits.
2513         return std::min(TyBits, NumBits + Denominator->logBase2());
2514       }
2515       break;
2516     }
2517 
2518     case Instruction::SRem: {
2519       const APInt *Denominator;
2520       // srem X, C -> we know that the result is within [-C+1,C) when C is a
2521       // positive constant.  This let us put a lower bound on the number of sign
2522       // bits.
2523       if (match(U->getOperand(1), m_APInt(Denominator))) {
2524 
2525         // Ignore non-positive denominator.
2526         if (!Denominator->isStrictlyPositive())
2527           break;
2528 
2529         // Calculate the incoming numerator bits. SRem by a positive constant
2530         // can't lower the number of sign bits.
2531         unsigned NumrBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2532 
2533         // Calculate the leading sign bit constraints by examining the
2534         // denominator.  Given that the denominator is positive, there are two
2535         // cases:
2536         //
2537         //  1. the numerator is positive. The result range is [0,C) and [0,C) u<
2538         //     (1 << ceilLogBase2(C)).
2539         //
2540         //  2. the numerator is negative. Then the result range is (-C,0] and
2541         //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2542         //
2543         // Thus a lower bound on the number of sign bits is `TyBits -
2544         // ceilLogBase2(C)`.
2545 
2546         unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2547         return std::max(NumrBits, ResBits);
2548       }
2549       break;
2550     }
2551 
2552     case Instruction::AShr: {
2553       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2554       // ashr X, C   -> adds C sign bits.  Vectors too.
2555       const APInt *ShAmt;
2556       if (match(U->getOperand(1), m_APInt(ShAmt))) {
2557         if (ShAmt->uge(TyBits))
2558           break; // Bad shift.
2559         unsigned ShAmtLimited = ShAmt->getZExtValue();
2560         Tmp += ShAmtLimited;
2561         if (Tmp > TyBits) Tmp = TyBits;
2562       }
2563       return Tmp;
2564     }
2565     case Instruction::Shl: {
2566       const APInt *ShAmt;
2567       if (match(U->getOperand(1), m_APInt(ShAmt))) {
2568         // shl destroys sign bits.
2569         Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2570         if (ShAmt->uge(TyBits) ||   // Bad shift.
2571             ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2572         Tmp2 = ShAmt->getZExtValue();
2573         return Tmp - Tmp2;
2574       }
2575       break;
2576     }
2577     case Instruction::And:
2578     case Instruction::Or:
2579     case Instruction::Xor: // NOT is handled here.
2580       // Logical binary ops preserve the number of sign bits at the worst.
2581       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2582       if (Tmp != 1) {
2583         Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2584         FirstAnswer = std::min(Tmp, Tmp2);
2585         // We computed what we know about the sign bits as our first
2586         // answer. Now proceed to the generic code that uses
2587         // computeKnownBits, and pick whichever answer is better.
2588       }
2589       break;
2590 
2591     case Instruction::Select: {
2592       // If we have a clamp pattern, we know that the number of sign bits will
2593       // be the minimum of the clamp min/max range.
2594       const Value *X;
2595       const APInt *CLow, *CHigh;
2596       if (isSignedMinMaxClamp(U, X, CLow, CHigh))
2597         return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
2598 
2599       Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2600       if (Tmp == 1) break;
2601       Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2602       return std::min(Tmp, Tmp2);
2603     }
2604 
2605     case Instruction::Add:
2606       // Add can have at most one carry bit.  Thus we know that the output
2607       // is, at worst, one more bit than the inputs.
2608       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2609       if (Tmp == 1) break;
2610 
2611       // Special case decrementing a value (ADD X, -1):
2612       if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2613         if (CRHS->isAllOnesValue()) {
2614           KnownBits Known(TyBits);
2615           computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2616 
2617           // If the input is known to be 0 or 1, the output is 0/-1, which is
2618           // all sign bits set.
2619           if ((Known.Zero | 1).isAllOnesValue())
2620             return TyBits;
2621 
2622           // If we are subtracting one from a positive number, there is no carry
2623           // out of the result.
2624           if (Known.isNonNegative())
2625             return Tmp;
2626         }
2627 
2628       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2629       if (Tmp2 == 1) break;
2630       return std::min(Tmp, Tmp2) - 1;
2631 
2632     case Instruction::Sub:
2633       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2634       if (Tmp2 == 1) break;
2635 
2636       // Handle NEG.
2637       if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2638         if (CLHS->isNullValue()) {
2639           KnownBits Known(TyBits);
2640           computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2641           // If the input is known to be 0 or 1, the output is 0/-1, which is
2642           // all sign bits set.
2643           if ((Known.Zero | 1).isAllOnesValue())
2644             return TyBits;
2645 
2646           // If the input is known to be positive (the sign bit is known clear),
2647           // the output of the NEG has the same number of sign bits as the
2648           // input.
2649           if (Known.isNonNegative())
2650             return Tmp2;
2651 
2652           // Otherwise, we treat this like a SUB.
2653         }
2654 
2655       // Sub can have at most one carry bit.  Thus we know that the output
2656       // is, at worst, one more bit than the inputs.
2657       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2658       if (Tmp == 1) break;
2659       return std::min(Tmp, Tmp2) - 1;
2660 
2661     case Instruction::Mul: {
2662       // The output of the Mul can be at most twice the valid bits in the
2663       // inputs.
2664       unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2665       if (SignBitsOp0 == 1) break;
2666       unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2667       if (SignBitsOp1 == 1) break;
2668       unsigned OutValidBits =
2669           (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2670       return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2671     }
2672 
2673     case Instruction::PHI: {
2674       const PHINode *PN = cast<PHINode>(U);
2675       unsigned NumIncomingValues = PN->getNumIncomingValues();
2676       // Don't analyze large in-degree PHIs.
2677       if (NumIncomingValues > 4) break;
2678       // Unreachable blocks may have zero-operand PHI nodes.
2679       if (NumIncomingValues == 0) break;
2680 
2681       // Take the minimum of all incoming values.  This can't infinitely loop
2682       // because of our depth threshold.
2683       Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2684       for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2685         if (Tmp == 1) return Tmp;
2686         Tmp = std::min(
2687             Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2688       }
2689       return Tmp;
2690     }
2691 
2692     case Instruction::Trunc:
2693       // FIXME: it's tricky to do anything useful for this, but it is an
2694       // important case for targets like X86.
2695       break;
2696 
2697     case Instruction::ExtractElement:
2698       // Look through extract element. At the moment we keep this simple and
2699       // skip tracking the specific element. But at least we might find
2700       // information valid for all elements of the vector (for example if vector
2701       // is sign extended, shifted, etc).
2702       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2703 
2704     case Instruction::ShuffleVector: {
2705       // TODO: This is copied almost directly from the SelectionDAG version of
2706       //       ComputeNumSignBits. It would be better if we could share common
2707       //       code. If not, make sure that changes are translated to the DAG.
2708 
2709       // Collect the minimum number of sign bits that are shared by every vector
2710       // element referenced by the shuffle.
2711       auto *Shuf = cast<ShuffleVectorInst>(U);
2712       int NumElts = Shuf->getOperand(0)->getType()->getVectorNumElements();
2713       int NumMaskElts = Shuf->getMask()->getType()->getVectorNumElements();
2714       APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2715       for (int i = 0; i != NumMaskElts; ++i) {
2716         int M = Shuf->getMaskValue(i);
2717         assert(M < NumElts * 2 && "Invalid shuffle mask constant");
2718         // For undef elements, we don't know anything about the common state of
2719         // the shuffle result.
2720         if (M == -1)
2721           return 1;
2722         if (M < NumElts)
2723           DemandedLHS.setBit(M % NumElts);
2724         else
2725           DemandedRHS.setBit(M % NumElts);
2726       }
2727       Tmp = std::numeric_limits<unsigned>::max();
2728       if (!!DemandedLHS)
2729         Tmp = ComputeNumSignBits(Shuf->getOperand(0), Depth + 1, Q);
2730       if (!!DemandedRHS) {
2731         Tmp2 = ComputeNumSignBits(Shuf->getOperand(1), Depth + 1, Q);
2732         Tmp = std::min(Tmp, Tmp2);
2733       }
2734       // If we don't know anything, early out and try computeKnownBits
2735       // fall-back.
2736       if (Tmp == 1)
2737         break;
2738       assert(Tmp <= V->getType()->getScalarSizeInBits() &&
2739              "Failed to determine minimum sign bits");
2740       return Tmp;
2741     }
2742     }
2743   }
2744 
2745   // Finally, if we can prove that the top bits of the result are 0's or 1's,
2746   // use this information.
2747 
2748   // If we can examine all elements of a vector constant successfully, we're
2749   // done (we can't do any better than that). If not, keep trying.
2750   if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2751     return VecSignBits;
2752 
2753   KnownBits Known(TyBits);
2754   computeKnownBits(V, Known, Depth, Q);
2755 
2756   // If we know that the sign bit is either zero or one, determine the number of
2757   // identical bits in the top of the input value.
2758   return std::max(FirstAnswer, Known.countMinSignBits());
2759 }
2760 
2761 /// This function computes the integer multiple of Base that equals V.
2762 /// If successful, it returns true and returns the multiple in
2763 /// Multiple. If unsuccessful, it returns false. It looks
2764 /// through SExt instructions only if LookThroughSExt is true.
2765 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2766                            bool LookThroughSExt, unsigned Depth) {
2767   assert(V && "No Value?");
2768   assert(Depth <= MaxDepth && "Limit Search Depth");
2769   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2770 
2771   Type *T = V->getType();
2772 
2773   ConstantInt *CI = dyn_cast<ConstantInt>(V);
2774 
2775   if (Base == 0)
2776     return false;
2777 
2778   if (Base == 1) {
2779     Multiple = V;
2780     return true;
2781   }
2782 
2783   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2784   Constant *BaseVal = ConstantInt::get(T, Base);
2785   if (CO && CO == BaseVal) {
2786     // Multiple is 1.
2787     Multiple = ConstantInt::get(T, 1);
2788     return true;
2789   }
2790 
2791   if (CI && CI->getZExtValue() % Base == 0) {
2792     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2793     return true;
2794   }
2795 
2796   if (Depth == MaxDepth) return false;  // Limit search depth.
2797 
2798   Operator *I = dyn_cast<Operator>(V);
2799   if (!I) return false;
2800 
2801   switch (I->getOpcode()) {
2802   default: break;
2803   case Instruction::SExt:
2804     if (!LookThroughSExt) return false;
2805     // otherwise fall through to ZExt
2806     LLVM_FALLTHROUGH;
2807   case Instruction::ZExt:
2808     return ComputeMultiple(I->getOperand(0), Base, Multiple,
2809                            LookThroughSExt, Depth+1);
2810   case Instruction::Shl:
2811   case Instruction::Mul: {
2812     Value *Op0 = I->getOperand(0);
2813     Value *Op1 = I->getOperand(1);
2814 
2815     if (I->getOpcode() == Instruction::Shl) {
2816       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2817       if (!Op1CI) return false;
2818       // Turn Op0 << Op1 into Op0 * 2^Op1
2819       APInt Op1Int = Op1CI->getValue();
2820       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2821       APInt API(Op1Int.getBitWidth(), 0);
2822       API.setBit(BitToSet);
2823       Op1 = ConstantInt::get(V->getContext(), API);
2824     }
2825 
2826     Value *Mul0 = nullptr;
2827     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2828       if (Constant *Op1C = dyn_cast<Constant>(Op1))
2829         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2830           if (Op1C->getType()->getPrimitiveSizeInBits() <
2831               MulC->getType()->getPrimitiveSizeInBits())
2832             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2833           if (Op1C->getType()->getPrimitiveSizeInBits() >
2834               MulC->getType()->getPrimitiveSizeInBits())
2835             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2836 
2837           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2838           Multiple = ConstantExpr::getMul(MulC, Op1C);
2839           return true;
2840         }
2841 
2842       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2843         if (Mul0CI->getValue() == 1) {
2844           // V == Base * Op1, so return Op1
2845           Multiple = Op1;
2846           return true;
2847         }
2848     }
2849 
2850     Value *Mul1 = nullptr;
2851     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2852       if (Constant *Op0C = dyn_cast<Constant>(Op0))
2853         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2854           if (Op0C->getType()->getPrimitiveSizeInBits() <
2855               MulC->getType()->getPrimitiveSizeInBits())
2856             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2857           if (Op0C->getType()->getPrimitiveSizeInBits() >
2858               MulC->getType()->getPrimitiveSizeInBits())
2859             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2860 
2861           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2862           Multiple = ConstantExpr::getMul(MulC, Op0C);
2863           return true;
2864         }
2865 
2866       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2867         if (Mul1CI->getValue() == 1) {
2868           // V == Base * Op0, so return Op0
2869           Multiple = Op0;
2870           return true;
2871         }
2872     }
2873   }
2874   }
2875 
2876   // We could not determine if V is a multiple of Base.
2877   return false;
2878 }
2879 
2880 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2881                                             const TargetLibraryInfo *TLI) {
2882   const Function *F = ICS.getCalledFunction();
2883   if (!F)
2884     return Intrinsic::not_intrinsic;
2885 
2886   if (F->isIntrinsic())
2887     return F->getIntrinsicID();
2888 
2889   if (!TLI)
2890     return Intrinsic::not_intrinsic;
2891 
2892   LibFunc Func;
2893   // We're going to make assumptions on the semantics of the functions, check
2894   // that the target knows that it's available in this environment and it does
2895   // not have local linkage.
2896   if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2897     return Intrinsic::not_intrinsic;
2898 
2899   if (!ICS.onlyReadsMemory())
2900     return Intrinsic::not_intrinsic;
2901 
2902   // Otherwise check if we have a call to a function that can be turned into a
2903   // vector intrinsic.
2904   switch (Func) {
2905   default:
2906     break;
2907   case LibFunc_sin:
2908   case LibFunc_sinf:
2909   case LibFunc_sinl:
2910     return Intrinsic::sin;
2911   case LibFunc_cos:
2912   case LibFunc_cosf:
2913   case LibFunc_cosl:
2914     return Intrinsic::cos;
2915   case LibFunc_exp:
2916   case LibFunc_expf:
2917   case LibFunc_expl:
2918     return Intrinsic::exp;
2919   case LibFunc_exp2:
2920   case LibFunc_exp2f:
2921   case LibFunc_exp2l:
2922     return Intrinsic::exp2;
2923   case LibFunc_log:
2924   case LibFunc_logf:
2925   case LibFunc_logl:
2926     return Intrinsic::log;
2927   case LibFunc_log10:
2928   case LibFunc_log10f:
2929   case LibFunc_log10l:
2930     return Intrinsic::log10;
2931   case LibFunc_log2:
2932   case LibFunc_log2f:
2933   case LibFunc_log2l:
2934     return Intrinsic::log2;
2935   case LibFunc_fabs:
2936   case LibFunc_fabsf:
2937   case LibFunc_fabsl:
2938     return Intrinsic::fabs;
2939   case LibFunc_fmin:
2940   case LibFunc_fminf:
2941   case LibFunc_fminl:
2942     return Intrinsic::minnum;
2943   case LibFunc_fmax:
2944   case LibFunc_fmaxf:
2945   case LibFunc_fmaxl:
2946     return Intrinsic::maxnum;
2947   case LibFunc_copysign:
2948   case LibFunc_copysignf:
2949   case LibFunc_copysignl:
2950     return Intrinsic::copysign;
2951   case LibFunc_floor:
2952   case LibFunc_floorf:
2953   case LibFunc_floorl:
2954     return Intrinsic::floor;
2955   case LibFunc_ceil:
2956   case LibFunc_ceilf:
2957   case LibFunc_ceill:
2958     return Intrinsic::ceil;
2959   case LibFunc_trunc:
2960   case LibFunc_truncf:
2961   case LibFunc_truncl:
2962     return Intrinsic::trunc;
2963   case LibFunc_rint:
2964   case LibFunc_rintf:
2965   case LibFunc_rintl:
2966     return Intrinsic::rint;
2967   case LibFunc_nearbyint:
2968   case LibFunc_nearbyintf:
2969   case LibFunc_nearbyintl:
2970     return Intrinsic::nearbyint;
2971   case LibFunc_round:
2972   case LibFunc_roundf:
2973   case LibFunc_roundl:
2974     return Intrinsic::round;
2975   case LibFunc_pow:
2976   case LibFunc_powf:
2977   case LibFunc_powl:
2978     return Intrinsic::pow;
2979   case LibFunc_sqrt:
2980   case LibFunc_sqrtf:
2981   case LibFunc_sqrtl:
2982     return Intrinsic::sqrt;
2983   }
2984 
2985   return Intrinsic::not_intrinsic;
2986 }
2987 
2988 /// Return true if we can prove that the specified FP value is never equal to
2989 /// -0.0.
2990 ///
2991 /// NOTE: this function will need to be revisited when we support non-default
2992 /// rounding modes!
2993 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2994                                 unsigned Depth) {
2995   if (auto *CFP = dyn_cast<ConstantFP>(V))
2996     return !CFP->getValueAPF().isNegZero();
2997 
2998   // Limit search depth.
2999   if (Depth == MaxDepth)
3000     return false;
3001 
3002   auto *Op = dyn_cast<Operator>(V);
3003   if (!Op)
3004     return false;
3005 
3006   // Check if the nsz fast-math flag is set.
3007   if (auto *FPO = dyn_cast<FPMathOperator>(Op))
3008     if (FPO->hasNoSignedZeros())
3009       return true;
3010 
3011   // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3012   if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3013     return true;
3014 
3015   // sitofp and uitofp turn into +0.0 for zero.
3016   if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3017     return true;
3018 
3019   if (auto *Call = dyn_cast<CallInst>(Op)) {
3020     Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI);
3021     switch (IID) {
3022     default:
3023       break;
3024     // sqrt(-0.0) = -0.0, no other negative results are possible.
3025     case Intrinsic::sqrt:
3026     case Intrinsic::canonicalize:
3027       return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3028     // fabs(x) != -0.0
3029     case Intrinsic::fabs:
3030       return true;
3031     }
3032   }
3033 
3034   return false;
3035 }
3036 
3037 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3038 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
3039 /// bit despite comparing equal.
3040 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3041                                             const TargetLibraryInfo *TLI,
3042                                             bool SignBitOnly,
3043                                             unsigned Depth) {
3044   // TODO: This function does not do the right thing when SignBitOnly is true
3045   // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3046   // which flips the sign bits of NaNs.  See
3047   // https://llvm.org/bugs/show_bug.cgi?id=31702.
3048 
3049   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3050     return !CFP->getValueAPF().isNegative() ||
3051            (!SignBitOnly && CFP->getValueAPF().isZero());
3052   }
3053 
3054   // Handle vector of constants.
3055   if (auto *CV = dyn_cast<Constant>(V)) {
3056     if (CV->getType()->isVectorTy()) {
3057       unsigned NumElts = CV->getType()->getVectorNumElements();
3058       for (unsigned i = 0; i != NumElts; ++i) {
3059         auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3060         if (!CFP)
3061           return false;
3062         if (CFP->getValueAPF().isNegative() &&
3063             (SignBitOnly || !CFP->getValueAPF().isZero()))
3064           return false;
3065       }
3066 
3067       // All non-negative ConstantFPs.
3068       return true;
3069     }
3070   }
3071 
3072   if (Depth == MaxDepth)
3073     return false; // Limit search depth.
3074 
3075   const Operator *I = dyn_cast<Operator>(V);
3076   if (!I)
3077     return false;
3078 
3079   switch (I->getOpcode()) {
3080   default:
3081     break;
3082   // Unsigned integers are always nonnegative.
3083   case Instruction::UIToFP:
3084     return true;
3085   case Instruction::FMul:
3086     // x*x is always non-negative or a NaN.
3087     if (I->getOperand(0) == I->getOperand(1) &&
3088         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3089       return true;
3090 
3091     LLVM_FALLTHROUGH;
3092   case Instruction::FAdd:
3093   case Instruction::FDiv:
3094   case Instruction::FRem:
3095     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3096                                            Depth + 1) &&
3097            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3098                                            Depth + 1);
3099   case Instruction::Select:
3100     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3101                                            Depth + 1) &&
3102            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3103                                            Depth + 1);
3104   case Instruction::FPExt:
3105   case Instruction::FPTrunc:
3106     // Widening/narrowing never change sign.
3107     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3108                                            Depth + 1);
3109   case Instruction::ExtractElement:
3110     // Look through extract element. At the moment we keep this simple and skip
3111     // tracking the specific element. But at least we might find information
3112     // valid for all elements of the vector.
3113     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3114                                            Depth + 1);
3115   case Instruction::Call:
3116     const auto *CI = cast<CallInst>(I);
3117     Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
3118     switch (IID) {
3119     default:
3120       break;
3121     case Intrinsic::maxnum:
3122       return (isKnownNeverNaN(I->getOperand(0), TLI) &&
3123               cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI,
3124                                               SignBitOnly, Depth + 1)) ||
3125             (isKnownNeverNaN(I->getOperand(1), TLI) &&
3126               cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
3127                                               SignBitOnly, Depth + 1));
3128 
3129     case Intrinsic::maximum:
3130       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3131                                              Depth + 1) ||
3132              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3133                                              Depth + 1);
3134     case Intrinsic::minnum:
3135     case Intrinsic::minimum:
3136       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3137                                              Depth + 1) &&
3138              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3139                                              Depth + 1);
3140     case Intrinsic::exp:
3141     case Intrinsic::exp2:
3142     case Intrinsic::fabs:
3143       return true;
3144 
3145     case Intrinsic::sqrt:
3146       // sqrt(x) is always >= -0 or NaN.  Moreover, sqrt(x) == -0 iff x == -0.
3147       if (!SignBitOnly)
3148         return true;
3149       return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3150                                  CannotBeNegativeZero(CI->getOperand(0), TLI));
3151 
3152     case Intrinsic::powi:
3153       if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3154         // powi(x,n) is non-negative if n is even.
3155         if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3156           return true;
3157       }
3158       // TODO: This is not correct.  Given that exp is an integer, here are the
3159       // ways that pow can return a negative value:
3160       //
3161       //   pow(x, exp)    --> negative if exp is odd and x is negative.
3162       //   pow(-0, exp)   --> -inf if exp is negative odd.
3163       //   pow(-0, exp)   --> -0 if exp is positive odd.
3164       //   pow(-inf, exp) --> -0 if exp is negative odd.
3165       //   pow(-inf, exp) --> -inf if exp is positive odd.
3166       //
3167       // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3168       // but we must return false if x == -0.  Unfortunately we do not currently
3169       // have a way of expressing this constraint.  See details in
3170       // https://llvm.org/bugs/show_bug.cgi?id=31702.
3171       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3172                                              Depth + 1);
3173 
3174     case Intrinsic::fma:
3175     case Intrinsic::fmuladd:
3176       // x*x+y is non-negative if y is non-negative.
3177       return I->getOperand(0) == I->getOperand(1) &&
3178              (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3179              cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3180                                              Depth + 1);
3181     }
3182     break;
3183   }
3184   return false;
3185 }
3186 
3187 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3188                                        const TargetLibraryInfo *TLI) {
3189   return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3190 }
3191 
3192 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3193   return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3194 }
3195 
3196 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3197                                 unsigned Depth) {
3198   assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3199 
3200   // If we're told that infinities won't happen, assume they won't.
3201   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3202     if (FPMathOp->hasNoInfs())
3203       return true;
3204 
3205   // Handle scalar constants.
3206   if (auto *CFP = dyn_cast<ConstantFP>(V))
3207     return !CFP->isInfinity();
3208 
3209   if (Depth == MaxDepth)
3210     return false;
3211 
3212   if (auto *Inst = dyn_cast<Instruction>(V)) {
3213     switch (Inst->getOpcode()) {
3214     case Instruction::Select: {
3215       return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3216              isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3217     }
3218     case Instruction::UIToFP:
3219       // If the input type fits into the floating type the result is finite.
3220       return ilogb(APFloat::getLargest(
3221                  Inst->getType()->getScalarType()->getFltSemantics())) >=
3222              (int)Inst->getOperand(0)->getType()->getScalarSizeInBits();
3223     default:
3224       break;
3225     }
3226   }
3227 
3228   // Bail out for constant expressions, but try to handle vector constants.
3229   if (!V->getType()->isVectorTy() || !isa<Constant>(V))
3230     return false;
3231 
3232   // For vectors, verify that each element is not infinity.
3233   unsigned NumElts = V->getType()->getVectorNumElements();
3234   for (unsigned i = 0; i != NumElts; ++i) {
3235     Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3236     if (!Elt)
3237       return false;
3238     if (isa<UndefValue>(Elt))
3239       continue;
3240     auto *CElt = dyn_cast<ConstantFP>(Elt);
3241     if (!CElt || CElt->isInfinity())
3242       return false;
3243   }
3244   // All elements were confirmed non-infinity or undefined.
3245   return true;
3246 }
3247 
3248 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3249                            unsigned Depth) {
3250   assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3251 
3252   // If we're told that NaNs won't happen, assume they won't.
3253   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3254     if (FPMathOp->hasNoNaNs())
3255       return true;
3256 
3257   // Handle scalar constants.
3258   if (auto *CFP = dyn_cast<ConstantFP>(V))
3259     return !CFP->isNaN();
3260 
3261   if (Depth == MaxDepth)
3262     return false;
3263 
3264   if (auto *Inst = dyn_cast<Instruction>(V)) {
3265     switch (Inst->getOpcode()) {
3266     case Instruction::FAdd:
3267     case Instruction::FSub:
3268       // Adding positive and negative infinity produces NaN.
3269       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3270              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3271              (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3272               isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3273 
3274     case Instruction::FMul:
3275       // Zero multiplied with infinity produces NaN.
3276       // FIXME: If neither side can be zero fmul never produces NaN.
3277       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3278              isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3279              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3280              isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3281 
3282     case Instruction::FDiv:
3283     case Instruction::FRem:
3284       // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3285       return false;
3286 
3287     case Instruction::Select: {
3288       return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3289              isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3290     }
3291     case Instruction::SIToFP:
3292     case Instruction::UIToFP:
3293       return true;
3294     case Instruction::FPTrunc:
3295     case Instruction::FPExt:
3296       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3297     default:
3298       break;
3299     }
3300   }
3301 
3302   if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3303     switch (II->getIntrinsicID()) {
3304     case Intrinsic::canonicalize:
3305     case Intrinsic::fabs:
3306     case Intrinsic::copysign:
3307     case Intrinsic::exp:
3308     case Intrinsic::exp2:
3309     case Intrinsic::floor:
3310     case Intrinsic::ceil:
3311     case Intrinsic::trunc:
3312     case Intrinsic::rint:
3313     case Intrinsic::nearbyint:
3314     case Intrinsic::round:
3315       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3316     case Intrinsic::sqrt:
3317       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3318              CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3319     case Intrinsic::minnum:
3320     case Intrinsic::maxnum:
3321       // If either operand is not NaN, the result is not NaN.
3322       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3323              isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3324     default:
3325       return false;
3326     }
3327   }
3328 
3329   // Bail out for constant expressions, but try to handle vector constants.
3330   if (!V->getType()->isVectorTy() || !isa<Constant>(V))
3331     return false;
3332 
3333   // For vectors, verify that each element is not NaN.
3334   unsigned NumElts = V->getType()->getVectorNumElements();
3335   for (unsigned i = 0; i != NumElts; ++i) {
3336     Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3337     if (!Elt)
3338       return false;
3339     if (isa<UndefValue>(Elt))
3340       continue;
3341     auto *CElt = dyn_cast<ConstantFP>(Elt);
3342     if (!CElt || CElt->isNaN())
3343       return false;
3344   }
3345   // All elements were confirmed not-NaN or undefined.
3346   return true;
3347 }
3348 
3349 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3350 
3351   // All byte-wide stores are splatable, even of arbitrary variables.
3352   if (V->getType()->isIntegerTy(8))
3353     return V;
3354 
3355   LLVMContext &Ctx = V->getContext();
3356 
3357   // Undef don't care.
3358   auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3359   if (isa<UndefValue>(V))
3360     return UndefInt8;
3361 
3362   const uint64_t Size = DL.getTypeStoreSize(V->getType());
3363   if (!Size)
3364     return UndefInt8;
3365 
3366   Constant *C = dyn_cast<Constant>(V);
3367   if (!C) {
3368     // Conceptually, we could handle things like:
3369     //   %a = zext i8 %X to i16
3370     //   %b = shl i16 %a, 8
3371     //   %c = or i16 %a, %b
3372     // but until there is an example that actually needs this, it doesn't seem
3373     // worth worrying about.
3374     return nullptr;
3375   }
3376 
3377   // Handle 'null' ConstantArrayZero etc.
3378   if (C->isNullValue())
3379     return Constant::getNullValue(Type::getInt8Ty(Ctx));
3380 
3381   // Constant floating-point values can be handled as integer values if the
3382   // corresponding integer value is "byteable".  An important case is 0.0.
3383   if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3384     Type *Ty = nullptr;
3385     if (CFP->getType()->isHalfTy())
3386       Ty = Type::getInt16Ty(Ctx);
3387     else if (CFP->getType()->isFloatTy())
3388       Ty = Type::getInt32Ty(Ctx);
3389     else if (CFP->getType()->isDoubleTy())
3390       Ty = Type::getInt64Ty(Ctx);
3391     // Don't handle long double formats, which have strange constraints.
3392     return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3393               : nullptr;
3394   }
3395 
3396   // We can handle constant integers that are multiple of 8 bits.
3397   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3398     if (CI->getBitWidth() % 8 == 0) {
3399       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3400       if (!CI->getValue().isSplat(8))
3401         return nullptr;
3402       return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3403     }
3404   }
3405 
3406   if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3407     if (CE->getOpcode() == Instruction::IntToPtr) {
3408       auto PS = DL.getPointerSizeInBits(
3409           cast<PointerType>(CE->getType())->getAddressSpace());
3410       return isBytewiseValue(
3411           ConstantExpr::getIntegerCast(CE->getOperand(0),
3412                                        Type::getIntNTy(Ctx, PS), false),
3413           DL);
3414     }
3415   }
3416 
3417   auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3418     if (LHS == RHS)
3419       return LHS;
3420     if (!LHS || !RHS)
3421       return nullptr;
3422     if (LHS == UndefInt8)
3423       return RHS;
3424     if (RHS == UndefInt8)
3425       return LHS;
3426     return nullptr;
3427   };
3428 
3429   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3430     Value *Val = UndefInt8;
3431     for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3432       if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3433         return nullptr;
3434     return Val;
3435   }
3436 
3437   if (isa<ConstantAggregate>(C)) {
3438     Value *Val = UndefInt8;
3439     for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3440       if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3441         return nullptr;
3442     return Val;
3443   }
3444 
3445   // Don't try to handle the handful of other constants.
3446   return nullptr;
3447 }
3448 
3449 // This is the recursive version of BuildSubAggregate. It takes a few different
3450 // arguments. Idxs is the index within the nested struct From that we are
3451 // looking at now (which is of type IndexedType). IdxSkip is the number of
3452 // indices from Idxs that should be left out when inserting into the resulting
3453 // struct. To is the result struct built so far, new insertvalue instructions
3454 // build on that.
3455 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3456                                 SmallVectorImpl<unsigned> &Idxs,
3457                                 unsigned IdxSkip,
3458                                 Instruction *InsertBefore) {
3459   StructType *STy = dyn_cast<StructType>(IndexedType);
3460   if (STy) {
3461     // Save the original To argument so we can modify it
3462     Value *OrigTo = To;
3463     // General case, the type indexed by Idxs is a struct
3464     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3465       // Process each struct element recursively
3466       Idxs.push_back(i);
3467       Value *PrevTo = To;
3468       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3469                              InsertBefore);
3470       Idxs.pop_back();
3471       if (!To) {
3472         // Couldn't find any inserted value for this index? Cleanup
3473         while (PrevTo != OrigTo) {
3474           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3475           PrevTo = Del->getAggregateOperand();
3476           Del->eraseFromParent();
3477         }
3478         // Stop processing elements
3479         break;
3480       }
3481     }
3482     // If we successfully found a value for each of our subaggregates
3483     if (To)
3484       return To;
3485   }
3486   // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3487   // the struct's elements had a value that was inserted directly. In the latter
3488   // case, perhaps we can't determine each of the subelements individually, but
3489   // we might be able to find the complete struct somewhere.
3490 
3491   // Find the value that is at that particular spot
3492   Value *V = FindInsertedValue(From, Idxs);
3493 
3494   if (!V)
3495     return nullptr;
3496 
3497   // Insert the value in the new (sub) aggregate
3498   return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3499                                  "tmp", InsertBefore);
3500 }
3501 
3502 // This helper takes a nested struct and extracts a part of it (which is again a
3503 // struct) into a new value. For example, given the struct:
3504 // { a, { b, { c, d }, e } }
3505 // and the indices "1, 1" this returns
3506 // { c, d }.
3507 //
3508 // It does this by inserting an insertvalue for each element in the resulting
3509 // struct, as opposed to just inserting a single struct. This will only work if
3510 // each of the elements of the substruct are known (ie, inserted into From by an
3511 // insertvalue instruction somewhere).
3512 //
3513 // All inserted insertvalue instructions are inserted before InsertBefore
3514 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3515                                 Instruction *InsertBefore) {
3516   assert(InsertBefore && "Must have someplace to insert!");
3517   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3518                                                              idx_range);
3519   Value *To = UndefValue::get(IndexedType);
3520   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3521   unsigned IdxSkip = Idxs.size();
3522 
3523   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3524 }
3525 
3526 /// Given an aggregate and a sequence of indices, see if the scalar value
3527 /// indexed is already around as a register, for example if it was inserted
3528 /// directly into the aggregate.
3529 ///
3530 /// If InsertBefore is not null, this function will duplicate (modified)
3531 /// insertvalues when a part of a nested struct is extracted.
3532 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3533                                Instruction *InsertBefore) {
3534   // Nothing to index? Just return V then (this is useful at the end of our
3535   // recursion).
3536   if (idx_range.empty())
3537     return V;
3538   // We have indices, so V should have an indexable type.
3539   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3540          "Not looking at a struct or array?");
3541   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3542          "Invalid indices for type?");
3543 
3544   if (Constant *C = dyn_cast<Constant>(V)) {
3545     C = C->getAggregateElement(idx_range[0]);
3546     if (!C) return nullptr;
3547     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3548   }
3549 
3550   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3551     // Loop the indices for the insertvalue instruction in parallel with the
3552     // requested indices
3553     const unsigned *req_idx = idx_range.begin();
3554     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3555          i != e; ++i, ++req_idx) {
3556       if (req_idx == idx_range.end()) {
3557         // We can't handle this without inserting insertvalues
3558         if (!InsertBefore)
3559           return nullptr;
3560 
3561         // The requested index identifies a part of a nested aggregate. Handle
3562         // this specially. For example,
3563         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3564         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3565         // %C = extractvalue {i32, { i32, i32 } } %B, 1
3566         // This can be changed into
3567         // %A = insertvalue {i32, i32 } undef, i32 10, 0
3568         // %C = insertvalue {i32, i32 } %A, i32 11, 1
3569         // which allows the unused 0,0 element from the nested struct to be
3570         // removed.
3571         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3572                                  InsertBefore);
3573       }
3574 
3575       // This insert value inserts something else than what we are looking for.
3576       // See if the (aggregate) value inserted into has the value we are
3577       // looking for, then.
3578       if (*req_idx != *i)
3579         return FindInsertedValue(I->getAggregateOperand(), idx_range,
3580                                  InsertBefore);
3581     }
3582     // If we end up here, the indices of the insertvalue match with those
3583     // requested (though possibly only partially). Now we recursively look at
3584     // the inserted value, passing any remaining indices.
3585     return FindInsertedValue(I->getInsertedValueOperand(),
3586                              makeArrayRef(req_idx, idx_range.end()),
3587                              InsertBefore);
3588   }
3589 
3590   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3591     // If we're extracting a value from an aggregate that was extracted from
3592     // something else, we can extract from that something else directly instead.
3593     // However, we will need to chain I's indices with the requested indices.
3594 
3595     // Calculate the number of indices required
3596     unsigned size = I->getNumIndices() + idx_range.size();
3597     // Allocate some space to put the new indices in
3598     SmallVector<unsigned, 5> Idxs;
3599     Idxs.reserve(size);
3600     // Add indices from the extract value instruction
3601     Idxs.append(I->idx_begin(), I->idx_end());
3602 
3603     // Add requested indices
3604     Idxs.append(idx_range.begin(), idx_range.end());
3605 
3606     assert(Idxs.size() == size
3607            && "Number of indices added not correct?");
3608 
3609     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3610   }
3611   // Otherwise, we don't know (such as, extracting from a function return value
3612   // or load instruction)
3613   return nullptr;
3614 }
3615 
3616 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3617                                        unsigned CharSize) {
3618   // Make sure the GEP has exactly three arguments.
3619   if (GEP->getNumOperands() != 3)
3620     return false;
3621 
3622   // Make sure the index-ee is a pointer to array of \p CharSize integers.
3623   // CharSize.
3624   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3625   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3626     return false;
3627 
3628   // Check to make sure that the first operand of the GEP is an integer and
3629   // has value 0 so that we are sure we're indexing into the initializer.
3630   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3631   if (!FirstIdx || !FirstIdx->isZero())
3632     return false;
3633 
3634   return true;
3635 }
3636 
3637 bool llvm::getConstantDataArrayInfo(const Value *V,
3638                                     ConstantDataArraySlice &Slice,
3639                                     unsigned ElementSize, uint64_t Offset) {
3640   assert(V);
3641 
3642   // Look through bitcast instructions and geps.
3643   V = V->stripPointerCasts();
3644 
3645   // If the value is a GEP instruction or constant expression, treat it as an
3646   // offset.
3647   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3648     // The GEP operator should be based on a pointer to string constant, and is
3649     // indexing into the string constant.
3650     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3651       return false;
3652 
3653     // If the second index isn't a ConstantInt, then this is a variable index
3654     // into the array.  If this occurs, we can't say anything meaningful about
3655     // the string.
3656     uint64_t StartIdx = 0;
3657     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3658       StartIdx = CI->getZExtValue();
3659     else
3660       return false;
3661     return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3662                                     StartIdx + Offset);
3663   }
3664 
3665   // The GEP instruction, constant or instruction, must reference a global
3666   // variable that is a constant and is initialized. The referenced constant
3667   // initializer is the array that we'll use for optimization.
3668   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3669   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3670     return false;
3671 
3672   const ConstantDataArray *Array;
3673   ArrayType *ArrayTy;
3674   if (GV->getInitializer()->isNullValue()) {
3675     Type *GVTy = GV->getValueType();
3676     if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3677       // A zeroinitializer for the array; there is no ConstantDataArray.
3678       Array = nullptr;
3679     } else {
3680       const DataLayout &DL = GV->getParent()->getDataLayout();
3681       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3682       uint64_t Length = SizeInBytes / (ElementSize / 8);
3683       if (Length <= Offset)
3684         return false;
3685 
3686       Slice.Array = nullptr;
3687       Slice.Offset = 0;
3688       Slice.Length = Length - Offset;
3689       return true;
3690     }
3691   } else {
3692     // This must be a ConstantDataArray.
3693     Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3694     if (!Array)
3695       return false;
3696     ArrayTy = Array->getType();
3697   }
3698   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3699     return false;
3700 
3701   uint64_t NumElts = ArrayTy->getArrayNumElements();
3702   if (Offset > NumElts)
3703     return false;
3704 
3705   Slice.Array = Array;
3706   Slice.Offset = Offset;
3707   Slice.Length = NumElts - Offset;
3708   return true;
3709 }
3710 
3711 /// This function computes the length of a null-terminated C string pointed to
3712 /// by V. If successful, it returns true and returns the string in Str.
3713 /// If unsuccessful, it returns false.
3714 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3715                                  uint64_t Offset, bool TrimAtNul) {
3716   ConstantDataArraySlice Slice;
3717   if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3718     return false;
3719 
3720   if (Slice.Array == nullptr) {
3721     if (TrimAtNul) {
3722       Str = StringRef();
3723       return true;
3724     }
3725     if (Slice.Length == 1) {
3726       Str = StringRef("", 1);
3727       return true;
3728     }
3729     // We cannot instantiate a StringRef as we do not have an appropriate string
3730     // of 0s at hand.
3731     return false;
3732   }
3733 
3734   // Start out with the entire array in the StringRef.
3735   Str = Slice.Array->getAsString();
3736   // Skip over 'offset' bytes.
3737   Str = Str.substr(Slice.Offset);
3738 
3739   if (TrimAtNul) {
3740     // Trim off the \0 and anything after it.  If the array is not nul
3741     // terminated, we just return the whole end of string.  The client may know
3742     // some other way that the string is length-bound.
3743     Str = Str.substr(0, Str.find('\0'));
3744   }
3745   return true;
3746 }
3747 
3748 // These next two are very similar to the above, but also look through PHI
3749 // nodes.
3750 // TODO: See if we can integrate these two together.
3751 
3752 /// If we can compute the length of the string pointed to by
3753 /// the specified pointer, return 'len+1'.  If we can't, return 0.
3754 static uint64_t GetStringLengthH(const Value *V,
3755                                  SmallPtrSetImpl<const PHINode*> &PHIs,
3756                                  unsigned CharSize) {
3757   // Look through noop bitcast instructions.
3758   V = V->stripPointerCasts();
3759 
3760   // If this is a PHI node, there are two cases: either we have already seen it
3761   // or we haven't.
3762   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3763     if (!PHIs.insert(PN).second)
3764       return ~0ULL;  // already in the set.
3765 
3766     // If it was new, see if all the input strings are the same length.
3767     uint64_t LenSoFar = ~0ULL;
3768     for (Value *IncValue : PN->incoming_values()) {
3769       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3770       if (Len == 0) return 0; // Unknown length -> unknown.
3771 
3772       if (Len == ~0ULL) continue;
3773 
3774       if (Len != LenSoFar && LenSoFar != ~0ULL)
3775         return 0;    // Disagree -> unknown.
3776       LenSoFar = Len;
3777     }
3778 
3779     // Success, all agree.
3780     return LenSoFar;
3781   }
3782 
3783   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
3784   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3785     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3786     if (Len1 == 0) return 0;
3787     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3788     if (Len2 == 0) return 0;
3789     if (Len1 == ~0ULL) return Len2;
3790     if (Len2 == ~0ULL) return Len1;
3791     if (Len1 != Len2) return 0;
3792     return Len1;
3793   }
3794 
3795   // Otherwise, see if we can read the string.
3796   ConstantDataArraySlice Slice;
3797   if (!getConstantDataArrayInfo(V, Slice, CharSize))
3798     return 0;
3799 
3800   if (Slice.Array == nullptr)
3801     return 1;
3802 
3803   // Search for nul characters
3804   unsigned NullIndex = 0;
3805   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3806     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3807       break;
3808   }
3809 
3810   return NullIndex + 1;
3811 }
3812 
3813 /// If we can compute the length of the string pointed to by
3814 /// the specified pointer, return 'len+1'.  If we can't, return 0.
3815 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3816   if (!V->getType()->isPointerTy())
3817     return 0;
3818 
3819   SmallPtrSet<const PHINode*, 32> PHIs;
3820   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3821   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
3822   // an empty string as a length.
3823   return Len == ~0ULL ? 1 : Len;
3824 }
3825 
3826 const Value *
3827 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
3828                                            bool MustPreserveNullness) {
3829   assert(Call &&
3830          "getArgumentAliasingToReturnedPointer only works on nonnull calls");
3831   if (const Value *RV = Call->getReturnedArgOperand())
3832     return RV;
3833   // This can be used only as a aliasing property.
3834   if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
3835           Call, MustPreserveNullness))
3836     return Call->getArgOperand(0);
3837   return nullptr;
3838 }
3839 
3840 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
3841     const CallBase *Call, bool MustPreserveNullness) {
3842   return Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
3843          Call->getIntrinsicID() == Intrinsic::strip_invariant_group ||
3844          Call->getIntrinsicID() == Intrinsic::aarch64_irg ||
3845          Call->getIntrinsicID() == Intrinsic::aarch64_tagp ||
3846          (!MustPreserveNullness &&
3847           Call->getIntrinsicID() == Intrinsic::ptrmask);
3848 }
3849 
3850 /// \p PN defines a loop-variant pointer to an object.  Check if the
3851 /// previous iteration of the loop was referring to the same object as \p PN.
3852 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3853                                          const LoopInfo *LI) {
3854   // Find the loop-defined value.
3855   Loop *L = LI->getLoopFor(PN->getParent());
3856   if (PN->getNumIncomingValues() != 2)
3857     return true;
3858 
3859   // Find the value from previous iteration.
3860   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3861   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3862     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3863   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3864     return true;
3865 
3866   // If a new pointer is loaded in the loop, the pointer references a different
3867   // object in every iteration.  E.g.:
3868   //    for (i)
3869   //       int *p = a[i];
3870   //       ...
3871   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3872     if (!L->isLoopInvariant(Load->getPointerOperand()))
3873       return false;
3874   return true;
3875 }
3876 
3877 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3878                                  unsigned MaxLookup) {
3879   if (!V->getType()->isPointerTy())
3880     return V;
3881   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3882     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3883       V = GEP->getPointerOperand();
3884     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3885                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3886       V = cast<Operator>(V)->getOperand(0);
3887     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3888       if (GA->isInterposable())
3889         return V;
3890       V = GA->getAliasee();
3891     } else if (isa<AllocaInst>(V)) {
3892       // An alloca can't be further simplified.
3893       return V;
3894     } else {
3895       if (auto *Call = dyn_cast<CallBase>(V)) {
3896         // CaptureTracking can know about special capturing properties of some
3897         // intrinsics like launder.invariant.group, that can't be expressed with
3898         // the attributes, but have properties like returning aliasing pointer.
3899         // Because some analysis may assume that nocaptured pointer is not
3900         // returned from some special intrinsic (because function would have to
3901         // be marked with returns attribute), it is crucial to use this function
3902         // because it should be in sync with CaptureTracking. Not using it may
3903         // cause weird miscompilations where 2 aliasing pointers are assumed to
3904         // noalias.
3905         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
3906           V = RP;
3907           continue;
3908         }
3909       }
3910 
3911       // See if InstructionSimplify knows any relevant tricks.
3912       if (Instruction *I = dyn_cast<Instruction>(V))
3913         // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3914         if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3915           V = Simplified;
3916           continue;
3917         }
3918 
3919       return V;
3920     }
3921     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3922   }
3923   return V;
3924 }
3925 
3926 void llvm::GetUnderlyingObjects(const Value *V,
3927                                 SmallVectorImpl<const Value *> &Objects,
3928                                 const DataLayout &DL, LoopInfo *LI,
3929                                 unsigned MaxLookup) {
3930   SmallPtrSet<const Value *, 4> Visited;
3931   SmallVector<const Value *, 4> Worklist;
3932   Worklist.push_back(V);
3933   do {
3934     const Value *P = Worklist.pop_back_val();
3935     P = GetUnderlyingObject(P, DL, MaxLookup);
3936 
3937     if (!Visited.insert(P).second)
3938       continue;
3939 
3940     if (auto *SI = dyn_cast<SelectInst>(P)) {
3941       Worklist.push_back(SI->getTrueValue());
3942       Worklist.push_back(SI->getFalseValue());
3943       continue;
3944     }
3945 
3946     if (auto *PN = dyn_cast<PHINode>(P)) {
3947       // If this PHI changes the underlying object in every iteration of the
3948       // loop, don't look through it.  Consider:
3949       //   int **A;
3950       //   for (i) {
3951       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
3952       //     Curr = A[i];
3953       //     *Prev, *Curr;
3954       //
3955       // Prev is tracking Curr one iteration behind so they refer to different
3956       // underlying objects.
3957       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3958           isSameUnderlyingObjectInLoop(PN, LI))
3959         for (Value *IncValue : PN->incoming_values())
3960           Worklist.push_back(IncValue);
3961       continue;
3962     }
3963 
3964     Objects.push_back(P);
3965   } while (!Worklist.empty());
3966 }
3967 
3968 /// This is the function that does the work of looking through basic
3969 /// ptrtoint+arithmetic+inttoptr sequences.
3970 static const Value *getUnderlyingObjectFromInt(const Value *V) {
3971   do {
3972     if (const Operator *U = dyn_cast<Operator>(V)) {
3973       // If we find a ptrtoint, we can transfer control back to the
3974       // regular getUnderlyingObjectFromInt.
3975       if (U->getOpcode() == Instruction::PtrToInt)
3976         return U->getOperand(0);
3977       // If we find an add of a constant, a multiplied value, or a phi, it's
3978       // likely that the other operand will lead us to the base
3979       // object. We don't have to worry about the case where the
3980       // object address is somehow being computed by the multiply,
3981       // because our callers only care when the result is an
3982       // identifiable object.
3983       if (U->getOpcode() != Instruction::Add ||
3984           (!isa<ConstantInt>(U->getOperand(1)) &&
3985            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3986            !isa<PHINode>(U->getOperand(1))))
3987         return V;
3988       V = U->getOperand(0);
3989     } else {
3990       return V;
3991     }
3992     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3993   } while (true);
3994 }
3995 
3996 /// This is a wrapper around GetUnderlyingObjects and adds support for basic
3997 /// ptrtoint+arithmetic+inttoptr sequences.
3998 /// It returns false if unidentified object is found in GetUnderlyingObjects.
3999 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
4000                           SmallVectorImpl<Value *> &Objects,
4001                           const DataLayout &DL) {
4002   SmallPtrSet<const Value *, 16> Visited;
4003   SmallVector<const Value *, 4> Working(1, V);
4004   do {
4005     V = Working.pop_back_val();
4006 
4007     SmallVector<const Value *, 4> Objs;
4008     GetUnderlyingObjects(V, Objs, DL);
4009 
4010     for (const Value *V : Objs) {
4011       if (!Visited.insert(V).second)
4012         continue;
4013       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
4014         const Value *O =
4015           getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
4016         if (O->getType()->isPointerTy()) {
4017           Working.push_back(O);
4018           continue;
4019         }
4020       }
4021       // If GetUnderlyingObjects fails to find an identifiable object,
4022       // getUnderlyingObjectsForCodeGen also fails for safety.
4023       if (!isIdentifiedObject(V)) {
4024         Objects.clear();
4025         return false;
4026       }
4027       Objects.push_back(const_cast<Value *>(V));
4028     }
4029   } while (!Working.empty());
4030   return true;
4031 }
4032 
4033 /// Return true if the only users of this pointer are lifetime markers.
4034 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
4035   for (const User *U : V->users()) {
4036     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4037     if (!II) return false;
4038 
4039     if (!II->isLifetimeStartOrEnd())
4040       return false;
4041   }
4042   return true;
4043 }
4044 
4045 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
4046   if (!LI.isUnordered())
4047     return true;
4048   const Function &F = *LI.getFunction();
4049   // Speculative load may create a race that did not exist in the source.
4050   return F.hasFnAttribute(Attribute::SanitizeThread) ||
4051     // Speculative load may load data from dirty regions.
4052     F.hasFnAttribute(Attribute::SanitizeAddress) ||
4053     F.hasFnAttribute(Attribute::SanitizeHWAddress);
4054 }
4055 
4056 
4057 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
4058                                         const Instruction *CtxI,
4059                                         const DominatorTree *DT) {
4060   const Operator *Inst = dyn_cast<Operator>(V);
4061   if (!Inst)
4062     return false;
4063 
4064   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
4065     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
4066       if (C->canTrap())
4067         return false;
4068 
4069   switch (Inst->getOpcode()) {
4070   default:
4071     return true;
4072   case Instruction::UDiv:
4073   case Instruction::URem: {
4074     // x / y is undefined if y == 0.
4075     const APInt *V;
4076     if (match(Inst->getOperand(1), m_APInt(V)))
4077       return *V != 0;
4078     return false;
4079   }
4080   case Instruction::SDiv:
4081   case Instruction::SRem: {
4082     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
4083     const APInt *Numerator, *Denominator;
4084     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
4085       return false;
4086     // We cannot hoist this division if the denominator is 0.
4087     if (*Denominator == 0)
4088       return false;
4089     // It's safe to hoist if the denominator is not 0 or -1.
4090     if (*Denominator != -1)
4091       return true;
4092     // At this point we know that the denominator is -1.  It is safe to hoist as
4093     // long we know that the numerator is not INT_MIN.
4094     if (match(Inst->getOperand(0), m_APInt(Numerator)))
4095       return !Numerator->isMinSignedValue();
4096     // The numerator *might* be MinSignedValue.
4097     return false;
4098   }
4099   case Instruction::Load: {
4100     const LoadInst *LI = cast<LoadInst>(Inst);
4101     if (mustSuppressSpeculation(*LI))
4102       return false;
4103     const DataLayout &DL = LI->getModule()->getDataLayout();
4104     return isDereferenceableAndAlignedPointer(
4105         LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
4106         DL, CtxI, DT);
4107   }
4108   case Instruction::Call: {
4109     auto *CI = cast<const CallInst>(Inst);
4110     const Function *Callee = CI->getCalledFunction();
4111 
4112     // The called function could have undefined behavior or side-effects, even
4113     // if marked readnone nounwind.
4114     return Callee && Callee->isSpeculatable();
4115   }
4116   case Instruction::VAArg:
4117   case Instruction::Alloca:
4118   case Instruction::Invoke:
4119   case Instruction::CallBr:
4120   case Instruction::PHI:
4121   case Instruction::Store:
4122   case Instruction::Ret:
4123   case Instruction::Br:
4124   case Instruction::IndirectBr:
4125   case Instruction::Switch:
4126   case Instruction::Unreachable:
4127   case Instruction::Fence:
4128   case Instruction::AtomicRMW:
4129   case Instruction::AtomicCmpXchg:
4130   case Instruction::LandingPad:
4131   case Instruction::Resume:
4132   case Instruction::CatchSwitch:
4133   case Instruction::CatchPad:
4134   case Instruction::CatchRet:
4135   case Instruction::CleanupPad:
4136   case Instruction::CleanupRet:
4137     return false; // Misc instructions which have effects
4138   }
4139 }
4140 
4141 bool llvm::mayBeMemoryDependent(const Instruction &I) {
4142   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
4143 }
4144 
4145 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4146 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4147   switch (OR) {
4148     case ConstantRange::OverflowResult::MayOverflow:
4149       return OverflowResult::MayOverflow;
4150     case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4151       return OverflowResult::AlwaysOverflowsLow;
4152     case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4153       return OverflowResult::AlwaysOverflowsHigh;
4154     case ConstantRange::OverflowResult::NeverOverflows:
4155       return OverflowResult::NeverOverflows;
4156   }
4157   llvm_unreachable("Unknown OverflowResult");
4158 }
4159 
4160 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
4161 static ConstantRange computeConstantRangeIncludingKnownBits(
4162     const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4163     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4164     OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4165   KnownBits Known = computeKnownBits(
4166       V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4167   ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4168   ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4169   ConstantRange::PreferredRangeType RangeType =
4170       ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4171   return CR1.intersectWith(CR2, RangeType);
4172 }
4173 
4174 OverflowResult llvm::computeOverflowForUnsignedMul(
4175     const Value *LHS, const Value *RHS, const DataLayout &DL,
4176     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4177     bool UseInstrInfo) {
4178   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4179                                         nullptr, UseInstrInfo);
4180   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4181                                         nullptr, UseInstrInfo);
4182   ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4183   ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4184   return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4185 }
4186 
4187 OverflowResult
4188 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4189                                   const DataLayout &DL, AssumptionCache *AC,
4190                                   const Instruction *CxtI,
4191                                   const DominatorTree *DT, bool UseInstrInfo) {
4192   // Multiplying n * m significant bits yields a result of n + m significant
4193   // bits. If the total number of significant bits does not exceed the
4194   // result bit width (minus 1), there is no overflow.
4195   // This means if we have enough leading sign bits in the operands
4196   // we can guarantee that the result does not overflow.
4197   // Ref: "Hacker's Delight" by Henry Warren
4198   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4199 
4200   // Note that underestimating the number of sign bits gives a more
4201   // conservative answer.
4202   unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4203                       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4204 
4205   // First handle the easy case: if we have enough sign bits there's
4206   // definitely no overflow.
4207   if (SignBits > BitWidth + 1)
4208     return OverflowResult::NeverOverflows;
4209 
4210   // There are two ambiguous cases where there can be no overflow:
4211   //   SignBits == BitWidth + 1    and
4212   //   SignBits == BitWidth
4213   // The second case is difficult to check, therefore we only handle the
4214   // first case.
4215   if (SignBits == BitWidth + 1) {
4216     // It overflows only when both arguments are negative and the true
4217     // product is exactly the minimum negative number.
4218     // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4219     // For simplicity we just check if at least one side is not negative.
4220     KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4221                                           nullptr, UseInstrInfo);
4222     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4223                                           nullptr, UseInstrInfo);
4224     if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4225       return OverflowResult::NeverOverflows;
4226   }
4227   return OverflowResult::MayOverflow;
4228 }
4229 
4230 OverflowResult llvm::computeOverflowForUnsignedAdd(
4231     const Value *LHS, const Value *RHS, const DataLayout &DL,
4232     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4233     bool UseInstrInfo) {
4234   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4235       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4236       nullptr, UseInstrInfo);
4237   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4238       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4239       nullptr, UseInstrInfo);
4240   return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4241 }
4242 
4243 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4244                                                   const Value *RHS,
4245                                                   const AddOperator *Add,
4246                                                   const DataLayout &DL,
4247                                                   AssumptionCache *AC,
4248                                                   const Instruction *CxtI,
4249                                                   const DominatorTree *DT) {
4250   if (Add && Add->hasNoSignedWrap()) {
4251     return OverflowResult::NeverOverflows;
4252   }
4253 
4254   // If LHS and RHS each have at least two sign bits, the addition will look
4255   // like
4256   //
4257   // XX..... +
4258   // YY.....
4259   //
4260   // If the carry into the most significant position is 0, X and Y can't both
4261   // be 1 and therefore the carry out of the addition is also 0.
4262   //
4263   // If the carry into the most significant position is 1, X and Y can't both
4264   // be 0 and therefore the carry out of the addition is also 1.
4265   //
4266   // Since the carry into the most significant position is always equal to
4267   // the carry out of the addition, there is no signed overflow.
4268   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4269       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4270     return OverflowResult::NeverOverflows;
4271 
4272   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4273       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4274   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4275       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4276   OverflowResult OR =
4277       mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4278   if (OR != OverflowResult::MayOverflow)
4279     return OR;
4280 
4281   // The remaining code needs Add to be available. Early returns if not so.
4282   if (!Add)
4283     return OverflowResult::MayOverflow;
4284 
4285   // If the sign of Add is the same as at least one of the operands, this add
4286   // CANNOT overflow. If this can be determined from the known bits of the
4287   // operands the above signedAddMayOverflow() check will have already done so.
4288   // The only other way to improve on the known bits is from an assumption, so
4289   // call computeKnownBitsFromAssume() directly.
4290   bool LHSOrRHSKnownNonNegative =
4291       (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4292   bool LHSOrRHSKnownNegative =
4293       (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4294   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4295     KnownBits AddKnown(LHSRange.getBitWidth());
4296     computeKnownBitsFromAssume(
4297         Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4298     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4299         (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4300       return OverflowResult::NeverOverflows;
4301   }
4302 
4303   return OverflowResult::MayOverflow;
4304 }
4305 
4306 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4307                                                    const Value *RHS,
4308                                                    const DataLayout &DL,
4309                                                    AssumptionCache *AC,
4310                                                    const Instruction *CxtI,
4311                                                    const DominatorTree *DT) {
4312   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4313       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4314   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4315       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4316   return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4317 }
4318 
4319 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4320                                                  const Value *RHS,
4321                                                  const DataLayout &DL,
4322                                                  AssumptionCache *AC,
4323                                                  const Instruction *CxtI,
4324                                                  const DominatorTree *DT) {
4325   // If LHS and RHS each have at least two sign bits, the subtraction
4326   // cannot overflow.
4327   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4328       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4329     return OverflowResult::NeverOverflows;
4330 
4331   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4332       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4333   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4334       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4335   return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4336 }
4337 
4338 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4339                                      const DominatorTree &DT) {
4340   SmallVector<const BranchInst *, 2> GuardingBranches;
4341   SmallVector<const ExtractValueInst *, 2> Results;
4342 
4343   for (const User *U : WO->users()) {
4344     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4345       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4346 
4347       if (EVI->getIndices()[0] == 0)
4348         Results.push_back(EVI);
4349       else {
4350         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4351 
4352         for (const auto *U : EVI->users())
4353           if (const auto *B = dyn_cast<BranchInst>(U)) {
4354             assert(B->isConditional() && "How else is it using an i1?");
4355             GuardingBranches.push_back(B);
4356           }
4357       }
4358     } else {
4359       // We are using the aggregate directly in a way we don't want to analyze
4360       // here (storing it to a global, say).
4361       return false;
4362     }
4363   }
4364 
4365   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4366     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4367     if (!NoWrapEdge.isSingleEdge())
4368       return false;
4369 
4370     // Check if all users of the add are provably no-wrap.
4371     for (const auto *Result : Results) {
4372       // If the extractvalue itself is not executed on overflow, the we don't
4373       // need to check each use separately, since domination is transitive.
4374       if (DT.dominates(NoWrapEdge, Result->getParent()))
4375         continue;
4376 
4377       for (auto &RU : Result->uses())
4378         if (!DT.dominates(NoWrapEdge, RU))
4379           return false;
4380     }
4381 
4382     return true;
4383   };
4384 
4385   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4386 }
4387 
4388 bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V) {
4389   // If the value is a freeze instruction, then it can never
4390   // be undef or poison.
4391   if (isa<FreezeInst>(V))
4392     return true;
4393   // TODO: Some instructions are guaranteed to return neither undef
4394   // nor poison if their arguments are not poison/undef.
4395 
4396   // TODO: Deal with other Constant subclasses.
4397   if (isa<ConstantInt>(V) || isa<GlobalVariable>(V))
4398     return true;
4399 
4400   return false;
4401 }
4402 
4403 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
4404                                                  const DataLayout &DL,
4405                                                  AssumptionCache *AC,
4406                                                  const Instruction *CxtI,
4407                                                  const DominatorTree *DT) {
4408   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
4409                                        Add, DL, AC, CxtI, DT);
4410 }
4411 
4412 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
4413                                                  const Value *RHS,
4414                                                  const DataLayout &DL,
4415                                                  AssumptionCache *AC,
4416                                                  const Instruction *CxtI,
4417                                                  const DominatorTree *DT) {
4418   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
4419 }
4420 
4421 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
4422   // Note: An atomic operation isn't guaranteed to return in a reasonable amount
4423   // of time because it's possible for another thread to interfere with it for an
4424   // arbitrary length of time, but programs aren't allowed to rely on that.
4425 
4426   // If there is no successor, then execution can't transfer to it.
4427   if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
4428     return !CRI->unwindsToCaller();
4429   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
4430     return !CatchSwitch->unwindsToCaller();
4431   if (isa<ResumeInst>(I))
4432     return false;
4433   if (isa<ReturnInst>(I))
4434     return false;
4435   if (isa<UnreachableInst>(I))
4436     return false;
4437 
4438   // Calls can throw, or contain an infinite loop, or kill the process.
4439   if (auto CS = ImmutableCallSite(I)) {
4440     // Call sites that throw have implicit non-local control flow.
4441     if (!CS.doesNotThrow())
4442       return false;
4443 
4444     // A function which doens't throw and has "willreturn" attribute will
4445     // always return.
4446     if (CS.hasFnAttr(Attribute::WillReturn))
4447       return true;
4448 
4449     // Non-throwing call sites can loop infinitely, call exit/pthread_exit
4450     // etc. and thus not return.  However, LLVM already assumes that
4451     //
4452     //  - Thread exiting actions are modeled as writes to memory invisible to
4453     //    the program.
4454     //
4455     //  - Loops that don't have side effects (side effects are volatile/atomic
4456     //    stores and IO) always terminate (see http://llvm.org/PR965).
4457     //    Furthermore IO itself is also modeled as writes to memory invisible to
4458     //    the program.
4459     //
4460     // We rely on those assumptions here, and use the memory effects of the call
4461     // target as a proxy for checking that it always returns.
4462 
4463     // FIXME: This isn't aggressive enough; a call which only writes to a global
4464     // is guaranteed to return.
4465     return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory();
4466   }
4467 
4468   // Other instructions return normally.
4469   return true;
4470 }
4471 
4472 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
4473   // TODO: This is slightly conservative for invoke instruction since exiting
4474   // via an exception *is* normal control for them.
4475   for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
4476     if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
4477       return false;
4478   return true;
4479 }
4480 
4481 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
4482                                                   const Loop *L) {
4483   // The loop header is guaranteed to be executed for every iteration.
4484   //
4485   // FIXME: Relax this constraint to cover all basic blocks that are
4486   // guaranteed to be executed at every iteration.
4487   if (I->getParent() != L->getHeader()) return false;
4488 
4489   for (const Instruction &LI : *L->getHeader()) {
4490     if (&LI == I) return true;
4491     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
4492   }
4493   llvm_unreachable("Instruction not contained in its own parent basic block.");
4494 }
4495 
4496 bool llvm::propagatesFullPoison(const Instruction *I) {
4497   // TODO: This should include all instructions apart from phis, selects and
4498   // call-like instructions.
4499   switch (I->getOpcode()) {
4500   case Instruction::Add:
4501   case Instruction::Sub:
4502   case Instruction::Xor:
4503   case Instruction::Trunc:
4504   case Instruction::BitCast:
4505   case Instruction::AddrSpaceCast:
4506   case Instruction::Mul:
4507   case Instruction::Shl:
4508   case Instruction::GetElementPtr:
4509     // These operations all propagate poison unconditionally. Note that poison
4510     // is not any particular value, so xor or subtraction of poison with
4511     // itself still yields poison, not zero.
4512     return true;
4513 
4514   case Instruction::AShr:
4515   case Instruction::SExt:
4516     // For these operations, one bit of the input is replicated across
4517     // multiple output bits. A replicated poison bit is still poison.
4518     return true;
4519 
4520   case Instruction::ICmp:
4521     // Comparing poison with any value yields poison.  This is why, for
4522     // instance, x s< (x +nsw 1) can be folded to true.
4523     return true;
4524 
4525   default:
4526     return false;
4527   }
4528 }
4529 
4530 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
4531   switch (I->getOpcode()) {
4532     case Instruction::Store:
4533       return cast<StoreInst>(I)->getPointerOperand();
4534 
4535     case Instruction::Load:
4536       return cast<LoadInst>(I)->getPointerOperand();
4537 
4538     case Instruction::AtomicCmpXchg:
4539       return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
4540 
4541     case Instruction::AtomicRMW:
4542       return cast<AtomicRMWInst>(I)->getPointerOperand();
4543 
4544     case Instruction::UDiv:
4545     case Instruction::SDiv:
4546     case Instruction::URem:
4547     case Instruction::SRem:
4548       return I->getOperand(1);
4549 
4550     default:
4551       // Note: It's really tempting to think that a conditional branch or
4552       // switch should be listed here, but that's incorrect.  It's not
4553       // branching off of poison which is UB, it is executing a side effecting
4554       // instruction which follows the branch.
4555       return nullptr;
4556   }
4557 }
4558 
4559 bool llvm::mustTriggerUB(const Instruction *I,
4560                          const SmallSet<const Value *, 16>& KnownPoison) {
4561   auto *NotPoison = getGuaranteedNonFullPoisonOp(I);
4562   return (NotPoison && KnownPoison.count(NotPoison));
4563 }
4564 
4565 
4566 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
4567   // We currently only look for uses of poison values within the same basic
4568   // block, as that makes it easier to guarantee that the uses will be
4569   // executed given that PoisonI is executed.
4570   //
4571   // FIXME: Expand this to consider uses beyond the same basic block. To do
4572   // this, look out for the distinction between post-dominance and strong
4573   // post-dominance.
4574   const BasicBlock *BB = PoisonI->getParent();
4575 
4576   // Set of instructions that we have proved will yield poison if PoisonI
4577   // does.
4578   SmallSet<const Value *, 16> YieldsPoison;
4579   SmallSet<const BasicBlock *, 4> Visited;
4580   YieldsPoison.insert(PoisonI);
4581   Visited.insert(PoisonI->getParent());
4582 
4583   BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
4584 
4585   unsigned Iter = 0;
4586   while (Iter++ < MaxDepth) {
4587     for (auto &I : make_range(Begin, End)) {
4588       if (&I != PoisonI) {
4589         if (mustTriggerUB(&I, YieldsPoison))
4590           return true;
4591         if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4592           return false;
4593       }
4594 
4595       // Mark poison that propagates from I through uses of I.
4596       if (YieldsPoison.count(&I)) {
4597         for (const User *User : I.users()) {
4598           const Instruction *UserI = cast<Instruction>(User);
4599           if (propagatesFullPoison(UserI))
4600             YieldsPoison.insert(User);
4601         }
4602       }
4603     }
4604 
4605     if (auto *NextBB = BB->getSingleSuccessor()) {
4606       if (Visited.insert(NextBB).second) {
4607         BB = NextBB;
4608         Begin = BB->getFirstNonPHI()->getIterator();
4609         End = BB->end();
4610         continue;
4611       }
4612     }
4613 
4614     break;
4615   }
4616   return false;
4617 }
4618 
4619 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4620   if (FMF.noNaNs())
4621     return true;
4622 
4623   if (auto *C = dyn_cast<ConstantFP>(V))
4624     return !C->isNaN();
4625 
4626   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4627     if (!C->getElementType()->isFloatingPointTy())
4628       return false;
4629     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4630       if (C->getElementAsAPFloat(I).isNaN())
4631         return false;
4632     }
4633     return true;
4634   }
4635 
4636   return false;
4637 }
4638 
4639 static bool isKnownNonZero(const Value *V) {
4640   if (auto *C = dyn_cast<ConstantFP>(V))
4641     return !C->isZero();
4642 
4643   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4644     if (!C->getElementType()->isFloatingPointTy())
4645       return false;
4646     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4647       if (C->getElementAsAPFloat(I).isZero())
4648         return false;
4649     }
4650     return true;
4651   }
4652 
4653   return false;
4654 }
4655 
4656 /// Match clamp pattern for float types without care about NaNs or signed zeros.
4657 /// Given non-min/max outer cmp/select from the clamp pattern this
4658 /// function recognizes if it can be substitued by a "canonical" min/max
4659 /// pattern.
4660 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4661                                                Value *CmpLHS, Value *CmpRHS,
4662                                                Value *TrueVal, Value *FalseVal,
4663                                                Value *&LHS, Value *&RHS) {
4664   // Try to match
4665   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4666   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4667   // and return description of the outer Max/Min.
4668 
4669   // First, check if select has inverse order:
4670   if (CmpRHS == FalseVal) {
4671     std::swap(TrueVal, FalseVal);
4672     Pred = CmpInst::getInversePredicate(Pred);
4673   }
4674 
4675   // Assume success now. If there's no match, callers should not use these anyway.
4676   LHS = TrueVal;
4677   RHS = FalseVal;
4678 
4679   const APFloat *FC1;
4680   if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4681     return {SPF_UNKNOWN, SPNB_NA, false};
4682 
4683   const APFloat *FC2;
4684   switch (Pred) {
4685   case CmpInst::FCMP_OLT:
4686   case CmpInst::FCMP_OLE:
4687   case CmpInst::FCMP_ULT:
4688   case CmpInst::FCMP_ULE:
4689     if (match(FalseVal,
4690               m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4691                           m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4692         FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4693       return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4694     break;
4695   case CmpInst::FCMP_OGT:
4696   case CmpInst::FCMP_OGE:
4697   case CmpInst::FCMP_UGT:
4698   case CmpInst::FCMP_UGE:
4699     if (match(FalseVal,
4700               m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4701                           m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4702         FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4703       return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4704     break;
4705   default:
4706     break;
4707   }
4708 
4709   return {SPF_UNKNOWN, SPNB_NA, false};
4710 }
4711 
4712 /// Recognize variations of:
4713 ///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
4714 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
4715                                       Value *CmpLHS, Value *CmpRHS,
4716                                       Value *TrueVal, Value *FalseVal) {
4717   // Swap the select operands and predicate to match the patterns below.
4718   if (CmpRHS != TrueVal) {
4719     Pred = ICmpInst::getSwappedPredicate(Pred);
4720     std::swap(TrueVal, FalseVal);
4721   }
4722   const APInt *C1;
4723   if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
4724     const APInt *C2;
4725     // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
4726     if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4727         C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
4728       return {SPF_SMAX, SPNB_NA, false};
4729 
4730     // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
4731     if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4732         C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
4733       return {SPF_SMIN, SPNB_NA, false};
4734 
4735     // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
4736     if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4737         C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
4738       return {SPF_UMAX, SPNB_NA, false};
4739 
4740     // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
4741     if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4742         C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
4743       return {SPF_UMIN, SPNB_NA, false};
4744   }
4745   return {SPF_UNKNOWN, SPNB_NA, false};
4746 }
4747 
4748 /// Recognize variations of:
4749 ///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
4750 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
4751                                                Value *CmpLHS, Value *CmpRHS,
4752                                                Value *TVal, Value *FVal,
4753                                                unsigned Depth) {
4754   // TODO: Allow FP min/max with nnan/nsz.
4755   assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
4756 
4757   Value *A = nullptr, *B = nullptr;
4758   SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
4759   if (!SelectPatternResult::isMinOrMax(L.Flavor))
4760     return {SPF_UNKNOWN, SPNB_NA, false};
4761 
4762   Value *C = nullptr, *D = nullptr;
4763   SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
4764   if (L.Flavor != R.Flavor)
4765     return {SPF_UNKNOWN, SPNB_NA, false};
4766 
4767   // We have something like: x Pred y ? min(a, b) : min(c, d).
4768   // Try to match the compare to the min/max operations of the select operands.
4769   // First, make sure we have the right compare predicate.
4770   switch (L.Flavor) {
4771   case SPF_SMIN:
4772     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
4773       Pred = ICmpInst::getSwappedPredicate(Pred);
4774       std::swap(CmpLHS, CmpRHS);
4775     }
4776     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
4777       break;
4778     return {SPF_UNKNOWN, SPNB_NA, false};
4779   case SPF_SMAX:
4780     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
4781       Pred = ICmpInst::getSwappedPredicate(Pred);
4782       std::swap(CmpLHS, CmpRHS);
4783     }
4784     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
4785       break;
4786     return {SPF_UNKNOWN, SPNB_NA, false};
4787   case SPF_UMIN:
4788     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
4789       Pred = ICmpInst::getSwappedPredicate(Pred);
4790       std::swap(CmpLHS, CmpRHS);
4791     }
4792     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
4793       break;
4794     return {SPF_UNKNOWN, SPNB_NA, false};
4795   case SPF_UMAX:
4796     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
4797       Pred = ICmpInst::getSwappedPredicate(Pred);
4798       std::swap(CmpLHS, CmpRHS);
4799     }
4800     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
4801       break;
4802     return {SPF_UNKNOWN, SPNB_NA, false};
4803   default:
4804     return {SPF_UNKNOWN, SPNB_NA, false};
4805   }
4806 
4807   // If there is a common operand in the already matched min/max and the other
4808   // min/max operands match the compare operands (either directly or inverted),
4809   // then this is min/max of the same flavor.
4810 
4811   // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4812   // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4813   if (D == B) {
4814     if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4815                                          match(A, m_Not(m_Specific(CmpRHS)))))
4816       return {L.Flavor, SPNB_NA, false};
4817   }
4818   // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4819   // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4820   if (C == B) {
4821     if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4822                                          match(A, m_Not(m_Specific(CmpRHS)))))
4823       return {L.Flavor, SPNB_NA, false};
4824   }
4825   // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4826   // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4827   if (D == A) {
4828     if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4829                                          match(B, m_Not(m_Specific(CmpRHS)))))
4830       return {L.Flavor, SPNB_NA, false};
4831   }
4832   // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4833   // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4834   if (C == A) {
4835     if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4836                                          match(B, m_Not(m_Specific(CmpRHS)))))
4837       return {L.Flavor, SPNB_NA, false};
4838   }
4839 
4840   return {SPF_UNKNOWN, SPNB_NA, false};
4841 }
4842 
4843 /// Match non-obvious integer minimum and maximum sequences.
4844 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
4845                                        Value *CmpLHS, Value *CmpRHS,
4846                                        Value *TrueVal, Value *FalseVal,
4847                                        Value *&LHS, Value *&RHS,
4848                                        unsigned Depth) {
4849   // Assume success. If there's no match, callers should not use these anyway.
4850   LHS = TrueVal;
4851   RHS = FalseVal;
4852 
4853   SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
4854   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4855     return SPR;
4856 
4857   SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
4858   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4859     return SPR;
4860 
4861   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
4862     return {SPF_UNKNOWN, SPNB_NA, false};
4863 
4864   // Z = X -nsw Y
4865   // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
4866   // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
4867   if (match(TrueVal, m_Zero()) &&
4868       match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4869     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4870 
4871   // Z = X -nsw Y
4872   // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
4873   // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
4874   if (match(FalseVal, m_Zero()) &&
4875       match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4876     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4877 
4878   const APInt *C1;
4879   if (!match(CmpRHS, m_APInt(C1)))
4880     return {SPF_UNKNOWN, SPNB_NA, false};
4881 
4882   // An unsigned min/max can be written with a signed compare.
4883   const APInt *C2;
4884   if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
4885       (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
4886     // Is the sign bit set?
4887     // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
4888     // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
4889     if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
4890         C2->isMaxSignedValue())
4891       return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4892 
4893     // Is the sign bit clear?
4894     // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
4895     // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
4896     if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
4897         C2->isMinSignedValue())
4898       return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4899   }
4900 
4901   // Look through 'not' ops to find disguised signed min/max.
4902   // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
4903   // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
4904   if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
4905       match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
4906     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4907 
4908   // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
4909   // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
4910   if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
4911       match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
4912     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4913 
4914   return {SPF_UNKNOWN, SPNB_NA, false};
4915 }
4916 
4917 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
4918   assert(X && Y && "Invalid operand");
4919 
4920   // X = sub (0, Y) || X = sub nsw (0, Y)
4921   if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
4922       (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
4923     return true;
4924 
4925   // Y = sub (0, X) || Y = sub nsw (0, X)
4926   if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
4927       (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
4928     return true;
4929 
4930   // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
4931   Value *A, *B;
4932   return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
4933                         match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
4934          (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
4935                        match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
4936 }
4937 
4938 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
4939                                               FastMathFlags FMF,
4940                                               Value *CmpLHS, Value *CmpRHS,
4941                                               Value *TrueVal, Value *FalseVal,
4942                                               Value *&LHS, Value *&RHS,
4943                                               unsigned Depth) {
4944   if (CmpInst::isFPPredicate(Pred)) {
4945     // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
4946     // 0.0 operand, set the compare's 0.0 operands to that same value for the
4947     // purpose of identifying min/max. Disregard vector constants with undefined
4948     // elements because those can not be back-propagated for analysis.
4949     Value *OutputZeroVal = nullptr;
4950     if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
4951         !cast<Constant>(TrueVal)->containsUndefElement())
4952       OutputZeroVal = TrueVal;
4953     else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
4954              !cast<Constant>(FalseVal)->containsUndefElement())
4955       OutputZeroVal = FalseVal;
4956 
4957     if (OutputZeroVal) {
4958       if (match(CmpLHS, m_AnyZeroFP()))
4959         CmpLHS = OutputZeroVal;
4960       if (match(CmpRHS, m_AnyZeroFP()))
4961         CmpRHS = OutputZeroVal;
4962     }
4963   }
4964 
4965   LHS = CmpLHS;
4966   RHS = CmpRHS;
4967 
4968   // Signed zero may return inconsistent results between implementations.
4969   //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
4970   //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
4971   // Therefore, we behave conservatively and only proceed if at least one of the
4972   // operands is known to not be zero or if we don't care about signed zero.
4973   switch (Pred) {
4974   default: break;
4975   // FIXME: Include OGT/OLT/UGT/ULT.
4976   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
4977   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
4978     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4979         !isKnownNonZero(CmpRHS))
4980       return {SPF_UNKNOWN, SPNB_NA, false};
4981   }
4982 
4983   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
4984   bool Ordered = false;
4985 
4986   // When given one NaN and one non-NaN input:
4987   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
4988   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
4989   //     ordered comparison fails), which could be NaN or non-NaN.
4990   // so here we discover exactly what NaN behavior is required/accepted.
4991   if (CmpInst::isFPPredicate(Pred)) {
4992     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
4993     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
4994 
4995     if (LHSSafe && RHSSafe) {
4996       // Both operands are known non-NaN.
4997       NaNBehavior = SPNB_RETURNS_ANY;
4998     } else if (CmpInst::isOrdered(Pred)) {
4999       // An ordered comparison will return false when given a NaN, so it
5000       // returns the RHS.
5001       Ordered = true;
5002       if (LHSSafe)
5003         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
5004         NaNBehavior = SPNB_RETURNS_NAN;
5005       else if (RHSSafe)
5006         NaNBehavior = SPNB_RETURNS_OTHER;
5007       else
5008         // Completely unsafe.
5009         return {SPF_UNKNOWN, SPNB_NA, false};
5010     } else {
5011       Ordered = false;
5012       // An unordered comparison will return true when given a NaN, so it
5013       // returns the LHS.
5014       if (LHSSafe)
5015         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
5016         NaNBehavior = SPNB_RETURNS_OTHER;
5017       else if (RHSSafe)
5018         NaNBehavior = SPNB_RETURNS_NAN;
5019       else
5020         // Completely unsafe.
5021         return {SPF_UNKNOWN, SPNB_NA, false};
5022     }
5023   }
5024 
5025   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
5026     std::swap(CmpLHS, CmpRHS);
5027     Pred = CmpInst::getSwappedPredicate(Pred);
5028     if (NaNBehavior == SPNB_RETURNS_NAN)
5029       NaNBehavior = SPNB_RETURNS_OTHER;
5030     else if (NaNBehavior == SPNB_RETURNS_OTHER)
5031       NaNBehavior = SPNB_RETURNS_NAN;
5032     Ordered = !Ordered;
5033   }
5034 
5035   // ([if]cmp X, Y) ? X : Y
5036   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
5037     switch (Pred) {
5038     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
5039     case ICmpInst::ICMP_UGT:
5040     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
5041     case ICmpInst::ICMP_SGT:
5042     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
5043     case ICmpInst::ICMP_ULT:
5044     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
5045     case ICmpInst::ICMP_SLT:
5046     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
5047     case FCmpInst::FCMP_UGT:
5048     case FCmpInst::FCMP_UGE:
5049     case FCmpInst::FCMP_OGT:
5050     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
5051     case FCmpInst::FCMP_ULT:
5052     case FCmpInst::FCMP_ULE:
5053     case FCmpInst::FCMP_OLT:
5054     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
5055     }
5056   }
5057 
5058   if (isKnownNegation(TrueVal, FalseVal)) {
5059     // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
5060     // match against either LHS or sext(LHS).
5061     auto MaybeSExtCmpLHS =
5062         m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
5063     auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
5064     auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
5065     if (match(TrueVal, MaybeSExtCmpLHS)) {
5066       // Set the return values. If the compare uses the negated value (-X >s 0),
5067       // swap the return values because the negated value is always 'RHS'.
5068       LHS = TrueVal;
5069       RHS = FalseVal;
5070       if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
5071         std::swap(LHS, RHS);
5072 
5073       // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
5074       // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
5075       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5076         return {SPF_ABS, SPNB_NA, false};
5077 
5078       // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
5079       if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
5080         return {SPF_ABS, SPNB_NA, false};
5081 
5082       // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
5083       // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
5084       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5085         return {SPF_NABS, SPNB_NA, false};
5086     }
5087     else if (match(FalseVal, MaybeSExtCmpLHS)) {
5088       // Set the return values. If the compare uses the negated value (-X >s 0),
5089       // swap the return values because the negated value is always 'RHS'.
5090       LHS = FalseVal;
5091       RHS = TrueVal;
5092       if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
5093         std::swap(LHS, RHS);
5094 
5095       // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
5096       // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
5097       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5098         return {SPF_NABS, SPNB_NA, false};
5099 
5100       // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
5101       // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
5102       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5103         return {SPF_ABS, SPNB_NA, false};
5104     }
5105   }
5106 
5107   if (CmpInst::isIntPredicate(Pred))
5108     return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
5109 
5110   // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
5111   // may return either -0.0 or 0.0, so fcmp/select pair has stricter
5112   // semantics than minNum. Be conservative in such case.
5113   if (NaNBehavior != SPNB_RETURNS_ANY ||
5114       (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5115        !isKnownNonZero(CmpRHS)))
5116     return {SPF_UNKNOWN, SPNB_NA, false};
5117 
5118   return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
5119 }
5120 
5121 /// Helps to match a select pattern in case of a type mismatch.
5122 ///
5123 /// The function processes the case when type of true and false values of a
5124 /// select instruction differs from type of the cmp instruction operands because
5125 /// of a cast instruction. The function checks if it is legal to move the cast
5126 /// operation after "select". If yes, it returns the new second value of
5127 /// "select" (with the assumption that cast is moved):
5128 /// 1. As operand of cast instruction when both values of "select" are same cast
5129 /// instructions.
5130 /// 2. As restored constant (by applying reverse cast operation) when the first
5131 /// value of the "select" is a cast operation and the second value is a
5132 /// constant.
5133 /// NOTE: We return only the new second value because the first value could be
5134 /// accessed as operand of cast instruction.
5135 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
5136                               Instruction::CastOps *CastOp) {
5137   auto *Cast1 = dyn_cast<CastInst>(V1);
5138   if (!Cast1)
5139     return nullptr;
5140 
5141   *CastOp = Cast1->getOpcode();
5142   Type *SrcTy = Cast1->getSrcTy();
5143   if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
5144     // If V1 and V2 are both the same cast from the same type, look through V1.
5145     if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
5146       return Cast2->getOperand(0);
5147     return nullptr;
5148   }
5149 
5150   auto *C = dyn_cast<Constant>(V2);
5151   if (!C)
5152     return nullptr;
5153 
5154   Constant *CastedTo = nullptr;
5155   switch (*CastOp) {
5156   case Instruction::ZExt:
5157     if (CmpI->isUnsigned())
5158       CastedTo = ConstantExpr::getTrunc(C, SrcTy);
5159     break;
5160   case Instruction::SExt:
5161     if (CmpI->isSigned())
5162       CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
5163     break;
5164   case Instruction::Trunc:
5165     Constant *CmpConst;
5166     if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
5167         CmpConst->getType() == SrcTy) {
5168       // Here we have the following case:
5169       //
5170       //   %cond = cmp iN %x, CmpConst
5171       //   %tr = trunc iN %x to iK
5172       //   %narrowsel = select i1 %cond, iK %t, iK C
5173       //
5174       // We can always move trunc after select operation:
5175       //
5176       //   %cond = cmp iN %x, CmpConst
5177       //   %widesel = select i1 %cond, iN %x, iN CmpConst
5178       //   %tr = trunc iN %widesel to iK
5179       //
5180       // Note that C could be extended in any way because we don't care about
5181       // upper bits after truncation. It can't be abs pattern, because it would
5182       // look like:
5183       //
5184       //   select i1 %cond, x, -x.
5185       //
5186       // So only min/max pattern could be matched. Such match requires widened C
5187       // == CmpConst. That is why set widened C = CmpConst, condition trunc
5188       // CmpConst == C is checked below.
5189       CastedTo = CmpConst;
5190     } else {
5191       CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
5192     }
5193     break;
5194   case Instruction::FPTrunc:
5195     CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
5196     break;
5197   case Instruction::FPExt:
5198     CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
5199     break;
5200   case Instruction::FPToUI:
5201     CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
5202     break;
5203   case Instruction::FPToSI:
5204     CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
5205     break;
5206   case Instruction::UIToFP:
5207     CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
5208     break;
5209   case Instruction::SIToFP:
5210     CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
5211     break;
5212   default:
5213     break;
5214   }
5215 
5216   if (!CastedTo)
5217     return nullptr;
5218 
5219   // Make sure the cast doesn't lose any information.
5220   Constant *CastedBack =
5221       ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
5222   if (CastedBack != C)
5223     return nullptr;
5224 
5225   return CastedTo;
5226 }
5227 
5228 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
5229                                              Instruction::CastOps *CastOp,
5230                                              unsigned Depth) {
5231   if (Depth >= MaxDepth)
5232     return {SPF_UNKNOWN, SPNB_NA, false};
5233 
5234   SelectInst *SI = dyn_cast<SelectInst>(V);
5235   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
5236 
5237   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
5238   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
5239 
5240   Value *TrueVal = SI->getTrueValue();
5241   Value *FalseVal = SI->getFalseValue();
5242 
5243   return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
5244                                             CastOp, Depth);
5245 }
5246 
5247 SelectPatternResult llvm::matchDecomposedSelectPattern(
5248     CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
5249     Instruction::CastOps *CastOp, unsigned Depth) {
5250   CmpInst::Predicate Pred = CmpI->getPredicate();
5251   Value *CmpLHS = CmpI->getOperand(0);
5252   Value *CmpRHS = CmpI->getOperand(1);
5253   FastMathFlags FMF;
5254   if (isa<FPMathOperator>(CmpI))
5255     FMF = CmpI->getFastMathFlags();
5256 
5257   // Bail out early.
5258   if (CmpI->isEquality())
5259     return {SPF_UNKNOWN, SPNB_NA, false};
5260 
5261   // Deal with type mismatches.
5262   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
5263     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
5264       // If this is a potential fmin/fmax with a cast to integer, then ignore
5265       // -0.0 because there is no corresponding integer value.
5266       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5267         FMF.setNoSignedZeros();
5268       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5269                                   cast<CastInst>(TrueVal)->getOperand(0), C,
5270                                   LHS, RHS, Depth);
5271     }
5272     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
5273       // If this is a potential fmin/fmax with a cast to integer, then ignore
5274       // -0.0 because there is no corresponding integer value.
5275       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5276         FMF.setNoSignedZeros();
5277       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5278                                   C, cast<CastInst>(FalseVal)->getOperand(0),
5279                                   LHS, RHS, Depth);
5280     }
5281   }
5282   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
5283                               LHS, RHS, Depth);
5284 }
5285 
5286 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
5287   if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
5288   if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
5289   if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
5290   if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
5291   if (SPF == SPF_FMINNUM)
5292     return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
5293   if (SPF == SPF_FMAXNUM)
5294     return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
5295   llvm_unreachable("unhandled!");
5296 }
5297 
5298 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
5299   if (SPF == SPF_SMIN) return SPF_SMAX;
5300   if (SPF == SPF_UMIN) return SPF_UMAX;
5301   if (SPF == SPF_SMAX) return SPF_SMIN;
5302   if (SPF == SPF_UMAX) return SPF_UMIN;
5303   llvm_unreachable("unhandled!");
5304 }
5305 
5306 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
5307   return getMinMaxPred(getInverseMinMaxFlavor(SPF));
5308 }
5309 
5310 /// Return true if "icmp Pred LHS RHS" is always true.
5311 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
5312                             const Value *RHS, const DataLayout &DL,
5313                             unsigned Depth) {
5314   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
5315   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
5316     return true;
5317 
5318   switch (Pred) {
5319   default:
5320     return false;
5321 
5322   case CmpInst::ICMP_SLE: {
5323     const APInt *C;
5324 
5325     // LHS s<= LHS +_{nsw} C   if C >= 0
5326     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
5327       return !C->isNegative();
5328     return false;
5329   }
5330 
5331   case CmpInst::ICMP_ULE: {
5332     const APInt *C;
5333 
5334     // LHS u<= LHS +_{nuw} C   for any C
5335     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
5336       return true;
5337 
5338     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
5339     auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
5340                                        const Value *&X,
5341                                        const APInt *&CA, const APInt *&CB) {
5342       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
5343           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
5344         return true;
5345 
5346       // If X & C == 0 then (X | C) == X +_{nuw} C
5347       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
5348           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
5349         KnownBits Known(CA->getBitWidth());
5350         computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
5351                          /*CxtI*/ nullptr, /*DT*/ nullptr);
5352         if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
5353           return true;
5354       }
5355 
5356       return false;
5357     };
5358 
5359     const Value *X;
5360     const APInt *CLHS, *CRHS;
5361     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
5362       return CLHS->ule(*CRHS);
5363 
5364     return false;
5365   }
5366   }
5367 }
5368 
5369 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
5370 /// ALHS ARHS" is true.  Otherwise, return None.
5371 static Optional<bool>
5372 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
5373                       const Value *ARHS, const Value *BLHS, const Value *BRHS,
5374                       const DataLayout &DL, unsigned Depth) {
5375   switch (Pred) {
5376   default:
5377     return None;
5378 
5379   case CmpInst::ICMP_SLT:
5380   case CmpInst::ICMP_SLE:
5381     if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
5382         isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
5383       return true;
5384     return None;
5385 
5386   case CmpInst::ICMP_ULT:
5387   case CmpInst::ICMP_ULE:
5388     if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
5389         isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
5390       return true;
5391     return None;
5392   }
5393 }
5394 
5395 /// Return true if the operands of the two compares match.  IsSwappedOps is true
5396 /// when the operands match, but are swapped.
5397 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
5398                           const Value *BLHS, const Value *BRHS,
5399                           bool &IsSwappedOps) {
5400 
5401   bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
5402   IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
5403   return IsMatchingOps || IsSwappedOps;
5404 }
5405 
5406 /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
5407 /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
5408 /// Otherwise, return None if we can't infer anything.
5409 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
5410                                                     CmpInst::Predicate BPred,
5411                                                     bool AreSwappedOps) {
5412   // Canonicalize the predicate as if the operands were not commuted.
5413   if (AreSwappedOps)
5414     BPred = ICmpInst::getSwappedPredicate(BPred);
5415 
5416   if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
5417     return true;
5418   if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
5419     return false;
5420 
5421   return None;
5422 }
5423 
5424 /// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
5425 /// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
5426 /// Otherwise, return None if we can't infer anything.
5427 static Optional<bool>
5428 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
5429                                  const ConstantInt *C1,
5430                                  CmpInst::Predicate BPred,
5431                                  const ConstantInt *C2) {
5432   ConstantRange DomCR =
5433       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
5434   ConstantRange CR =
5435       ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
5436   ConstantRange Intersection = DomCR.intersectWith(CR);
5437   ConstantRange Difference = DomCR.difference(CR);
5438   if (Intersection.isEmptySet())
5439     return false;
5440   if (Difference.isEmptySet())
5441     return true;
5442   return None;
5443 }
5444 
5445 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
5446 /// false.  Otherwise, return None if we can't infer anything.
5447 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
5448                                          const ICmpInst *RHS,
5449                                          const DataLayout &DL, bool LHSIsTrue,
5450                                          unsigned Depth) {
5451   Value *ALHS = LHS->getOperand(0);
5452   Value *ARHS = LHS->getOperand(1);
5453   // The rest of the logic assumes the LHS condition is true.  If that's not the
5454   // case, invert the predicate to make it so.
5455   ICmpInst::Predicate APred =
5456       LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
5457 
5458   Value *BLHS = RHS->getOperand(0);
5459   Value *BRHS = RHS->getOperand(1);
5460   ICmpInst::Predicate BPred = RHS->getPredicate();
5461 
5462   // Can we infer anything when the two compares have matching operands?
5463   bool AreSwappedOps;
5464   if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
5465     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
5466             APred, BPred, AreSwappedOps))
5467       return Implication;
5468     // No amount of additional analysis will infer the second condition, so
5469     // early exit.
5470     return None;
5471   }
5472 
5473   // Can we infer anything when the LHS operands match and the RHS operands are
5474   // constants (not necessarily matching)?
5475   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
5476     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
5477             APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
5478       return Implication;
5479     // No amount of additional analysis will infer the second condition, so
5480     // early exit.
5481     return None;
5482   }
5483 
5484   if (APred == BPred)
5485     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
5486   return None;
5487 }
5488 
5489 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
5490 /// false.  Otherwise, return None if we can't infer anything.  We expect the
5491 /// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
5492 static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
5493                                          const ICmpInst *RHS,
5494                                          const DataLayout &DL, bool LHSIsTrue,
5495                                          unsigned Depth) {
5496   // The LHS must be an 'or' or an 'and' instruction.
5497   assert((LHS->getOpcode() == Instruction::And ||
5498           LHS->getOpcode() == Instruction::Or) &&
5499          "Expected LHS to be 'and' or 'or'.");
5500 
5501   assert(Depth <= MaxDepth && "Hit recursion limit");
5502 
5503   // If the result of an 'or' is false, then we know both legs of the 'or' are
5504   // false.  Similarly, if the result of an 'and' is true, then we know both
5505   // legs of the 'and' are true.
5506   Value *ALHS, *ARHS;
5507   if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
5508       (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
5509     // FIXME: Make this non-recursion.
5510     if (Optional<bool> Implication =
5511             isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
5512       return Implication;
5513     if (Optional<bool> Implication =
5514             isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
5515       return Implication;
5516     return None;
5517   }
5518   return None;
5519 }
5520 
5521 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
5522                                         const DataLayout &DL, bool LHSIsTrue,
5523                                         unsigned Depth) {
5524   // Bail out when we hit the limit.
5525   if (Depth == MaxDepth)
5526     return None;
5527 
5528   // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
5529   // example.
5530   if (LHS->getType() != RHS->getType())
5531     return None;
5532 
5533   Type *OpTy = LHS->getType();
5534   assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
5535 
5536   // LHS ==> RHS by definition
5537   if (LHS == RHS)
5538     return LHSIsTrue;
5539 
5540   // FIXME: Extending the code below to handle vectors.
5541   if (OpTy->isVectorTy())
5542     return None;
5543 
5544   assert(OpTy->isIntegerTy(1) && "implied by above");
5545 
5546   // Both LHS and RHS are icmps.
5547   const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
5548   const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
5549   if (LHSCmp && RHSCmp)
5550     return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);
5551 
5552   // The LHS should be an 'or' or an 'and' instruction.  We expect the RHS to be
5553   // an icmp. FIXME: Add support for and/or on the RHS.
5554   const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
5555   if (LHSBO && RHSCmp) {
5556     if ((LHSBO->getOpcode() == Instruction::And ||
5557          LHSBO->getOpcode() == Instruction::Or))
5558       return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
5559   }
5560   return None;
5561 }
5562 
5563 Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
5564                                              const Instruction *ContextI,
5565                                              const DataLayout &DL) {
5566   assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
5567   if (!ContextI || !ContextI->getParent())
5568     return None;
5569 
5570   // TODO: This is a poor/cheap way to determine dominance. Should we use a
5571   // dominator tree (eg, from a SimplifyQuery) instead?
5572   const BasicBlock *ContextBB = ContextI->getParent();
5573   const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
5574   if (!PredBB)
5575     return None;
5576 
5577   // We need a conditional branch in the predecessor.
5578   Value *PredCond;
5579   BasicBlock *TrueBB, *FalseBB;
5580   if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
5581     return None;
5582 
5583   // The branch should get simplified. Don't bother simplifying this condition.
5584   if (TrueBB == FalseBB)
5585     return None;
5586 
5587   assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
5588          "Predecessor block does not point to successor?");
5589 
5590   // Is this condition implied by the predecessor condition?
5591   bool CondIsTrue = TrueBB == ContextBB;
5592   return isImpliedCondition(PredCond, Cond, DL, CondIsTrue);
5593 }
5594 
5595 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
5596                               APInt &Upper, const InstrInfoQuery &IIQ) {
5597   unsigned Width = Lower.getBitWidth();
5598   const APInt *C;
5599   switch (BO.getOpcode()) {
5600   case Instruction::Add:
5601     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
5602       // FIXME: If we have both nuw and nsw, we should reduce the range further.
5603       if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
5604         // 'add nuw x, C' produces [C, UINT_MAX].
5605         Lower = *C;
5606       } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
5607         if (C->isNegative()) {
5608           // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
5609           Lower = APInt::getSignedMinValue(Width);
5610           Upper = APInt::getSignedMaxValue(Width) + *C + 1;
5611         } else {
5612           // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
5613           Lower = APInt::getSignedMinValue(Width) + *C;
5614           Upper = APInt::getSignedMaxValue(Width) + 1;
5615         }
5616       }
5617     }
5618     break;
5619 
5620   case Instruction::And:
5621     if (match(BO.getOperand(1), m_APInt(C)))
5622       // 'and x, C' produces [0, C].
5623       Upper = *C + 1;
5624     break;
5625 
5626   case Instruction::Or:
5627     if (match(BO.getOperand(1), m_APInt(C)))
5628       // 'or x, C' produces [C, UINT_MAX].
5629       Lower = *C;
5630     break;
5631 
5632   case Instruction::AShr:
5633     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
5634       // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
5635       Lower = APInt::getSignedMinValue(Width).ashr(*C);
5636       Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
5637     } else if (match(BO.getOperand(0), m_APInt(C))) {
5638       unsigned ShiftAmount = Width - 1;
5639       if (!C->isNullValue() && IIQ.isExact(&BO))
5640         ShiftAmount = C->countTrailingZeros();
5641       if (C->isNegative()) {
5642         // 'ashr C, x' produces [C, C >> (Width-1)]
5643         Lower = *C;
5644         Upper = C->ashr(ShiftAmount) + 1;
5645       } else {
5646         // 'ashr C, x' produces [C >> (Width-1), C]
5647         Lower = C->ashr(ShiftAmount);
5648         Upper = *C + 1;
5649       }
5650     }
5651     break;
5652 
5653   case Instruction::LShr:
5654     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
5655       // 'lshr x, C' produces [0, UINT_MAX >> C].
5656       Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
5657     } else if (match(BO.getOperand(0), m_APInt(C))) {
5658       // 'lshr C, x' produces [C >> (Width-1), C].
5659       unsigned ShiftAmount = Width - 1;
5660       if (!C->isNullValue() && IIQ.isExact(&BO))
5661         ShiftAmount = C->countTrailingZeros();
5662       Lower = C->lshr(ShiftAmount);
5663       Upper = *C + 1;
5664     }
5665     break;
5666 
5667   case Instruction::Shl:
5668     if (match(BO.getOperand(0), m_APInt(C))) {
5669       if (IIQ.hasNoUnsignedWrap(&BO)) {
5670         // 'shl nuw C, x' produces [C, C << CLZ(C)]
5671         Lower = *C;
5672         Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
5673       } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
5674         if (C->isNegative()) {
5675           // 'shl nsw C, x' produces [C << CLO(C)-1, C]
5676           unsigned ShiftAmount = C->countLeadingOnes() - 1;
5677           Lower = C->shl(ShiftAmount);
5678           Upper = *C + 1;
5679         } else {
5680           // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
5681           unsigned ShiftAmount = C->countLeadingZeros() - 1;
5682           Lower = *C;
5683           Upper = C->shl(ShiftAmount) + 1;
5684         }
5685       }
5686     }
5687     break;
5688 
5689   case Instruction::SDiv:
5690     if (match(BO.getOperand(1), m_APInt(C))) {
5691       APInt IntMin = APInt::getSignedMinValue(Width);
5692       APInt IntMax = APInt::getSignedMaxValue(Width);
5693       if (C->isAllOnesValue()) {
5694         // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
5695         //    where C != -1 and C != 0 and C != 1
5696         Lower = IntMin + 1;
5697         Upper = IntMax + 1;
5698       } else if (C->countLeadingZeros() < Width - 1) {
5699         // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
5700         //    where C != -1 and C != 0 and C != 1
5701         Lower = IntMin.sdiv(*C);
5702         Upper = IntMax.sdiv(*C);
5703         if (Lower.sgt(Upper))
5704           std::swap(Lower, Upper);
5705         Upper = Upper + 1;
5706         assert(Upper != Lower && "Upper part of range has wrapped!");
5707       }
5708     } else if (match(BO.getOperand(0), m_APInt(C))) {
5709       if (C->isMinSignedValue()) {
5710         // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
5711         Lower = *C;
5712         Upper = Lower.lshr(1) + 1;
5713       } else {
5714         // 'sdiv C, x' produces [-|C|, |C|].
5715         Upper = C->abs() + 1;
5716         Lower = (-Upper) + 1;
5717       }
5718     }
5719     break;
5720 
5721   case Instruction::UDiv:
5722     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
5723       // 'udiv x, C' produces [0, UINT_MAX / C].
5724       Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
5725     } else if (match(BO.getOperand(0), m_APInt(C))) {
5726       // 'udiv C, x' produces [0, C].
5727       Upper = *C + 1;
5728     }
5729     break;
5730 
5731   case Instruction::SRem:
5732     if (match(BO.getOperand(1), m_APInt(C))) {
5733       // 'srem x, C' produces (-|C|, |C|).
5734       Upper = C->abs();
5735       Lower = (-Upper) + 1;
5736     }
5737     break;
5738 
5739   case Instruction::URem:
5740     if (match(BO.getOperand(1), m_APInt(C)))
5741       // 'urem x, C' produces [0, C).
5742       Upper = *C;
5743     break;
5744 
5745   default:
5746     break;
5747   }
5748 }
5749 
5750 static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
5751                                   APInt &Upper) {
5752   unsigned Width = Lower.getBitWidth();
5753   const APInt *C;
5754   switch (II.getIntrinsicID()) {
5755   case Intrinsic::uadd_sat:
5756     // uadd.sat(x, C) produces [C, UINT_MAX].
5757     if (match(II.getOperand(0), m_APInt(C)) ||
5758         match(II.getOperand(1), m_APInt(C)))
5759       Lower = *C;
5760     break;
5761   case Intrinsic::sadd_sat:
5762     if (match(II.getOperand(0), m_APInt(C)) ||
5763         match(II.getOperand(1), m_APInt(C))) {
5764       if (C->isNegative()) {
5765         // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
5766         Lower = APInt::getSignedMinValue(Width);
5767         Upper = APInt::getSignedMaxValue(Width) + *C + 1;
5768       } else {
5769         // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
5770         Lower = APInt::getSignedMinValue(Width) + *C;
5771         Upper = APInt::getSignedMaxValue(Width) + 1;
5772       }
5773     }
5774     break;
5775   case Intrinsic::usub_sat:
5776     // usub.sat(C, x) produces [0, C].
5777     if (match(II.getOperand(0), m_APInt(C)))
5778       Upper = *C + 1;
5779     // usub.sat(x, C) produces [0, UINT_MAX - C].
5780     else if (match(II.getOperand(1), m_APInt(C)))
5781       Upper = APInt::getMaxValue(Width) - *C + 1;
5782     break;
5783   case Intrinsic::ssub_sat:
5784     if (match(II.getOperand(0), m_APInt(C))) {
5785       if (C->isNegative()) {
5786         // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
5787         Lower = APInt::getSignedMinValue(Width);
5788         Upper = *C - APInt::getSignedMinValue(Width) + 1;
5789       } else {
5790         // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
5791         Lower = *C - APInt::getSignedMaxValue(Width);
5792         Upper = APInt::getSignedMaxValue(Width) + 1;
5793       }
5794     } else if (match(II.getOperand(1), m_APInt(C))) {
5795       if (C->isNegative()) {
5796         // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
5797         Lower = APInt::getSignedMinValue(Width) - *C;
5798         Upper = APInt::getSignedMaxValue(Width) + 1;
5799       } else {
5800         // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
5801         Lower = APInt::getSignedMinValue(Width);
5802         Upper = APInt::getSignedMaxValue(Width) - *C + 1;
5803       }
5804     }
5805     break;
5806   default:
5807     break;
5808   }
5809 }
5810 
5811 static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
5812                                       APInt &Upper, const InstrInfoQuery &IIQ) {
5813   const Value *LHS = nullptr, *RHS = nullptr;
5814   SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
5815   if (R.Flavor == SPF_UNKNOWN)
5816     return;
5817 
5818   unsigned BitWidth = SI.getType()->getScalarSizeInBits();
5819 
5820   if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
5821     // If the negation part of the abs (in RHS) has the NSW flag,
5822     // then the result of abs(X) is [0..SIGNED_MAX],
5823     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
5824     Lower = APInt::getNullValue(BitWidth);
5825     if (match(RHS, m_Neg(m_Specific(LHS))) &&
5826         IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
5827       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
5828     else
5829       Upper = APInt::getSignedMinValue(BitWidth) + 1;
5830     return;
5831   }
5832 
5833   if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
5834     // The result of -abs(X) is <= 0.
5835     Lower = APInt::getSignedMinValue(BitWidth);
5836     Upper = APInt(BitWidth, 1);
5837     return;
5838   }
5839 
5840   const APInt *C;
5841   if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
5842     return;
5843 
5844   switch (R.Flavor) {
5845     case SPF_UMIN:
5846       Upper = *C + 1;
5847       break;
5848     case SPF_UMAX:
5849       Lower = *C;
5850       break;
5851     case SPF_SMIN:
5852       Lower = APInt::getSignedMinValue(BitWidth);
5853       Upper = *C + 1;
5854       break;
5855     case SPF_SMAX:
5856       Lower = *C;
5857       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
5858       break;
5859     default:
5860       break;
5861   }
5862 }
5863 
5864 ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo) {
5865   assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
5866 
5867   const APInt *C;
5868   if (match(V, m_APInt(C)))
5869     return ConstantRange(*C);
5870 
5871   InstrInfoQuery IIQ(UseInstrInfo);
5872   unsigned BitWidth = V->getType()->getScalarSizeInBits();
5873   APInt Lower = APInt(BitWidth, 0);
5874   APInt Upper = APInt(BitWidth, 0);
5875   if (auto *BO = dyn_cast<BinaryOperator>(V))
5876     setLimitsForBinOp(*BO, Lower, Upper, IIQ);
5877   else if (auto *II = dyn_cast<IntrinsicInst>(V))
5878     setLimitsForIntrinsic(*II, Lower, Upper);
5879   else if (auto *SI = dyn_cast<SelectInst>(V))
5880     setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
5881 
5882   ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
5883 
5884   if (auto *I = dyn_cast<Instruction>(V))
5885     if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
5886       CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
5887 
5888   return CR;
5889 }
5890 
5891 static Optional<int64_t>
5892 getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
5893   // Skip over the first indices.
5894   gep_type_iterator GTI = gep_type_begin(GEP);
5895   for (unsigned i = 1; i != Idx; ++i, ++GTI)
5896     /*skip along*/;
5897 
5898   // Compute the offset implied by the rest of the indices.
5899   int64_t Offset = 0;
5900   for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
5901     ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
5902     if (!OpC)
5903       return None;
5904     if (OpC->isZero())
5905       continue; // No offset.
5906 
5907     // Handle struct indices, which add their field offset to the pointer.
5908     if (StructType *STy = GTI.getStructTypeOrNull()) {
5909       Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
5910       continue;
5911     }
5912 
5913     // Otherwise, we have a sequential type like an array or vector.  Multiply
5914     // the index by the ElementSize.
5915     uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
5916     Offset += Size * OpC->getSExtValue();
5917   }
5918 
5919   return Offset;
5920 }
5921 
5922 Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
5923                                         const DataLayout &DL) {
5924   Ptr1 = Ptr1->stripPointerCasts();
5925   Ptr2 = Ptr2->stripPointerCasts();
5926 
5927   // Handle the trivial case first.
5928   if (Ptr1 == Ptr2) {
5929     return 0;
5930   }
5931 
5932   const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
5933   const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
5934 
5935   // If one pointer is a GEP see if the GEP is a constant offset from the base,
5936   // as in "P" and "gep P, 1".
5937   // Also do this iteratively to handle the the following case:
5938   //   Ptr_t1 = GEP Ptr1, c1
5939   //   Ptr_t2 = GEP Ptr_t1, c2
5940   //   Ptr2 = GEP Ptr_t2, c3
5941   // where we will return c1+c2+c3.
5942   // TODO: Handle the case when both Ptr1 and Ptr2 are GEPs of some common base
5943   // -- replace getOffsetFromBase with getOffsetAndBase, check that the bases
5944   // are the same, and return the difference between offsets.
5945   auto getOffsetFromBase = [&DL](const GEPOperator *GEP,
5946                                  const Value *Ptr) -> Optional<int64_t> {
5947     const GEPOperator *GEP_T = GEP;
5948     int64_t OffsetVal = 0;
5949     bool HasSameBase = false;
5950     while (GEP_T) {
5951       auto Offset = getOffsetFromIndex(GEP_T, 1, DL);
5952       if (!Offset)
5953         return None;
5954       OffsetVal += *Offset;
5955       auto Op0 = GEP_T->getOperand(0)->stripPointerCasts();
5956       if (Op0 == Ptr) {
5957         HasSameBase = true;
5958         break;
5959       }
5960       GEP_T = dyn_cast<GEPOperator>(Op0);
5961     }
5962     if (!HasSameBase)
5963       return None;
5964     return OffsetVal;
5965   };
5966 
5967   if (GEP1) {
5968     auto Offset = getOffsetFromBase(GEP1, Ptr2);
5969     if (Offset)
5970       return -*Offset;
5971   }
5972   if (GEP2) {
5973     auto Offset = getOffsetFromBase(GEP2, Ptr1);
5974     if (Offset)
5975       return Offset;
5976   }
5977 
5978   // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
5979   // base.  After that base, they may have some number of common (and
5980   // potentially variable) indices.  After that they handle some constant
5981   // offset, which determines their offset from each other.  At this point, we
5982   // handle no other case.
5983   if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
5984     return None;
5985 
5986   // Skip any common indices and track the GEP types.
5987   unsigned Idx = 1;
5988   for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
5989     if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
5990       break;
5991 
5992   auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL);
5993   auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);
5994   if (!Offset1 || !Offset2)
5995     return None;
5996   return *Offset2 - *Offset1;
5997 }
5998