1 //===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains routines that help analyze properties that chains of
10 // computations have.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_ANALYSIS_VALUETRACKING_H
15 #define LLVM_ANALYSIS_VALUETRACKING_H
16 
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/Analysis/SimplifyQuery.h"
19 #include "llvm/Analysis/WithCache.h"
20 #include "llvm/IR/Constants.h"
21 #include "llvm/IR/DataLayout.h"
22 #include "llvm/IR/FMF.h"
23 #include "llvm/IR/InstrTypes.h"
24 #include "llvm/IR/Intrinsics.h"
25 #include <cassert>
26 #include <cstdint>
27 
28 namespace llvm {
29 
30 class Operator;
31 class AddOperator;
32 class AllocaInst;
33 class APInt;
34 class AssumptionCache;
35 class DominatorTree;
36 class GEPOperator;
37 class LoadInst;
38 class WithOverflowInst;
39 struct KnownBits;
40 class Loop;
41 class LoopInfo;
42 class MDNode;
43 class StringRef;
44 class TargetLibraryInfo;
45 class Value;
46 
47 constexpr unsigned MaxAnalysisRecursionDepth = 6;
48 
49 /// Determine which bits of V are known to be either zero or one and return
50 /// them in the KnownZero/KnownOne bit sets.
51 ///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and
/// the bit is set only if it is true for all of the elements in the vector.
57 void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL,
58                       unsigned Depth = 0, AssumptionCache *AC = nullptr,
59                       const Instruction *CxtI = nullptr,
60                       const DominatorTree *DT = nullptr,
61                       bool UseInstrInfo = true);
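
// A minimal usage sketch (not part of the API; assumes a `Value *V` and a
// `const DataLayout &DL` are in scope, with V of integer type):
//
//   KnownBits Known = computeKnownBits(V, DL);
//   if (Known.countMinTrailingZeros() >= 2) {
//     // V is provably a multiple of 4.
//   }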
62 
63 /// Returns the known bits rather than passing by reference.
64 KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
65                            unsigned Depth = 0, AssumptionCache *AC = nullptr,
66                            const Instruction *CxtI = nullptr,
67                            const DominatorTree *DT = nullptr,
68                            bool UseInstrInfo = true);
69 
70 /// Returns the known bits rather than passing by reference.
71 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
72                            const DataLayout &DL, unsigned Depth = 0,
73                            AssumptionCache *AC = nullptr,
74                            const Instruction *CxtI = nullptr,
75                            const DominatorTree *DT = nullptr,
76                            bool UseInstrInfo = true);
77 
78 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
79                            unsigned Depth, const SimplifyQuery &Q);
80 
81 KnownBits computeKnownBits(const Value *V, unsigned Depth,
82                            const SimplifyQuery &Q);
83 
84 void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
85                       const SimplifyQuery &Q);
86 
87 /// Compute known bits from the range metadata.
/// \p Known is populated from \p Ranges: Known.Zero holds the bits known to
/// be zero and Known.One the bits known to be one.
90 void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known);
91 
92 /// Merge bits known from context-dependent facts into Known.
93 void computeKnownBitsFromContext(const Value *V, KnownBits &Known,
94                                  unsigned Depth, const SimplifyQuery &Q);
95 
96 /// Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or).
97 KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I,
98                                        const KnownBits &KnownLHS,
99                                        const KnownBits &KnownRHS,
100                                        unsigned Depth, const SimplifyQuery &SQ);
101 
102 /// Return true if LHS and RHS have no common bits set.
103 bool haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
104                          const WithCache<const Value *> &RHSCache,
105                          const SimplifyQuery &SQ);
106 
107 /// Return true if the given value is known to have exactly one bit set when
108 /// defined. For vectors return true if every element is known to be a power
109 /// of two when defined. Supports values with integer or pointer type and
110 /// vectors of integers. If 'OrZero' is set, then return true if the given
111 /// value is either a power of two or zero.
112 bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
113                             bool OrZero = false, unsigned Depth = 0,
114                             AssumptionCache *AC = nullptr,
115                             const Instruction *CxtI = nullptr,
116                             const DominatorTree *DT = nullptr,
117                             bool UseInstrInfo = true);
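
// A hedged sketch of a common use: strength-reducing a remainder when the
// divisor is provably a power of two (`Builder`, `X`, and `Y` are assumed to
// be in scope, with `Builder` an IRBuilder<>):
//
//   // urem X, Y  ->  and X, (Y - 1)   when Y is a power of two (hence != 0).
//   if (isKnownToBeAPowerOfTwo(Y, DL))
//     return Builder.CreateAnd(
//         X, Builder.CreateSub(Y, ConstantInt::get(Y->getType(), 1)));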
118 
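/// Return true if \p CxtI is used only in comparisons for equality with zero.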
119 bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
120 
121 /// Return true if the given value is known to be non-zero when defined. For
122 /// vectors, return true if every element is known to be non-zero when
123 /// defined. For pointers, if the context instruction and dominator tree are
124 /// specified, perform context-sensitive analysis and return true if the
125 /// pointer couldn't possibly be null at the specified instruction.
126 /// Supports values with integer or pointer type and vectors of integers.
127 bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
128                     AssumptionCache *AC = nullptr,
129                     const Instruction *CxtI = nullptr,
130                     const DominatorTree *DT = nullptr,
131                     bool UseInstrInfo = true);
132 
/// Return true if the two given values are negations of each other.
/// Currently recognizes the following value pairs:
135 /// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
136 /// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
137 bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false);
138 
/// Returns true if the given value is known to be non-negative.
140 bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ,
141                         unsigned Depth = 0);
142 
/// Returns true if the given value is known to be positive (i.e. non-negative
144 /// and non-zero).
145 bool isKnownPositive(const Value *V, const SimplifyQuery &SQ,
146                      unsigned Depth = 0);
147 
/// Returns true if the given value is known to be negative (i.e. non-positive
/// and non-zero).
bool isKnownNegative(const Value *V, const SimplifyQuery &SQ,
151                      unsigned Depth = 0);
152 
153 /// Return true if the given values are known to be non-equal when defined.
154 /// Supports scalar integer types only.
155 bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
156                      AssumptionCache *AC = nullptr,
157                      const Instruction *CxtI = nullptr,
158                      const DominatorTree *DT = nullptr,
159                      bool UseInstrInfo = true);
160 
161 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
162 /// simplify operations downstream. Mask is known to be zero for bits that V
163 /// cannot have.
164 ///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the mask,
/// known zero, and known one values are the same width as the vector element,
/// and the bit is set only if it is true for all of the elements in the
/// vector.
170 bool MaskedValueIsZero(const Value *V, const APInt &Mask,
                       const SimplifyQuery &SQ, unsigned Depth = 0);
172 
173 /// Return the number of times the sign bit of the register is replicated into
174 /// the other bits. We know that at least 1 bit is always equal to the sign
175 /// bit (itself), but other cases can give us information. For example,
176 /// immediately after an "ashr X, 2", we know that the top 3 bits are all
177 /// equal to each other, so we return 3. For vectors, return the number of
/// sign bits for the vector element with the minimum number of known sign
179 /// bits.
180 unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
181                             unsigned Depth = 0, AssumptionCache *AC = nullptr,
182                             const Instruction *CxtI = nullptr,
183                             const DominatorTree *DT = nullptr,
184                             bool UseInstrInfo = true);
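
// A small sketch (assumes an i64 `Value *X` is in scope): if at least 33 of
// the 64 bits are copies of the sign bit, the value round-trips through i32.
//
//   if (ComputeNumSignBits(X, DL) > 32) {
//     // X == sext(trunc(X to i32) to i64), so that pair of casts is a no-op.
//   }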
185 
186 /// Get the upper bound on bit size for this Value \p Op as a signed integer.
187 /// i.e.  x == sext(trunc(x to MaxSignificantBits) to bitwidth(x)).
188 /// Similar to the APInt::getSignificantBits function.
189 unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL,
190                                    unsigned Depth = 0,
191                                    AssumptionCache *AC = nullptr,
192                                    const Instruction *CxtI = nullptr,
193                                    const DominatorTree *DT = nullptr);
194 
195 /// Map a call instruction to an intrinsic ID.  Libcalls which have equivalent
196 /// intrinsics are treated as-if they were intrinsics.
197 Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
198                                       const TargetLibraryInfo *TLI);
199 
200 /// Returns a pair of values, which if passed to llvm.is.fpclass, returns the
201 /// same result as an fcmp with the given operands.
202 ///
203 /// If \p LookThroughSrc is true, consider the input value when computing the
204 /// mask.
205 ///
/// If \p LookThroughSrc is false, ignore the source value (i.e. the first pair
/// element will always be \p LHS).
208 std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
209                                                 const Function &F, Value *LHS,
210                                                 Value *RHS,
211                                                 bool LookThroughSrc = true);
212 std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
213                                                 const Function &F, Value *LHS,
214                                                 const APFloat *ConstRHS,
215                                                 bool LookThroughSrc = true);
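
// A hedged usage sketch (`Pred`, `F`, `LHS`, and `RHS` are assumed to be in
// scope):
//
//   auto [ClassVal, ClassMask] = fcmpToClassTest(Pred, F, LHS, RHS);
//   if (ClassVal) {
//     // A call to llvm.is.fpclass(ClassVal, ClassMask) would compute the
//     // same result as the original fcmp.
//   }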
216 
217 struct KnownFPClass {
218   /// Floating-point classes the value could be one of.
219   FPClassTest KnownFPClasses = fcAllFlags;
220 
221   /// std::nullopt if the sign bit is unknown, true if the sign bit is
222   /// definitely set or false if the sign bit is definitely unset.
223   std::optional<bool> SignBit;
224 
225   /// Return true if it's known this can never be one of the mask entries.
226   bool isKnownNever(FPClassTest Mask) const {
227     return (KnownFPClasses & Mask) == fcNone;
228   }
229 
230   bool isUnknown() const {
231     return KnownFPClasses == fcAllFlags && !SignBit;
232   }
233 
234   /// Return true if it's known this can never be a nan.
235   bool isKnownNeverNaN() const {
236     return isKnownNever(fcNan);
237   }
238 
239   /// Return true if it's known this can never be an infinity.
240   bool isKnownNeverInfinity() const {
241     return isKnownNever(fcInf);
242   }
243 
244   /// Return true if it's known this can never be +infinity.
245   bool isKnownNeverPosInfinity() const {
246     return isKnownNever(fcPosInf);
247   }
248 
249   /// Return true if it's known this can never be -infinity.
250   bool isKnownNeverNegInfinity() const {
251     return isKnownNever(fcNegInf);
252   }
253 
  /// Return true if it's known this can never be a subnormal.
255   bool isKnownNeverSubnormal() const {
256     return isKnownNever(fcSubnormal);
257   }
258 
  /// Return true if it's known this can never be a positive subnormal.
260   bool isKnownNeverPosSubnormal() const {
261     return isKnownNever(fcPosSubnormal);
262   }
263 
  /// Return true if it's known this can never be a negative subnormal.
265   bool isKnownNeverNegSubnormal() const {
266     return isKnownNever(fcNegSubnormal);
267   }
268 
269   /// Return true if it's known this can never be a zero. This means a literal
270   /// [+-]0, and does not include denormal inputs implicitly treated as [+-]0.
271   bool isKnownNeverZero() const {
272     return isKnownNever(fcZero);
273   }
274 
275   /// Return true if it's known this can never be a literal positive zero.
276   bool isKnownNeverPosZero() const {
277     return isKnownNever(fcPosZero);
278   }
279 
280   /// Return true if it's known this can never be a negative zero. This means a
281   /// literal -0 and does not include denormal inputs implicitly treated as -0.
282   bool isKnownNeverNegZero() const {
283     return isKnownNever(fcNegZero);
284   }
285 
  /// Return true if it's known this can never be interpreted as a zero. This
287   /// extends isKnownNeverZero to cover the case where the assumed
288   /// floating-point mode for the function interprets denormals as zero.
289   bool isKnownNeverLogicalZero(const Function &F, Type *Ty) const;
290 
  /// Return true if it's known this can never be interpreted as a negative
  /// zero.
292   bool isKnownNeverLogicalNegZero(const Function &F, Type *Ty) const;
293 
  /// Return true if it's known this can never be interpreted as a positive
  /// zero.
295   bool isKnownNeverLogicalPosZero(const Function &F, Type *Ty) const;
296 
297   static constexpr FPClassTest OrderedLessThanZeroMask =
298       fcNegSubnormal | fcNegNormal | fcNegInf;
299   static constexpr FPClassTest OrderedGreaterThanZeroMask =
300       fcPosSubnormal | fcPosNormal | fcPosInf;
301 
302   /// Return true if we can prove that the analyzed floating-point value is
303   /// either NaN or never less than -0.0.
304   ///
305   ///      NaN --> true
306   ///       +0 --> true
307   ///       -0 --> true
308   ///   x > +0 --> true
309   ///   x < -0 --> false
310   bool cannotBeOrderedLessThanZero() const {
311     return isKnownNever(OrderedLessThanZeroMask);
312   }
313 
314   /// Return true if we can prove that the analyzed floating-point value is
315   /// either NaN or never greater than -0.0.
316   ///      NaN --> true
317   ///       +0 --> true
318   ///       -0 --> true
319   ///   x > +0 --> false
320   ///   x < -0 --> true
321   bool cannotBeOrderedGreaterThanZero() const {
322     return isKnownNever(OrderedGreaterThanZeroMask);
323   }
324 
325   KnownFPClass &operator|=(const KnownFPClass &RHS) {
326     KnownFPClasses = KnownFPClasses | RHS.KnownFPClasses;
327 
328     if (SignBit != RHS.SignBit)
329       SignBit = std::nullopt;
330     return *this;
331   }
332 
333   void knownNot(FPClassTest RuleOut) {
334     KnownFPClasses = KnownFPClasses & ~RuleOut;
335   }
336 
337   void fneg() {
338     KnownFPClasses = llvm::fneg(KnownFPClasses);
339     if (SignBit)
340       SignBit = !*SignBit;
341   }
342 
343   void fabs() {
344     if (KnownFPClasses & fcNegZero)
345       KnownFPClasses |= fcPosZero;
346 
347     if (KnownFPClasses & fcNegInf)
348       KnownFPClasses |= fcPosInf;
349 
350     if (KnownFPClasses & fcNegSubnormal)
351       KnownFPClasses |= fcPosSubnormal;
352 
353     if (KnownFPClasses & fcNegNormal)
354       KnownFPClasses |= fcPosNormal;
355 
356     signBitMustBeZero();
357   }
358 
359   /// Return true if the sign bit must be 0, ignoring the sign of nans.
360   bool signBitIsZeroOrNaN() const {
361     return isKnownNever(fcNegative);
362   }
363 
364   /// Assume the sign bit is zero.
365   void signBitMustBeZero() {
366     KnownFPClasses &= (fcPositive | fcNan);
367     SignBit = false;
368   }
369 
370   void copysign(const KnownFPClass &Sign) {
371     // Don't know anything about the sign of the source. Expand the possible set
372     // to its opposite sign pair.
373     if (KnownFPClasses & fcZero)
374       KnownFPClasses |= fcZero;
375     if (KnownFPClasses & fcSubnormal)
376       KnownFPClasses |= fcSubnormal;
377     if (KnownFPClasses & fcNormal)
378       KnownFPClasses |= fcNormal;
379     if (KnownFPClasses & fcInf)
380       KnownFPClasses |= fcInf;
381 
382     // Sign bit is exactly preserved even for nans.
383     SignBit = Sign.SignBit;
384 
385     // Clear sign bits based on the input sign mask.
386     if (Sign.isKnownNever(fcPositive | fcNan) || (SignBit && *SignBit))
387       KnownFPClasses &= (fcNegative | fcNan);
388     if (Sign.isKnownNever(fcNegative | fcNan) || (SignBit && !*SignBit))
389       KnownFPClasses &= (fcPositive | fcNan);
390   }
391 
392   // Propagate knowledge that a non-NaN source implies the result can also not
393   // be a NaN. For unconstrained operations, signaling nans are not guaranteed
394   // to be quieted but cannot be introduced.
395   void propagateNaN(const KnownFPClass &Src, bool PreserveSign = false) {
396     if (Src.isKnownNever(fcNan)) {
397       knownNot(fcNan);
398       if (PreserveSign)
399         SignBit = Src.SignBit;
400     } else if (Src.isKnownNever(fcSNan))
401       knownNot(fcSNan);
402   }
403 
404   /// Propagate knowledge from a source value that could be a denormal or
405   /// zero. We have to be conservative since output flushing is not guaranteed,
406   /// so known-never-zero may not hold.
407   ///
408   /// This assumes a copy-like operation and will replace any currently known
409   /// information.
410   void propagateDenormal(const KnownFPClass &Src, const Function &F, Type *Ty);
411 
412   /// Report known classes if \p Src is evaluated through a potentially
413   /// canonicalizing operation. We can assume signaling nans will not be
414   /// introduced, but cannot assume a denormal will be flushed under FTZ/DAZ.
415   ///
416   /// This assumes a copy-like operation and will replace any currently known
417   /// information.
418   void propagateCanonicalizingSrc(const KnownFPClass &Src, const Function &F,
419                                   Type *Ty);
420 
421   void resetAll() { *this = KnownFPClass(); }
422 };
423 
424 inline KnownFPClass operator|(KnownFPClass LHS, const KnownFPClass &RHS) {
425   LHS |= RHS;
426   return LHS;
427 }
428 
429 inline KnownFPClass operator|(const KnownFPClass &LHS, KnownFPClass &&RHS) {
430   RHS |= LHS;
431   return std::move(RHS);
432 }
433 
434 /// Determine which floating-point classes are valid for \p V, and return them
435 /// in KnownFPClass bit sets.
436 ///
/// This function is defined on values with floating-point type, vectors of
/// floating-point type, and arrays of floating-point type.
///
/// \p InterestedClasses is a compile time optimization hint for which floating
/// point classes should be queried. Queries not specified in \p
/// InterestedClasses should be reliable if they are determined during the
/// query.
444 KnownFPClass computeKnownFPClass(
445     const Value *V, const APInt &DemandedElts, const DataLayout &DL,
446     FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
447     const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
448     const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
449     bool UseInstrInfo = true);
450 
451 KnownFPClass computeKnownFPClass(
452     const Value *V, const DataLayout &DL,
453     FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
454     const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
455     const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
456     bool UseInstrInfo = true);
457 
458 /// Wrapper to account for known fast math flags at the use instruction.
459 inline KnownFPClass computeKnownFPClass(
460     const Value *V, FastMathFlags FMF, const DataLayout &DL,
461     FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
462     const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
463     const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
464     bool UseInstrInfo = true) {
465   if (FMF.noNaNs())
466     InterestedClasses &= ~fcNan;
467   if (FMF.noInfs())
468     InterestedClasses &= ~fcInf;
469 
470   KnownFPClass Result = computeKnownFPClass(V, DL, InterestedClasses, Depth,
471                                             TLI, AC, CxtI, DT, UseInstrInfo);
472 
473   if (FMF.noNaNs())
474     Result.KnownFPClasses &= ~fcNan;
475   if (FMF.noInfs())
476     Result.KnownFPClasses &= ~fcInf;
477   return Result;
478 }
479 
480 /// Return true if we can prove that the specified FP value is never equal to
481 /// -0.0. Users should use caution when considering PreserveSign
482 /// denormal-fp-math.
483 inline bool cannotBeNegativeZero(const Value *V, const DataLayout &DL,
484                                  const TargetLibraryInfo *TLI = nullptr,
485                                  unsigned Depth = 0,
486                                  AssumptionCache *AC = nullptr,
487                                  const Instruction *CtxI = nullptr,
488                                  const DominatorTree *DT = nullptr,
489                                  bool UseInstrInfo = true) {
490   KnownFPClass Known = computeKnownFPClass(V, DL, fcNegZero, Depth, TLI, AC,
491                                            CtxI, DT, UseInstrInfo);
492   return Known.isKnownNeverNegZero();
493 }
494 
495 /// Return true if we can prove that the specified FP value is either NaN or
496 /// never less than -0.0.
497 ///
498 ///      NaN --> true
499 ///       +0 --> true
500 ///       -0 --> true
501 ///   x > +0 --> true
502 ///   x < -0 --> false
503 inline bool cannotBeOrderedLessThanZero(const Value *V, const DataLayout &DL,
504                                         const TargetLibraryInfo *TLI = nullptr,
505                                         unsigned Depth = 0,
506                                         AssumptionCache *AC = nullptr,
507                                         const Instruction *CtxI = nullptr,
508                                         const DominatorTree *DT = nullptr,
509                                         bool UseInstrInfo = true) {
510   KnownFPClass Known =
511       computeKnownFPClass(V, DL, KnownFPClass::OrderedLessThanZeroMask, Depth,
512                           TLI, AC, CtxI, DT, UseInstrInfo);
513   return Known.cannotBeOrderedLessThanZero();
514 }
515 
516 /// Return true if the floating-point scalar value is not an infinity or if
517 /// the floating-point vector value has no infinities. Return false if a value
518 /// could ever be infinity.
519 inline bool isKnownNeverInfinity(const Value *V, const DataLayout &DL,
520                                  const TargetLibraryInfo *TLI = nullptr,
521                                  unsigned Depth = 0,
522                                  AssumptionCache *AC = nullptr,
523                                  const Instruction *CtxI = nullptr,
524                                  const DominatorTree *DT = nullptr,
525                                  bool UseInstrInfo = true) {
526   KnownFPClass Known = computeKnownFPClass(V, DL, fcInf, Depth, TLI, AC, CtxI,
527                                            DT, UseInstrInfo);
528   return Known.isKnownNeverInfinity();
529 }
530 
531 /// Return true if the floating-point value can never contain a NaN or infinity.
532 inline bool isKnownNeverInfOrNaN(
533     const Value *V, const DataLayout &DL, const TargetLibraryInfo *TLI,
534     unsigned Depth = 0, AssumptionCache *AC = nullptr,
535     const Instruction *CtxI = nullptr, const DominatorTree *DT = nullptr,
536     bool UseInstrInfo = true) {
537   KnownFPClass Known = computeKnownFPClass(V, DL, fcInf | fcNan, Depth, TLI, AC,
538                                            CtxI, DT, UseInstrInfo);
539   return Known.isKnownNeverNaN() && Known.isKnownNeverInfinity();
540 }
541 
542 /// Return true if the floating-point scalar value is not a NaN or if the
543 /// floating-point vector value has no NaN elements. Return false if a value
544 /// could ever be NaN.
545 inline bool isKnownNeverNaN(const Value *V, const DataLayout &DL,
546                             const TargetLibraryInfo *TLI, unsigned Depth = 0,
547                             AssumptionCache *AC = nullptr,
548                             const Instruction *CtxI = nullptr,
549                             const DominatorTree *DT = nullptr,
550                             bool UseInstrInfo = true) {
551   KnownFPClass Known = computeKnownFPClass(V, DL, fcNan, Depth, TLI, AC, CtxI,
552                                            DT, UseInstrInfo);
553   return Known.isKnownNeverNaN();
554 }
555 
556 /// Return true if we can prove that the specified FP value's sign bit is 0.
557 ///
558 ///      NaN --> true/false (depending on the NaN's sign bit)
559 ///       +0 --> true
560 ///       -0 --> false
561 ///   x > +0 --> true
562 ///   x < -0 --> false
563 bool SignBitMustBeZero(const Value *V, const DataLayout &DL,
564                        const TargetLibraryInfo *TLI);
565 
566 /// If the specified value can be set by repeating the same byte in memory,
567 /// return the i8 value that it is represented with. This is true for all i8
568 /// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
569 /// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
570 /// i16 0x1234), return null. If the value is entirely undef and padding,
571 /// return undef.
572 Value *isBytewiseValue(Value *V, const DataLayout &DL);
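
// A hedged sketch of the classic consumer, memset formation (assumes a
// `StoreInst *SI` and `DL` are in scope):
//
//   if (Value *ByteVal = isBytewiseValue(SI->getValueOperand(), DL)) {
//     // The store writes a repeated byte pattern; ByteVal is the i8 to pass
//     // to a memset covering the same bytes.
//   }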
573 
/// Given an aggregate and a sequence of indices, see if the scalar value
575 /// indexed is already around as a register, for example if it were inserted
576 /// directly into the aggregate.
577 ///
578 /// If InsertBefore is not null, this function will duplicate (modified)
579 /// insertvalues when a part of a nested struct is extracted.
580 Value *FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
581                          Instruction *InsertBefore = nullptr);
582 
583 /// Analyze the specified pointer to see if it can be expressed as a base
584 /// pointer plus a constant offset. Return the base and offset to the caller.
585 ///
586 /// This is a wrapper around Value::stripAndAccumulateConstantOffsets that
587 /// creates and later unpacks the required APInt.
588 inline Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
589                                                const DataLayout &DL,
590                                                bool AllowNonInbounds = true) {
591   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
592   Value *Base =
593       Ptr->stripAndAccumulateConstantOffsets(DL, OffsetAPInt, AllowNonInbounds);
594 
595   Offset = OffsetAPInt.getSExtValue();
596   return Base;
597 }
598 inline const Value *
599 GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
600                                  const DataLayout &DL,
601                                  bool AllowNonInbounds = true) {
602   return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset, DL,
603                                           AllowNonInbounds);
604 }
605 
/// Returns true if the GEP is based on a pointer to a string (array of
/// \p CharSize integers) and is indexing into this string.
608 bool isGEPBasedOnPointerToString(const GEPOperator *GEP, unsigned CharSize = 8);
609 
610 /// Represents offset+length into a ConstantDataArray.
611 struct ConstantDataArraySlice {
612   /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
613   /// initializer, it just doesn't fit the ConstantDataArray interface).
614   const ConstantDataArray *Array;
615 
616   /// Slice starts at this Offset.
617   uint64_t Offset;
618 
619   /// Length of the slice.
620   uint64_t Length;
621 
622   /// Moves the Offset and adjusts Length accordingly.
623   void move(uint64_t Delta) {
624     assert(Delta < Length);
625     Offset += Delta;
626     Length -= Delta;
627   }
628 
629   /// Convenience accessor for elements in the slice.
630   uint64_t operator[](unsigned I) const {
631     return Array == nullptr ? 0 : Array->getElementAsInteger(I + Offset);
632   }
633 };
634 
635 /// Returns true if the value \p V is a pointer into a ConstantDataArray.
/// If successful, \p Slice is populated with a description of the underlying
/// ConstantDataArray and the appropriate offset into it.
638 bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
639                               unsigned ElementSize, uint64_t Offset = 0);
640 
/// This function computes the length of a null-terminated C string pointed to
/// by V. If successful, it returns true and sets Str to the string. If
/// unsuccessful, it returns false. By default the result does not include the
/// trailing null character. If TrimAtNul is set to false, Str also includes
/// any trailing null characters, as well as any other characters that come
/// after them.
647 bool getConstantStringInfo(const Value *V, StringRef &Str,
648                            bool TrimAtNul = true);
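
// A minimal usage sketch (assumes a `Value *V` is in scope):
//
//   StringRef Str;
//   if (getConstantStringInfo(V, Str)) {
//     // V points to a constant C string; Str holds it without the trailing
//     // nul (given the default TrimAtNul = true).
//   }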
649 
650 /// If we can compute the length of the string pointed to by the specified
651 /// pointer, return 'len+1'.  If we can't, return 0.
652 uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);
653 
/// This function returns the call's pointer argument that is considered the
/// same as its return value by aliasing rules. You CAN'T use it to replace
/// one value with another. If \p MustPreserveNullness is true, the call must
/// preserve the nullness of the pointer.
658 const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call,
659                                                   bool MustPreserveNullness);
660 inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call,
661                                                    bool MustPreserveNullness) {
662   return const_cast<Value *>(getArgumentAliasingToReturnedPointer(
663       const_cast<const CallBase *>(Call), MustPreserveNullness));
664 }
665 
/// {launder,strip}.invariant.group return a pointer that aliases their
/// argument, and they only capture the pointer by returning it.
/// These intrinsics are not marked as nocapture, because returning counts as
/// capturing. The arguments are not marked as returned either, because that
/// would make the intrinsics useless. If \p MustPreserveNullness is true, the
/// intrinsic must preserve the nullness of the pointer.
672 bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
673     const CallBase *Call, bool MustPreserveNullness);
674 
675 /// This method strips off any GEP address adjustments and pointer casts from
676 /// the specified value, returning the original object being addressed. Note
677 /// that the returned value has pointer type if the specified value does. If
678 /// the MaxLookup value is non-zero, it limits the number of instructions to
679 /// be stripped off.
680 const Value *getUnderlyingObject(const Value *V, unsigned MaxLookup = 6);
681 inline Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6) {
682   // Force const to avoid infinite recursion.
683   const Value *VConst = V;
684   return const_cast<Value *>(getUnderlyingObject(VConst, MaxLookup));
685 }
686 
687 /// This method is similar to getUnderlyingObject except that it can
688 /// look through phi and select instructions and return multiple objects.
689 ///
690 /// If LoopInfo is passed, loop phis are further analyzed.  If a pointer
691 /// accesses different objects in each iteration, we don't look through the
692 /// phi node. E.g. consider this loop nest:
693 ///
694 ///   int **A;
695 ///   for (i)
696 ///     for (j) {
697 ///        A[i][j] = A[i-1][j] * B[j]
698 ///     }
699 ///
700 /// This is transformed by Load-PRE to stash away A[i] for the next iteration
701 /// of the outer loop:
702 ///
703 ///   Curr = A[0];          // Prev_0
704 ///   for (i: 1..N) {
705 ///     Prev = Curr;        // Prev = PHI (Prev_0, Curr)
706 ///     Curr = A[i];
707 ///     for (j: 0..N) {
708 ///        Curr[j] = Prev[j] * B[j]
709 ///     }
710 ///   }
711 ///
712 /// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
713 /// should not assume that Curr and Prev share the same underlying object thus
714 /// it shouldn't look through the phi above.
715 void getUnderlyingObjects(const Value *V,
716                           SmallVectorImpl<const Value *> &Objects,
717                           LoopInfo *LI = nullptr, unsigned MaxLookup = 6);
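
// A hedged usage sketch (assumes `Ptr` and a `LoopInfo *LI` are in scope):
//
//   SmallVector<const Value *, 4> Objects;
//   getUnderlyingObjects(Ptr, Objects, LI);
//   for (const Value *Obj : Objects)
//     if (!isa<AllocaInst>(Obj))
//       return false; // Ptr may be based on something other than an alloca.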
718 
719 /// This is a wrapper around getUnderlyingObjects and adds support for basic
720 /// ptrtoint+arithmetic+inttoptr sequences.
721 bool getUnderlyingObjectsForCodeGen(const Value *V,
722                                     SmallVectorImpl<Value *> &Objects);
723 
/// Returns the unique alloca that the value comes from, or nullptr.
/// If OffsetZero is true, check that V points to the beginning of the alloca.
726 AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false);
727 inline const AllocaInst *findAllocaForValue(const Value *V,
728                                             bool OffsetZero = false) {
729   return findAllocaForValue(const_cast<Value *>(V), OffsetZero);
730 }
731 
732 /// Return true if the only users of this pointer are lifetime markers.
733 bool onlyUsedByLifetimeMarkers(const Value *V);
734 
735 /// Return true if the only users of this pointer are lifetime markers or
736 /// droppable instructions.
737 bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);
738 
739 /// Return true if speculation of the given load must be suppressed to avoid
740 /// ordering or interfering with an active sanitizer.  If not suppressed,
741 /// dereferenceability and alignment must be proven separately.  Note: This
742 /// is only needed for raw reasoning; if you use the interface below
743 /// (isSafeToSpeculativelyExecute), this is handled internally.
744 bool mustSuppressSpeculation(const LoadInst &LI);
745 
746 /// Return true if the instruction does not have any effects besides
747 /// calculating the result and does not have undefined behavior.
748 ///
749 /// This method never returns true for an instruction that returns true for
750 /// mayHaveSideEffects; however, this method also does some other checks in
751 /// addition. It checks for undefined behavior, like dividing by zero or
752 /// loading from an invalid pointer (but not for undefined results, like a
753 /// shift with a shift amount larger than the width of the result). It checks
754 /// for malloc and alloca because speculatively executing them might cause a
755 /// memory leak. It also returns false for instructions related to control
756 /// flow, specifically terminators and PHI nodes.
757 ///
758 /// If the CtxI is specified this method performs context-sensitive analysis
759 /// and returns true if it is safe to execute the instruction immediately
760 /// before the CtxI.
761 ///
762 /// If the CtxI is NOT specified this method only looks at the instruction
763 /// itself and its operands, so if this method returns true, it is safe to
764 /// move the instruction as long as the correct dominance relationships for
765 /// the operands and users hold.
766 ///
767 /// This method can return true for instructions that read memory;
768 /// for such instructions, moving them may change the resulting value.
769 bool isSafeToSpeculativelyExecute(const Instruction *I,
770                                   const Instruction *CtxI = nullptr,
771                                   AssumptionCache *AC = nullptr,
772                                   const DominatorTree *DT = nullptr,
773                                   const TargetLibraryInfo *TLI = nullptr);
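
// A hedged hoisting sketch (assumes `I`, `InsertPt`, `AC`, and `DT` are in
// scope). Dereferenceability/alignment of loads and dominance of I's operands
// still need separate checks by the caller.
//
//   if (isSafeToSpeculativelyExecute(&I, InsertPt, AC, DT))
//     I.moveBefore(InsertPt);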
774 
775 /// This returns the same result as isSafeToSpeculativelyExecute if Opcode is
776 /// the actual opcode of Inst. If the provided and actual opcode differ, the
777 /// function (virtually) overrides the opcode of Inst with the provided
/// Opcode. There are some constraints in this case:
/// * If Opcode has a fixed number of operands (e.g., as binary operators do),
780 ///   then Inst has to have at least as many leading operands. The function
781 ///   will ignore all trailing operands beyond that number.
/// * If Opcode allows for an arbitrary number of operands (e.g., as CallInsts
783 ///   do), then all operands are considered.
784 /// * The virtual instruction has to satisfy all typing rules of the provided
785 ///   Opcode.
786 /// * This function is pessimistic in the following sense: If one actually
787 ///   materialized the virtual instruction, then isSafeToSpeculativelyExecute
788 ///   may say that the materialized instruction is speculatable whereas this
789 ///   function may have said that the instruction wouldn't be speculatable.
790 ///   This behavior is a shortcoming in the current implementation and not
791 ///   intentional.
792 bool isSafeToSpeculativelyExecuteWithOpcode(
793     unsigned Opcode, const Instruction *Inst, const Instruction *CtxI = nullptr,
794     AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr,
795     const TargetLibraryInfo *TLI = nullptr);
796 
/// Returns true if the result or effects of the given instruction \p I
/// depend on values not reachable through the def-use graph.
799 /// * Memory dependence arises for example if the instruction reads from
///   memory or may produce effects or undefined behavior. Memory dependent
///   instructions generally cannot be reordered with respect to other memory
802 ///   dependent instructions.
803 /// * Control dependence arises for example if the instruction may fault
804 ///   if lifted above a throwing call or infinite loop.
805 bool mayHaveNonDefUseDependency(const Instruction &I);
806 
807 /// Return true if it is an intrinsic that cannot be speculated but also
808 /// cannot trap.
809 bool isAssumeLikeIntrinsic(const Instruction *I);
810 
811 /// Return true if it is valid to use the assumptions provided by an
812 /// assume intrinsic, I, at the point in the control-flow identified by the
813 /// context instruction, CxtI.
814 bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
815                              const DominatorTree *DT = nullptr);
816 
817 enum class OverflowResult {
818   /// Always overflows in the direction of signed/unsigned min value.
819   AlwaysOverflowsLow,
820   /// Always overflows in the direction of signed/unsigned max value.
821   AlwaysOverflowsHigh,
822   /// May or may not overflow.
823   MayOverflow,
824   /// Never overflows.
825   NeverOverflows,
826 };
827 
828 OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS,
829                                              const SimplifyQuery &SQ);
830 OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
831                                            const SimplifyQuery &SQ);
832 OverflowResult
833 computeOverflowForUnsignedAdd(const WithCache<const Value *> &LHS,
834                               const WithCache<const Value *> &RHS,
835                               const SimplifyQuery &SQ);
836 OverflowResult computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
837                                            const WithCache<const Value *> &RHS,
838                                            const SimplifyQuery &SQ);
839 /// This version also leverages the sign bit of Add if known.
840 OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
841                                            const SimplifyQuery &SQ);
842 OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS,
843                                              const SimplifyQuery &SQ);
844 OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
845                                            const SimplifyQuery &SQ);
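
// A minimal sketch (assumes `A`, `B`, and a `SimplifyQuery SQ` are in scope):
//
//   if (computeOverflowForUnsignedAdd(A, B, SQ) ==
//       OverflowResult::NeverOverflows) {
//     // The addition cannot wrap, so an `add nuw` flag would be justified.
//   }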
846 
/// Returns true if the arithmetic part of \p WO's result is used only along
/// paths that are control dependent on the computation not overflowing, \p WO
/// being an <op>.with.overflow intrinsic.
850 bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
851                                const DominatorTree &DT);
852 
853 /// Determine the possible constant range of vscale with the given bit width,
854 /// based on the vscale_range function attribute.
855 ConstantRange getVScaleRange(const Function *F, unsigned BitWidth);
856 
857 /// Determine the possible constant range of an integer or vector of integer
858 /// value. This is intended as a cheap, non-recursive check.
859 ConstantRange computeConstantRange(const Value *V, bool ForSigned,
860                                    bool UseInstrInfo = true,
861                                    AssumptionCache *AC = nullptr,
862                                    const Instruction *CtxI = nullptr,
863                                    const DominatorTree *DT = nullptr,
864                                    unsigned Depth = 0);
865 
866 /// Return true if this function can prove that the instruction I will
867 /// always transfer execution to one of its successors (including the next
868 /// instruction that follows within a basic block). E.g. this is not
869 /// guaranteed for function calls that could loop infinitely.
870 ///
871 /// In other words, this function returns false for instructions that may
872 /// transfer execution or fail to transfer execution in a way that is not
873 /// captured in the CFG nor in the sequence of instructions within a basic
874 /// block.
875 ///
876 /// Undefined behavior is assumed not to happen, so e.g. division is
877 /// guaranteed to transfer execution to the following instruction even
878 /// though division by zero might cause undefined behavior.
879 bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);
880 
881 /// Returns true if this block does not contain a potential implicit exit.
/// This is equivalent to saying that all instructions within the basic block
883 /// are guaranteed to transfer execution to their successor within the basic
884 /// block. This has the same assumptions w.r.t. undefined behavior as the
885 /// instruction variant of this function.
886 bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);
887 
888 /// Return true if every instruction in the range (Begin, End) is
889 /// guaranteed to transfer execution to its static successor. \p ScanLimit
890 /// bounds the search to avoid scanning huge blocks.
891 bool isGuaranteedToTransferExecutionToSuccessor(
892     BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
893     unsigned ScanLimit = 32);
894 
895 /// Same as previous, but with range expressed via iterator_range.
896 bool isGuaranteedToTransferExecutionToSuccessor(
897     iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit = 32);
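
// A hedged sketch (assumes `I` is an instruction and `BB` its parent block):
//
//   if (isGuaranteedToTransferExecutionToSuccessor(I->getIterator(),
//                                                  BB->end())) {
//     // Execution that reaches I must also reach the end of BB (within the
//     // default scan limit).
//   }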
898 
899 /// Return true if this function can prove that the instruction I
900 /// is executed for every iteration of the loop L.
901 ///
902 /// Note that this currently only considers the loop header.
903 bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
904                                             const Loop *L);
905 
906 /// Return true if \p PoisonOp's user yields poison or raises UB if its
907 /// operand \p PoisonOp is poison.
908 ///
909 /// If \p PoisonOp is a vector or an aggregate and the operation's result is a
/// single value, any poison element in \p PoisonOp should make the result
911 /// poison or raise UB.
912 ///
913 /// To filter out operands that raise UB on poison, you can use
914 /// getGuaranteedNonPoisonOp.
915 bool propagatesPoison(const Use &PoisonOp);
916 
917 /// Insert operands of I into Ops such that I will trigger undefined behavior
918 /// if I is executed and that operand has a poison value.
919 void getGuaranteedNonPoisonOps(const Instruction *I,
920                                SmallVectorImpl<const Value *> &Ops);
921 
922 /// Insert operands of I into Ops such that I will trigger undefined behavior
923 /// if I is executed and that operand is not a well-defined value
924 /// (i.e. has undef bits or poison).
925 void getGuaranteedWellDefinedOps(const Instruction *I,
926                                  SmallVectorImpl<const Value *> &Ops);
927 
928 /// Return true if the given instruction must trigger undefined behavior
929 /// when I is executed with any operands which appear in KnownPoison holding
930 /// a poison value at the point of execution.
931 bool mustTriggerUB(const Instruction *I,
932                    const SmallPtrSetImpl<const Value *> &KnownPoison);
933 
934 /// Return true if this function can prove that if Inst is executed
935 /// and yields a poison value or undef bits, then that will trigger
936 /// undefined behavior.
937 ///
938 /// Note that this currently only considers the basic block that is
939 /// the parent of Inst.
940 bool programUndefinedIfUndefOrPoison(const Instruction *Inst);
941 bool programUndefinedIfPoison(const Instruction *Inst);
942 
943 /// canCreateUndefOrPoison returns true if Op can create undef or poison from
944 /// non-undef & non-poison operands.
945 /// For vectors, canCreateUndefOrPoison returns true if there is potential
946 /// poison or undef in any element of the result when vectors without
/// undef/poison elements are given as operands.
948 /// For example, given `Op = shl <2 x i32> %x, <0, 32>`, this function returns
949 /// true. If Op raises immediate UB but never creates poison or undef
950 /// (e.g. sdiv I, 0), canCreatePoison returns false.
951 ///
952 /// \p ConsiderFlagsAndMetadata controls whether poison producing flags and
953 /// metadata on the instruction are considered.  This can be used to see if the
954 /// instruction could still introduce undef or poison even without poison
955 /// generating flags and metadata which might be on the instruction.
956 /// (i.e. could the result of Op->dropPoisonGeneratingFlags() still create
957 /// poison or undef)
958 ///
959 /// canCreatePoison returns true if Op can create poison from non-poison
960 /// operands.
961 bool canCreateUndefOrPoison(const Operator *Op,
962                             bool ConsiderFlagsAndMetadata = true);
963 bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata = true);
964 
965 /// Return true if V is poison given that ValAssumedPoison is already poison.
966 /// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`,
967 /// impliesPoison returns true.
968 bool impliesPoison(const Value *ValAssumedPoison, const Value *V);
969 
970 /// Return true if this function can prove that V does not have undef bits
971 /// and is never poison. If V is an aggregate value or vector, check whether
972 /// all elements (except padding) are not undef or poison.
973 /// Note that this is different from canCreateUndefOrPoison because the
974 /// function assumes Op's operands are not poison/undef.
975 ///
976 /// If CtxI and DT are specified this method performs flow-sensitive analysis
977 /// and returns true if it is guaranteed to be never undef or poison
978 /// immediately before the CtxI.
979 bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
980                                       AssumptionCache *AC = nullptr,
981                                       const Instruction *CtxI = nullptr,
982                                       const DominatorTree *DT = nullptr,
983                                       unsigned Depth = 0);
984 
985 /// Returns true if V cannot be poison, but may be undef.
986 bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr,
987                                const Instruction *CtxI = nullptr,
988                                const DominatorTree *DT = nullptr,
989                                unsigned Depth = 0);
990 
991 /// Returns true if V cannot be undef, but may be poison.
992 bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC = nullptr,
993                               const Instruction *CtxI = nullptr,
994                               const DominatorTree *DT = nullptr,
995                               unsigned Depth = 0);
996 
/// Return true if undefined behavior would provably be executed on the path
/// to OnPathTo if Root produced a poison result.  Note that this doesn't say
999 /// anything about whether OnPathTo is actually executed or whether Root is
1000 /// actually poison.  This can be used to assess whether a new use of Root can
1001 /// be added at a location which is control equivalent with OnPathTo (such as
1002 /// immediately before it) without introducing UB which didn't previously
1003 /// exist.  Note that a false result conveys no information.
1004 bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
1005                                    Instruction *OnPathTo,
1006                                    DominatorTree *DT);
1007 
1008 /// Specific patterns of select instructions we can match.
1009 enum SelectPatternFlavor {
1010   SPF_UNKNOWN = 0,
1011   SPF_SMIN,    /// Signed minimum
1012   SPF_UMIN,    /// Unsigned minimum
1013   SPF_SMAX,    /// Signed maximum
1014   SPF_UMAX,    /// Unsigned maximum
1015   SPF_FMINNUM, /// Floating point minnum
1016   SPF_FMAXNUM, /// Floating point maxnum
1017   SPF_ABS,     /// Absolute value
1018   SPF_NABS     /// Negated absolute value
1019 };
1020 
1021 /// Behavior when a floating point min/max is given one NaN and one
1022 /// non-NaN as input.
1023 enum SelectPatternNaNBehavior {
1024   SPNB_NA = 0,        /// NaN behavior not applicable.
1025   SPNB_RETURNS_NAN,   /// Given one NaN input, returns the NaN.
1026   SPNB_RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
1027   SPNB_RETURNS_ANY    /// Given one NaN input, can return either (or
1028                       /// it has been determined that no operands can
1029                       /// be NaN).
1030 };
1031 
1032 struct SelectPatternResult {
1033   SelectPatternFlavor Flavor;
1034   SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is
1035                                         /// SPF_FMINNUM or SPF_FMAXNUM.
1036   bool Ordered; /// When implementing this min/max pattern as
1037                 /// fcmp; select, does the fcmp have to be
1038                 /// ordered?
1039 
1040   /// Return true if \p SPF is a min or a max pattern.
1041   static bool isMinOrMax(SelectPatternFlavor SPF) {
1042     return SPF != SPF_UNKNOWN && SPF != SPF_ABS && SPF != SPF_NABS;
1043   }
1044 };
1045 
1046 /// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
1047 /// and providing the out parameter results if we successfully match.
1048 ///
1049 /// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be
1050 /// the negation instruction from the idiom.
1051 ///
1052 /// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
1053 /// not match that of the original select. If this is the case, the cast
1054 /// operation (one of Trunc,SExt,Zext) that must be done to transform the
1055 /// type of LHS and RHS into the type of V is returned in CastOp.
1056 ///
1057 /// For example:
///   %1 = icmp slt i32 %a, 4
1059 ///   %2 = sext i32 %a to i64
1060 ///   %3 = select i1 %1, i64 %2, i64 4
1061 ///
1062 /// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
1063 ///
1064 SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
1065                                        Instruction::CastOps *CastOp = nullptr,
1066                                        unsigned Depth = 0);
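
// A minimal usage sketch (assumes a `Value *SI`, typically a select, is in
// scope):
//
//   Value *LHS, *RHS;
//   SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
//   if (SPR.Flavor == SPF_SMAX) {
//     // SI computes the signed maximum of LHS and RHS.
//   }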
1067 
1068 inline SelectPatternResult matchSelectPattern(const Value *V, const Value *&LHS,
1069                                               const Value *&RHS) {
1070   Value *L = const_cast<Value *>(LHS);
1071   Value *R = const_cast<Value *>(RHS);
1072   auto Result = matchSelectPattern(const_cast<Value *>(V), L, R);
1073   LHS = L;
1074   RHS = R;
1075   return Result;
1076 }
1077 
1078 /// Determine the pattern that a select with the given compare as its
1079 /// predicate and given values as its true/false operands would match.
1080 SelectPatternResult matchDecomposedSelectPattern(
1081     CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
1082     Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);
1083 
1084 /// Return the canonical comparison predicate for the specified
1085 /// minimum/maximum flavor.
1086 CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered = false);
1087 
1088 /// Return the inverse minimum/maximum flavor of the specified flavor.
1089 /// For example, signed minimum is the inverse of signed maximum.
1090 SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);
1091 
1092 Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID);
1093 
1094 /// Return the minimum or maximum constant value for the specified integer
1095 /// min/max flavor and type.
1096 APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth);
1097 
1098 /// Check if the values in \p VL are select instructions that can be converted
1099 /// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a
1100 /// conversion is possible, together with a bool indicating whether all select
1101 /// conditions are only used by the selects. Otherwise return
1102 /// Intrinsic::not_intrinsic.
1103 std::pair<Intrinsic::ID, bool>
1104 canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);
1105 
1106 /// Attempt to match a simple first order recurrence cycle of the form:
1107 ///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
1108 ///   %inc = binop %iv, %step
1109 /// OR
1110 ///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
1111 ///   %inc = binop %step, %iv
1112 ///
1113 /// A first order recurrence is a formula with the form: X_n = f(X_(n-1))
1114 ///
1115 /// A couple of notes on subtleties in that definition:
1116 /// * The Step does not have to be loop invariant.  In math terms, it can
1117 ///   be a free variable.  We allow recurrences with both constant and
1118 ///   variable coefficients. Callers may wish to filter cases where Step
1119 ///   does not dominate P.
1120 /// * For non-commutative operators, we will match both forms.  This
1121 ///   results in some odd recurrence structures.  Callers may wish to filter
1122 ///   out recurrences where the phi is not the LHS of the returned operator.
1123 /// * Because of the structure matched, the caller can assume as a post
///   condition of the match the presence of a Loop with P's parent as its
1125 ///   header *except* in unreachable code.  (Dominance decays in unreachable
1126 ///   code.)
1127 ///
/// NOTE: This is intentionally simple.  If you want the ability to analyze
/// non-trivial loop conditions, see ScalarEvolution instead.
1130 bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start,
1131                            Value *&Step);
1132 
1133 /// Analogous to the above, but starting from the binary operator
1134 bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P, Value *&Start,
1135                            Value *&Step);
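
// A minimal usage sketch (assumes a `PHINode *P` is in scope):
//
//   BinaryOperator *BO;
//   Value *Start, *Step;
//   if (matchSimpleRecurrence(P, BO, Start, Step)) {
//     // P starts at Start and is advanced by BO's opcode applied with Step
//     // once per loop iteration.
//   }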
1136 
/// Return true if RHS is known to be implied true by LHS.  Return false if
/// RHS is known to be implied false by LHS.  Otherwise, return std::nullopt
/// if no implication can be made. LHS and RHS must be i1 (boolean) values or
/// vectors of such values. Note that the truth table for implication is the
/// same as <=u on i1 values (but not <=s!).  The truth table for both is:
1143 ///    | T | F (B)
1144 ///  T | T | F
1145 ///  F | T | T
1146 /// (A)
1147 std::optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
1148                                        const DataLayout &DL,
1149                                        bool LHSIsTrue = true,
1150                                        unsigned Depth = 0);
1151 std::optional<bool> isImpliedCondition(const Value *LHS,
1152                                        CmpInst::Predicate RHSPred,
1153                                        const Value *RHSOp0, const Value *RHSOp1,
1154                                        const DataLayout &DL,
1155                                        bool LHSIsTrue = true,
1156                                        unsigned Depth = 0);
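
// A hedged sketch (assumes i1 values `A` and `B` and `DL` are in scope):
//
//   if (isImpliedCondition(A, B, DL) == true) {
//     // B is known true at any point where A is known true, so a use of B
//     // dominated by the true edge of a branch on A can be folded to true.
//   }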
1157 
1158 /// Return the boolean condition value in the context of the given instruction
1159 /// if it is known based on dominating conditions.
1160 std::optional<bool> isImpliedByDomCondition(const Value *Cond,
1161                                             const Instruction *ContextI,
1162                                             const DataLayout &DL);
1163 std::optional<bool> isImpliedByDomCondition(CmpInst::Predicate Pred,
1164                                             const Value *LHS, const Value *RHS,
1165                                             const Instruction *ContextI,
1166                                             const DataLayout &DL);
1167 } // end namespace llvm
1168 
1169 #endif // LLVM_ANALYSIS_VALUETRACKING_H
1170