//===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_VALUETRACKING_H
#define LLVM_ANALYSIS_VALUETRACKING_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/WithCache.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Intrinsics.h"
#include <cassert>
#include <cstdint>

namespace llvm {

class Operator;
class AddOperator;
class AllocaInst;
class APInt;
class AssumptionCache;
class DominatorTree;
class GEPOperator;
class LoadInst;
class WithOverflowInst;
struct KnownBits;
class Loop;
class LoopInfo;
class MDNode;
class StringRef;
class TargetLibraryInfo;
class Value;

constexpr unsigned MaxAnalysisRecursionDepth = 6;

/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and
/// the bit is set only if it is true for all of the elements in the vector.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL,
                      unsigned Depth = 0, AssumptionCache *AC = nullptr,
                      const Instruction *CxtI = nullptr,
                      const DominatorTree *DT = nullptr,
                      bool UseInstrInfo = true);
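
// Example (an illustrative sketch, not part of this header; `V` and `DL` are
// assumed to be a Value* and the module's DataLayout already in scope). Using
// the value-returning overload declared below:
//
//   KnownBits Known = computeKnownBits(V, DL);
//   if (Known.countMinTrailingZeros() >= 2)
//     ...; // V is provably a multiple of 4.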

/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
                           unsigned Depth = 0, AssumptionCache *AC = nullptr,
                           const Instruction *CxtI = nullptr,
                           const DominatorTree *DT = nullptr,
                           bool UseInstrInfo = true);

/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                           const DataLayout &DL, unsigned Depth = 0,
                           AssumptionCache *AC = nullptr,
                           const Instruction *CxtI = nullptr,
                           const DominatorTree *DT = nullptr,
                           bool UseInstrInfo = true);

KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const SimplifyQuery &Q);

KnownBits computeKnownBits(const Value *V, unsigned Depth,
                           const SimplifyQuery &Q);

void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                      const SimplifyQuery &Q);

/// Compute known bits from the range metadata.
/// \p Known is populated with the bits known to be zero and the bits known
/// to be one.
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known);

/// Merge bits known from context-dependent facts into Known.
void computeKnownBitsFromContext(const Value *V, KnownBits &Known,
                                 unsigned Depth, const SimplifyQuery &Q);

/// Using KnownBits LHS/RHS, produce the known bits for a logic op
/// (and/xor/or).
KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I,
                                       const KnownBits &KnownLHS,
                                       const KnownBits &KnownRHS,
                                       unsigned Depth, const SimplifyQuery &SQ);

/// Return true if LHS and RHS have no common bits set.
bool haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
                         const WithCache<const Value *> &RHSCache,
                         const SimplifyQuery &SQ);

/// Return true if the given value is known to have exactly one bit set when
/// defined. For vectors return true if every element is known to be a power
/// of two when defined. Supports values with integer or pointer type and
/// vectors of integers. If 'OrZero' is set, then return true if the given
/// value is either a power of two or zero.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                            bool OrZero = false, unsigned Depth = 0,
                            AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true);
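
// Example (sketch; `Divisor` and `DL` are assumed to be in scope): a udiv by
// a value that is provably a power of two can be strength-reduced to a shift:
//
//   if (isKnownToBeAPowerOfTwo(Divisor, DL))
//     ...; // `udiv X, Divisor` can become `lshr X, log2(Divisor)`.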

bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);

/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
                    AssumptionCache *AC = nullptr,
                    const Instruction *CxtI = nullptr,
                    const DominatorTree *DT = nullptr,
                    bool UseInstrInfo = true);

/// Return true if the two given values are negations of each other.
/// Currently it can recognize these value pairs:
/// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
/// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false);

/// Returns true if the given value is known to be non-negative.
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ,
                        unsigned Depth = 0);

/// Returns true if the given value is known to be positive (i.e. non-negative
/// and non-zero).
bool isKnownPositive(const Value *V, const SimplifyQuery &SQ,
                     unsigned Depth = 0);

/// Returns true if the given value is known to be negative (i.e. non-positive
/// and non-zero).
bool isKnownNegative(const Value *V, const SimplifyQuery &SQ,
                     unsigned Depth = 0);

/// Return true if the given values are known to be non-equal when defined.
/// Supports scalar integer types only.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
                     AssumptionCache *AC = nullptr,
                     const Instruction *CxtI = nullptr,
                     const DominatorTree *DT = nullptr,
                     bool UseInstrInfo = true);

/// Return true if 'V & Mask' is known to be zero. We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
/// cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the mask,
/// known zero, and known one values are the same width as the vector element,
/// and the bit is set only if it is true for all of the elements in the
/// vector.
bool MaskedValueIsZero(const Value *V, const APInt &Mask,
                       const SimplifyQuery &SQ, unsigned Depth = 0);

/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign
/// bit (itself), but other cases can give us information. For example,
/// immediately after an "ashr X, 2", we know that the top 3 bits are all
/// equal to each other, so we return 3. For vectors, return the number of
/// sign bits for the vector element with the minimum number of known sign
/// bits.
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
                            unsigned Depth = 0, AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true);
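
// Example (sketch; `Op`, `DL`, and `NarrowWidth` are assumptions): a value
// with enough known sign bits survives a trunc+sext round trip unchanged:
//
//   unsigned BitWidth = Op->getType()->getScalarSizeInBits();
//   if (ComputeNumSignBits(Op, DL) > BitWidth - NarrowWidth)
//     ...; // trunc to NarrowWidth then sext back to BitWidth is lossless.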

/// Get the upper bound on bit size for this Value \p Op as a signed integer.
/// i.e.  x == sext(trunc(x to MaxSignificantBits) to bitwidth(x)).
/// Similar to the APInt::getSignificantBits function.
unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL,
                                   unsigned Depth = 0,
                                   AssumptionCache *AC = nullptr,
                                   const Instruction *CxtI = nullptr,
                                   const DominatorTree *DT = nullptr);

/// Map a call instruction to an intrinsic ID.  Libcalls which have equivalent
/// intrinsics are treated as-if they were intrinsics.
Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
                                      const TargetLibraryInfo *TLI);

/// Returns a pair of values, which if passed to llvm.is.fpclass, returns the
/// same result as an fcmp with the given operands.
///
/// If \p LookThroughSrc is true, consider the input value when computing the
/// mask.
///
/// If \p LookThroughSrc is false, ignore the source value (i.e. the first pair
/// element will always be LHS).
std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
                                                const Function &F, Value *LHS,
                                                Value *RHS,
                                                bool LookThroughSrc = true);
std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
                                                const Function &F, Value *LHS,
                                                const APFloat *ConstRHS,
                                                bool LookThroughSrc = true);
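
// Example (sketch; `F`, `LHS`, and `RHS` are assumptions, with `RHS` a zero
// constant): an `fcmp oeq LHS, 0.0` can be reasoned about as a class test:
//
//   auto [Val, Mask] = fcmpToClassTest(CmpInst::FCMP_OEQ, F, LHS, RHS);
//   if (Val)
//     ...; // the fcmp behaves like `llvm.is.fpclass(Val, Mask)`.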

struct KnownFPClass {
  /// Floating-point classes the value could be one of.
  FPClassTest KnownFPClasses = fcAllFlags;

  /// std::nullopt if the sign bit is unknown, true if the sign bit is
  /// definitely set or false if the sign bit is definitely unset.
  std::optional<bool> SignBit;

  /// Return true if it's known this can never be one of the mask entries.
  bool isKnownNever(FPClassTest Mask) const {
    return (KnownFPClasses & Mask) == fcNone;
  }

  bool isUnknown() const {
    return KnownFPClasses == fcAllFlags && !SignBit;
  }

  /// Return true if it's known this can never be a nan.
  bool isKnownNeverNaN() const {
    return isKnownNever(fcNan);
  }

  /// Return true if it's known this can never be an infinity.
  bool isKnownNeverInfinity() const {
    return isKnownNever(fcInf);
  }

  /// Return true if it's known this can never be +infinity.
  bool isKnownNeverPosInfinity() const {
    return isKnownNever(fcPosInf);
  }

  /// Return true if it's known this can never be -infinity.
  bool isKnownNeverNegInfinity() const {
    return isKnownNever(fcNegInf);
  }

  /// Return true if it's known this can never be a subnormal.
  bool isKnownNeverSubnormal() const {
    return isKnownNever(fcSubnormal);
  }

  /// Return true if it's known this can never be a positive subnormal.
  bool isKnownNeverPosSubnormal() const {
    return isKnownNever(fcPosSubnormal);
  }

  /// Return true if it's known this can never be a negative subnormal.
  bool isKnownNeverNegSubnormal() const {
    return isKnownNever(fcNegSubnormal);
  }

  /// Return true if it's known this can never be a zero. This means a literal
  /// [+-]0, and does not include denormal inputs implicitly treated as [+-]0.
  bool isKnownNeverZero() const {
    return isKnownNever(fcZero);
  }

  /// Return true if it's known this can never be a literal positive zero.
  bool isKnownNeverPosZero() const {
    return isKnownNever(fcPosZero);
  }

  /// Return true if it's known this can never be a negative zero. This means a
  /// literal -0 and does not include denormal inputs implicitly treated as -0.
  bool isKnownNeverNegZero() const {
    return isKnownNever(fcNegZero);
  }

  /// Return true if it's known this can never be interpreted as a zero. This
  /// extends isKnownNeverZero to cover the case where the assumed
  /// floating-point mode for the function interprets denormals as zero.
  bool isKnownNeverLogicalZero(const Function &F, Type *Ty) const;

  /// Return true if it's known this can never be interpreted as a negative
  /// zero.
  bool isKnownNeverLogicalNegZero(const Function &F, Type *Ty) const;

  /// Return true if it's known this can never be interpreted as a positive
  /// zero.
  bool isKnownNeverLogicalPosZero(const Function &F, Type *Ty) const;

  static constexpr FPClassTest OrderedLessThanZeroMask =
      fcNegSubnormal | fcNegNormal | fcNegInf;
  static constexpr FPClassTest OrderedGreaterThanZeroMask =
      fcPosSubnormal | fcPosNormal | fcPosInf;

  /// Return true if we can prove that the analyzed floating-point value is
  /// either NaN or never less than -0.0.
  ///
  ///      NaN --> true
  ///       +0 --> true
  ///       -0 --> true
  ///   x > +0 --> true
  ///   x < -0 --> false
  bool cannotBeOrderedLessThanZero() const {
    return isKnownNever(OrderedLessThanZeroMask);
  }

  /// Return true if we can prove that the analyzed floating-point value is
  /// either NaN or never greater than -0.0.
  ///
  ///      NaN --> true
  ///       +0 --> true
  ///       -0 --> true
  ///   x > +0 --> false
  ///   x < -0 --> true
  bool cannotBeOrderedGreaterThanZero() const {
    return isKnownNever(OrderedGreaterThanZeroMask);
  }

  KnownFPClass &operator|=(const KnownFPClass &RHS) {
    KnownFPClasses = KnownFPClasses | RHS.KnownFPClasses;

    if (SignBit != RHS.SignBit)
      SignBit = std::nullopt;
    return *this;
  }

  void knownNot(FPClassTest RuleOut) {
    KnownFPClasses = KnownFPClasses & ~RuleOut;
  }

  void fneg() {
    KnownFPClasses = llvm::fneg(KnownFPClasses);
    if (SignBit)
      SignBit = !*SignBit;
  }

  void fabs() {
    if (KnownFPClasses & fcNegZero)
      KnownFPClasses |= fcPosZero;

    if (KnownFPClasses & fcNegInf)
      KnownFPClasses |= fcPosInf;

    if (KnownFPClasses & fcNegSubnormal)
      KnownFPClasses |= fcPosSubnormal;

    if (KnownFPClasses & fcNegNormal)
      KnownFPClasses |= fcPosNormal;

    signBitMustBeZero();
  }

  /// Return true if the sign bit must be 0, ignoring the sign of nans.
  bool signBitIsZeroOrNaN() const {
    return isKnownNever(fcNegative);
  }

  /// Assume the sign bit is zero.
  void signBitMustBeZero() {
    KnownFPClasses &= (fcPositive | fcNan);
    SignBit = false;
  }

  void copysign(const KnownFPClass &Sign) {
    // Don't know anything about the sign of the source. Expand the possible
    // set to its opposite sign pair.
    if (KnownFPClasses & fcZero)
      KnownFPClasses |= fcZero;
    if (KnownFPClasses & fcSubnormal)
      KnownFPClasses |= fcSubnormal;
    if (KnownFPClasses & fcNormal)
      KnownFPClasses |= fcNormal;
    if (KnownFPClasses & fcInf)
      KnownFPClasses |= fcInf;

    // Sign bit is exactly preserved even for nans.
    SignBit = Sign.SignBit;

    // Clear sign bits based on the input sign mask.
    if (Sign.isKnownNever(fcPositive | fcNan) || (SignBit && *SignBit))
      KnownFPClasses &= (fcNegative | fcNan);
    if (Sign.isKnownNever(fcNegative | fcNan) || (SignBit && !*SignBit))
      KnownFPClasses &= (fcPositive | fcNan);
  }

  // Propagate knowledge that a non-NaN source implies the result can also not
  // be a NaN. For unconstrained operations, signaling nans are not guaranteed
  // to be quieted but cannot be introduced.
  void propagateNaN(const KnownFPClass &Src, bool PreserveSign = false) {
    if (Src.isKnownNever(fcNan)) {
      knownNot(fcNan);
      if (PreserveSign)
        SignBit = Src.SignBit;
    } else if (Src.isKnownNever(fcSNan))
      knownNot(fcSNan);
  }

  /// Propagate knowledge from a source value that could be a denormal or
  /// zero. We have to be conservative since output flushing is not guaranteed,
  /// so known-never-zero may not hold.
  ///
  /// This assumes a copy-like operation and will replace any currently known
  /// information.
  void propagateDenormal(const KnownFPClass &Src, const Function &F, Type *Ty);

  /// Report known classes if \p Src is evaluated through a potentially
  /// canonicalizing operation. We can assume signaling nans will not be
  /// introduced, but cannot assume a denormal will be flushed under FTZ/DAZ.
  ///
  /// This assumes a copy-like operation and will replace any currently known
  /// information.
  void propagateCanonicalizingSrc(const KnownFPClass &Src, const Function &F,
                                  Type *Ty);

  void resetAll() { *this = KnownFPClass(); }
};

inline KnownFPClass operator|(KnownFPClass LHS, const KnownFPClass &RHS) {
  LHS |= RHS;
  return LHS;
}

inline KnownFPClass operator|(const KnownFPClass &LHS, KnownFPClass &&RHS) {
  RHS |= LHS;
  return std::move(RHS);
}

/// Determine which floating-point classes are valid for \p V, and return them
/// in KnownFPClass bit sets.
///
/// This function is defined on values with floating-point type, vectors of
/// floating-point type, and arrays of floating-point type.
///
/// \p InterestedClasses is a compile time optimization hint for which floating
/// point classes should be queried. Queries not specified in \p
/// InterestedClasses should be reliable if they are determined during the
/// query.
KnownFPClass computeKnownFPClass(
    const Value *V, const APInt &DemandedElts, const DataLayout &DL,
    FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
    const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
    const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
    bool UseInstrInfo = true);

KnownFPClass computeKnownFPClass(
    const Value *V, const DataLayout &DL,
    FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
    const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
    const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
    bool UseInstrInfo = true);

/// Wrapper to account for known fast math flags at the use instruction.
inline KnownFPClass computeKnownFPClass(
    const Value *V, FastMathFlags FMF, const DataLayout &DL,
    FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
    const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
    const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
    bool UseInstrInfo = true) {
  if (FMF.noNaNs())
    InterestedClasses &= ~fcNan;
  if (FMF.noInfs())
    InterestedClasses &= ~fcInf;

  KnownFPClass Result = computeKnownFPClass(V, DL, InterestedClasses, Depth,
                                            TLI, AC, CxtI, DT, UseInstrInfo);

  if (FMF.noNaNs())
    Result.KnownFPClasses &= ~fcNan;
  if (FMF.noInfs())
    Result.KnownFPClasses &= ~fcInf;
  return Result;
}
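
// Example (sketch; `FDiv` is assumed to be an instruction carrying fast math
// flags, and `DL` to be in scope): `nnan`/`ninf` on the use refine the query:
//
//   KnownFPClass Known = computeKnownFPClass(
//       FDiv->getOperand(0), FDiv->getFastMathFlags(), DL);
//   if (Known.cannotBeOrderedLessThanZero())
//     ...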

/// Return true if we can prove that the specified FP value is never equal to
/// -0.0. Users should use caution when considering PreserveSign
/// denormal-fp-math.
inline bool cannotBeNegativeZero(const Value *V, const DataLayout &DL,
                                 const TargetLibraryInfo *TLI = nullptr,
                                 unsigned Depth = 0,
                                 AssumptionCache *AC = nullptr,
                                 const Instruction *CtxI = nullptr,
                                 const DominatorTree *DT = nullptr,
                                 bool UseInstrInfo = true) {
  KnownFPClass Known = computeKnownFPClass(V, DL, fcNegZero, Depth, TLI, AC,
                                           CtxI, DT, UseInstrInfo);
  return Known.isKnownNeverNegZero();
}

/// Return true if we can prove that the specified FP value is either NaN or
/// never less than -0.0.
///
///      NaN --> true
///       +0 --> true
///       -0 --> true
///   x > +0 --> true
///   x < -0 --> false
inline bool cannotBeOrderedLessThanZero(const Value *V, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI = nullptr,
                                        unsigned Depth = 0,
                                        AssumptionCache *AC = nullptr,
                                        const Instruction *CtxI = nullptr,
                                        const DominatorTree *DT = nullptr,
                                        bool UseInstrInfo = true) {
  KnownFPClass Known =
      computeKnownFPClass(V, DL, KnownFPClass::OrderedLessThanZeroMask, Depth,
                          TLI, AC, CtxI, DT, UseInstrInfo);
  return Known.cannotBeOrderedLessThanZero();
}

/// Return true if the floating-point scalar value is not an infinity or if
/// the floating-point vector value has no infinities. Return false if a value
/// could ever be infinity.
inline bool isKnownNeverInfinity(const Value *V, const DataLayout &DL,
                                 const TargetLibraryInfo *TLI = nullptr,
                                 unsigned Depth = 0,
                                 AssumptionCache *AC = nullptr,
                                 const Instruction *CtxI = nullptr,
                                 const DominatorTree *DT = nullptr,
                                 bool UseInstrInfo = true) {
  KnownFPClass Known = computeKnownFPClass(V, DL, fcInf, Depth, TLI, AC, CtxI,
                                           DT, UseInstrInfo);
  return Known.isKnownNeverInfinity();
}

/// Return true if the floating-point value can never contain a NaN or
/// infinity.
inline bool isKnownNeverInfOrNaN(
    const Value *V, const DataLayout &DL, const TargetLibraryInfo *TLI,
    unsigned Depth = 0, AssumptionCache *AC = nullptr,
    const Instruction *CtxI = nullptr, const DominatorTree *DT = nullptr,
    bool UseInstrInfo = true) {
  KnownFPClass Known = computeKnownFPClass(V, DL, fcInf | fcNan, Depth, TLI, AC,
                                           CtxI, DT, UseInstrInfo);
  return Known.isKnownNeverNaN() && Known.isKnownNeverInfinity();
}

/// Return true if the floating-point scalar value is not a NaN or if the
/// floating-point vector value has no NaN elements. Return false if a value
/// could ever be NaN.
inline bool isKnownNeverNaN(const Value *V, const DataLayout &DL,
                            const TargetLibraryInfo *TLI, unsigned Depth = 0,
                            AssumptionCache *AC = nullptr,
                            const Instruction *CtxI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true) {
  KnownFPClass Known = computeKnownFPClass(V, DL, fcNan, Depth, TLI, AC, CtxI,
                                           DT, UseInstrInfo);
  return Known.isKnownNeverNaN();
}

/// Return true if we can prove that the specified FP value's sign bit is 0.
///
///      NaN --> true/false (depending on the NaN's sign bit)
///       +0 --> true
///       -0 --> false
///   x > +0 --> true
///   x < -0 --> false
bool SignBitMustBeZero(const Value *V, const DataLayout &DL,
                       const TargetLibraryInfo *TLI);

/// If the specified value can be set by repeating the same byte in memory,
/// return the i8 value that it is represented with. This is true for all i8
/// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
/// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
/// i16 0x1234), return null. If the value is entirely undef and padding,
/// return undef.
Value *isBytewiseValue(Value *V, const DataLayout &DL);

/// Given an aggregate and a sequence of indices, see if the scalar value
/// indexed is already around as a register, for example if it were inserted
/// directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
                         Instruction *InsertBefore = nullptr);

/// Analyze the specified pointer to see if it can be expressed as a base
/// pointer plus a constant offset. Return the base and offset to the caller.
///
/// This is a wrapper around Value::stripAndAccumulateConstantOffsets that
/// creates and later unpacks the required APInt.
inline Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                               const DataLayout &DL,
                                               bool AllowNonInbounds = true) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  Value *Base =
      Ptr->stripAndAccumulateConstantOffsets(DL, OffsetAPInt, AllowNonInbounds);

  Offset = OffsetAPInt.getSExtValue();
  return Base;
}
inline const Value *
GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
                                 const DataLayout &DL,
                                 bool AllowNonInbounds = true) {
  return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset, DL,
                                          AllowNonInbounds);
}
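
// Example (sketch; `PtrA`, `PtrB`, and `DL` are assumptions): two accesses
// can be compared once they are rewritten as base + constant offset:
//
//   int64_t OffA, OffB;
//   const Value *BaseA = GetPointerBaseWithConstantOffset(PtrA, OffA, DL);
//   const Value *BaseB = GetPointerBaseWithConstantOffset(PtrB, OffB, DL);
//   if (BaseA == BaseB)
//     ...; // disjoint [OffA, OffA+SizeA) and [OffB, OffB+SizeB) can't alias.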

/// Returns true if the GEP is based on a pointer to a string (array of
/// \p CharSize integers) and is indexing into this string.
bool isGEPBasedOnPointerToString(const GEPOperator *GEP, unsigned CharSize = 8);

/// Represents offset+length into a ConstantDataArray.
struct ConstantDataArraySlice {
  /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
  /// initializer, it just doesn't fit the ConstantDataArray interface).
  const ConstantDataArray *Array;

  /// Slice starts at this Offset.
  uint64_t Offset;

  /// Length of the slice.
  uint64_t Length;

  /// Moves the Offset and adjusts Length accordingly.
  void move(uint64_t Delta) {
    assert(Delta < Length);
    Offset += Delta;
    Length -= Delta;
  }

  /// Convenience accessor for elements in the slice.
  uint64_t operator[](unsigned I) const {
    return Array == nullptr ? 0 : Array->getElementAsInteger(I + Offset);
  }
};

/// Returns true if the value \p V is a pointer into a ConstantDataArray.
/// If successful \p Slice will point to a ConstantDataArray info object
/// with an appropriate offset.
bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
                              unsigned ElementSize, uint64_t Offset = 0);

/// This function computes the length of a null-terminated C string pointed to
/// by V. If successful, it returns true and returns the string in Str. If
/// unsuccessful, it returns false. This does not include the trailing null
/// character by default. If TrimAtNul is set to false, then this returns any
/// trailing null characters as well as any other characters that come after
/// it.
bool getConstantStringInfo(const Value *V, StringRef &Str,
                           bool TrimAtNul = true);
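
// Example (sketch; `Ptr` is an assumed pointer operand of a libcall): folding
// a strlen call whose argument is a constant string:
//
//   StringRef Str;
//   if (getConstantStringInfo(Ptr, Str))
//     ...; // replace the strlen(Ptr) call with a constant Str.size().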

/// If we can compute the length of the string pointed to by the specified
/// pointer, return 'len+1'.  If we can't, return 0.
uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);

/// This function returns the call's pointer argument that is considered the
/// same by aliasing rules. You CAN'T use it to replace one value with another.
/// If \p MustPreserveNullness is true, the call must preserve the nullness of
/// the pointer.
const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call,
                                                  bool MustPreserveNullness);
inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call,
                                                   bool MustPreserveNullness) {
  return const_cast<Value *>(getArgumentAliasingToReturnedPointer(
      const_cast<const CallBase *>(Call), MustPreserveNullness));
}

/// {launder,strip}.invariant.group returns a pointer that aliases its
/// argument, and it only captures the pointer by returning it.
/// These intrinsics are not marked as nocapture, because returning is
/// considered a capture. The arguments are not marked as returned either,
/// because that would make the attribute useless. If \p MustPreserveNullness
/// is true, the intrinsic must preserve the nullness of the pointer.
bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
    const CallBase *Call, bool MustPreserveNullness);

/// This method strips off any GEP address adjustments and pointer casts from
/// the specified value, returning the original object being addressed. Note
/// that the returned value has pointer type if the specified value does. If
/// the MaxLookup value is non-zero, it limits the number of instructions to
/// be stripped off.
const Value *getUnderlyingObject(const Value *V, unsigned MaxLookup = 6);
inline Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6) {
  // Force const to avoid infinite recursion.
  const Value *VConst = V;
  return const_cast<Value *>(getUnderlyingObject(VConst, MaxLookup));
}
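
// Example (sketch; `Load` is an assumed LoadInst*): a load whose underlying
// object is a local alloca cannot be reading a global:
//
//   const Value *Obj = getUnderlyingObject(Load->getPointerOperand());
//   if (isa<AllocaInst>(Obj))
//     ...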

/// This method is similar to getUnderlyingObject except that it can
/// look through phi and select instructions and return multiple objects.
///
/// If LoopInfo is passed, loop phis are further analyzed.  If a pointer
/// accesses different objects in each iteration, we don't look through the
/// phi node. E.g. consider this loop nest:
///
///   int **A;
///   for (i)
///     for (j) {
///        A[i][j] = A[i-1][j] * B[j]
///     }
///
/// This is transformed by Load-PRE to stash away A[i] for the next iteration
/// of the outer loop:
///
///   Curr = A[0];          // Prev_0
///   for (i: 1..N) {
///     Prev = Curr;        // Prev = PHI (Prev_0, Curr)
///     Curr = A[i];
///     for (j: 0..N) {
///        Curr[j] = Prev[j] * B[j]
///     }
///   }
///
/// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
/// should not assume that Curr and Prev share the same underlying object;
/// thus it shouldn't look through the phi above.
void getUnderlyingObjects(const Value *V,
                          SmallVectorImpl<const Value *> &Objects,
                          LoopInfo *LI = nullptr, unsigned MaxLookup = 6);

/// This is a wrapper around getUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
bool getUnderlyingObjectsForCodeGen(const Value *V,
                                    SmallVectorImpl<Value *> &Objects);

/// Returns the unique alloca that the value comes from, or nullptr.
/// If OffsetZero is true, check that V points to the beginning of the alloca.
AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false);
inline const AllocaInst *findAllocaForValue(const Value *V,
                                            bool OffsetZero = false) {
  return findAllocaForValue(const_cast<Value *>(V), OffsetZero);
}

/// Return true if the only users of this pointer are lifetime markers.
bool onlyUsedByLifetimeMarkers(const Value *V);

/// Return true if the only users of this pointer are lifetime markers or
/// droppable instructions.
bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);

/// Return true if speculation of the given load must be suppressed to avoid
/// ordering or interfering with an active sanitizer.  If not suppressed,
/// dereferenceability and alignment must be proven separately.  Note: This
/// is only needed for raw reasoning; if you use the interface below
/// (isSafeToSpeculativelyExecute), this is handled internally.
bool mustSuppressSpeculation(const LoadInst &LI);

/// Return true if the instruction does not have any effects besides
/// calculating the result and does not have undefined behavior.
///
/// This method never returns true for an instruction that returns true for
/// mayHaveSideEffects; however, this method also does some other checks in
/// addition. It checks for undefined behavior, like dividing by zero or
/// loading from an invalid pointer (but not for undefined results, like a
/// shift with a shift amount larger than the width of the result). It checks
/// for malloc and alloca because speculatively executing them might cause a
/// memory leak. It also returns false for instructions related to control
/// flow, specifically terminators and PHI nodes.
///
/// If the CtxI is specified this method performs context-sensitive analysis
/// and returns true if it is safe to execute the instruction immediately
/// before the CtxI.
///
/// If the CtxI is NOT specified this method only looks at the instruction
/// itself and its operands, so if this method returns true, it is safe to
/// move the instruction as long as the correct dominance relationships for
/// the operands and users hold.
///
/// This method can return true for instructions that read memory;
/// for such instructions, moving them may change the resulting value.
bool isSafeToSpeculativelyExecute(const Instruction *I,
                                  const Instruction *CtxI = nullptr,
                                  AssumptionCache *AC = nullptr,
                                  const DominatorTree *DT = nullptr,
                                  const TargetLibraryInfo *TLI = nullptr);

/// This returns the same result as isSafeToSpeculativelyExecute if Opcode is
/// the actual opcode of Inst. If the provided and actual opcode differ, the
/// function (virtually) overrides the opcode of Inst with the provided
/// Opcode. There are some constraints in this case:
/// * If Opcode has a fixed number of operands (e.g., as binary operators do),
///   then Inst has to have at least as many leading operands. The function
///   will ignore all trailing operands beyond that number.
/// * If Opcode allows for an arbitrary number of operands (e.g., as CallInsts
///   do), then all operands are considered.
/// * The virtual instruction has to satisfy all typing rules of the provided
///   Opcode.
/// * This function is pessimistic in the following sense: If one actually
///   materialized the virtual instruction, then isSafeToSpeculativelyExecute
///   may say that the materialized instruction is speculatable whereas this
///   function may have said that the instruction wouldn't be speculatable.
///   This behavior is a shortcoming in the current implementation and not
///   intentional.
bool isSafeToSpeculativelyExecuteWithOpcode(
    unsigned Opcode, const Instruction *Inst, const Instruction *CtxI = nullptr,
    AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr,
    const TargetLibraryInfo *TLI = nullptr);

/// Returns true if the result or effects of the given instruction \p I
/// depend on values not reachable through the def-use graph.
/// * Memory dependence arises for example if the instruction reads from
///   memory or may produce effects or undefined behavior. Memory-dependent
///   instructions generally cannot be reordered with respect to other
///   memory-dependent instructions.
/// * Control dependence arises for example if the instruction may fault
///   if lifted above a throwing call or infinite loop.
bool mayHaveNonDefUseDependency(const Instruction &I);

/// Return true if it is an intrinsic that cannot be speculated but also
/// cannot trap.
bool isAssumeLikeIntrinsic(const Instruction *I);

/// Return true if it is valid to use the assumptions provided by an
/// assume intrinsic, I, at the point in the control-flow identified by the
/// context instruction, CxtI. By default, ephemeral values of the assumption
/// are treated as an invalid context, to prevent the assumption from being used
/// to optimize away its argument. If the caller can ensure that this won't
/// happen, it can call with AllowEphemerals set to true to get more valid
/// assumptions.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
                             const DominatorTree *DT = nullptr,
                             bool AllowEphemerals = false);

enum class OverflowResult {
  /// Always overflows in the direction of signed/unsigned min value.
  AlwaysOverflowsLow,
  /// Always overflows in the direction of signed/unsigned max value.
  AlwaysOverflowsHigh,
  /// May or may not overflow.
  MayOverflow,
  /// Never overflows.
  NeverOverflows,
};

OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS,
                                             const SimplifyQuery &SQ);
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                           const SimplifyQuery &SQ);
OverflowResult
computeOverflowForUnsignedAdd(const WithCache<const Value *> &LHS,
                              const WithCache<const Value *> &RHS,
                              const SimplifyQuery &SQ);
OverflowResult computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
                                           const WithCache<const Value *> &RHS,
                                           const SimplifyQuery &SQ);
/// This version also leverages the sign bit of Add if known.
OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
                                           const SimplifyQuery &SQ);
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS,
                                             const SimplifyQuery &SQ);
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
                                           const SimplifyQuery &SQ);
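
// Example (sketch; `A`, `B`, and `SQ` are assumed to be in scope): proving an
// add never wraps justifies attaching the `nuw` flag:
//
//   if (computeOverflowForUnsignedAdd(A, B, SQ) ==
//       OverflowResult::NeverOverflows)
//     ...; // `add A, B` can be given the `nuw` flag.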

/// Returns true if the arithmetic part of the \p WO 's result is
/// used only along the paths control dependent on the computation
/// not overflowing, \p WO being an <op>.with.overflow intrinsic.
bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                               const DominatorTree &DT);

/// Determine the possible constant range of vscale with the given bit width,
/// based on the vscale_range function attribute.
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth);

/// Determine the possible constant range of an integer or vector of integer
/// value. This is intended as a cheap, non-recursive check.
ConstantRange computeConstantRange(const Value *V, bool ForSigned,
                                   bool UseInstrInfo = true,
                                   AssumptionCache *AC = nullptr,
                                   const Instruction *CtxI = nullptr,
                                   const DominatorTree *DT = nullptr,
                                   unsigned Depth = 0);

/// Combine constant ranges from computeConstantRange() and computeKnownBits().
ConstantRange
computeConstantRangeIncludingKnownBits(const WithCache<const Value *> &V,
                                       bool ForSigned, const SimplifyQuery &SQ);

/// Return true if this function can prove that the instruction I will
/// always transfer execution to one of its successors (including the next
/// instruction that follows within a basic block). E.g. this is not
/// guaranteed for function calls that could loop infinitely.
///
/// In other words, this function returns false for instructions that may
/// transfer execution or fail to transfer execution in a way that is not
/// captured in the CFG nor in the sequence of instructions within a basic
/// block.
///
/// Undefined behavior is assumed not to happen, so e.g. division is
/// guaranteed to transfer execution to the following instruction even
/// though division by zero might cause undefined behavior.
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);

/// Returns true if this block does not contain a potential implicit exit.
/// This is equivalent to saying that all instructions within the basic block
/// are guaranteed to transfer execution to their successor within the basic
/// block. This has the same assumptions w.r.t. undefined behavior as the
/// instruction variant of this function.
bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);

/// Return true if every instruction in the range (Begin, End) is
/// guaranteed to transfer execution to its static successor. \p ScanLimit
/// bounds the search to avoid scanning huge blocks.
bool isGuaranteedToTransferExecutionToSuccessor(
    BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
    unsigned ScanLimit = 32);

/// Same as previous, but with range expressed via iterator_range.
bool isGuaranteedToTransferExecutionToSuccessor(
    iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit = 32);
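
// Example (sketch; `Begin` and `End` are assumed iterators into one block):
// this proves that control reaching `Begin` must also reach `End`, so a fact
// established at `Begin` still applies at `End`:
//
//   if (isGuaranteedToTransferExecutionToSuccessor(Begin, End))
//     ...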

/// Return true if this function can prove that the instruction I
/// is executed for every iteration of the loop L.
///
/// Note that this currently only considers the loop header.
bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                            const Loop *L);

/// Return true if \p PoisonOp's user yields poison or raises UB if its
/// operand \p PoisonOp is poison.
///
/// If \p PoisonOp is a vector or an aggregate and the operation's result is a
/// single value, any poison element in \p PoisonOp should make the result
/// poison or raise UB.
///
/// To filter out operands that raise UB on poison, you can use
/// getGuaranteedNonPoisonOp.
bool propagatesPoison(const Use &PoisonOp);

/// Insert operands of I into Ops such that I will trigger undefined behavior
/// if I is executed and that operand has a poison value.
void getGuaranteedNonPoisonOps(const Instruction *I,
                               SmallVectorImpl<const Value *> &Ops);

/// Insert operands of I into Ops such that I will trigger undefined behavior
/// if I is executed and that operand is not a well-defined value
/// (i.e. has undef bits or poison).
void getGuaranteedWellDefinedOps(const Instruction *I,
                                 SmallVectorImpl<const Value *> &Ops);

/// Return true if the given instruction must trigger undefined behavior
/// when I is executed with any operands which appear in KnownPoison holding
/// a poison value at the point of execution.
bool mustTriggerUB(const Instruction *I,
                   const SmallPtrSetImpl<const Value *> &KnownPoison);

/// Return true if this function can prove that if Inst is executed
/// and yields a poison value or undef bits, then that will trigger
/// undefined behavior.
///
/// Note that this currently only considers the basic block that is
/// the parent of Inst.
bool programUndefinedIfUndefOrPoison(const Instruction *Inst);
bool programUndefinedIfPoison(const Instruction *Inst);

/// canCreateUndefOrPoison returns true if Op can create undef or poison from
/// non-undef & non-poison operands.
/// For vectors, canCreateUndefOrPoison returns true if there is potential
/// poison or undef in any element of the result when vectors without
/// undef/poison are given as operands.
/// For example, given `Op = shl <2 x i32> %x, <0, 32>`, this function returns
/// true. If Op raises immediate UB but never creates poison or undef
/// (e.g. sdiv I, 0), canCreatePoison returns false.
///
/// \p ConsiderFlagsAndMetadata controls whether poison producing flags and
/// metadata on the instruction are considered.  This can be used to see if the
/// instruction could still introduce undef or poison even without poison
/// generating flags and metadata which might be on the instruction.
/// (i.e. could the result of Op->dropPoisonGeneratingFlags() still create
/// poison or undef)
///
/// canCreatePoison returns true if Op can create poison from non-poison
/// operands.
bool canCreateUndefOrPoison(const Operator *Op,
                            bool ConsiderFlagsAndMetadata = true);
bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata = true);
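
// Example (sketch; `I` is an assumed Instruction*): if only its flags can
// introduce poison, an instruction can be made poison-free by dropping them:
//
//   if (canCreatePoison(cast<Operator>(I)) &&
//       !canCreatePoison(cast<Operator>(I),
//                        /*ConsiderFlagsAndMetadata=*/false))
//     I->dropPoisonGeneratingFlags();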

/// Return true if V is poison given that ValAssumedPoison is already poison.
/// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`,
/// impliesPoison returns true.
bool impliesPoison(const Value *ValAssumedPoison, const Value *V);

/// Return true if this function can prove that V does not have undef bits
/// and is never poison. If V is an aggregate value or vector, check whether
/// all elements (except padding) are not undef or poison.
/// Note that this is different from canCreateUndefOrPoison because the
/// function assumes Op's operands are not poison/undef.
///
/// If CtxI and DT are specified this method performs flow-sensitive analysis
/// and returns true if it is guaranteed to be never undef or poison
/// immediately before the CtxI.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
                                      AssumptionCache *AC = nullptr,
                                      const Instruction *CtxI = nullptr,
                                      const DominatorTree *DT = nullptr,
                                      unsigned Depth = 0);

/// Returns true if V cannot be poison, but may be undef.
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr,
                               const Instruction *CtxI = nullptr,
                               const DominatorTree *DT = nullptr,
                               unsigned Depth = 0);

/// Returns true if V cannot be undef, but may be poison.
bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC = nullptr,
                              const Instruction *CtxI = nullptr,
                              const DominatorTree *DT = nullptr,
                              unsigned Depth = 0);

/// Return true if undefined behavior would provably be executed on the path to
/// OnPathTo if Root produced a poison result.  Note that this doesn't say
/// anything about whether OnPathTo is actually executed or whether Root is
/// actually poison.  This can be used to assess whether a new use of Root can
/// be added at a location which is control equivalent with OnPathTo (such as
/// immediately before it) without introducing UB which didn't previously
/// exist.  Note that a false result conveys no information.
bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
                                   Instruction *OnPathTo,
                                   DominatorTree *DT);

/// Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
  SPF_UNKNOWN = 0,
  SPF_SMIN,    /// Signed minimum
  SPF_UMIN,    /// Unsigned minimum
  SPF_SMAX,    /// Signed maximum
  SPF_UMAX,    /// Unsigned maximum
  SPF_FMINNUM, /// Floating point minnum
  SPF_FMAXNUM, /// Floating point maxnum
  SPF_ABS,     /// Absolute value
  SPF_NABS     /// Negated absolute value
};

/// Behavior when a floating point min/max is given one NaN and one
/// non-NaN as input.
enum SelectPatternNaNBehavior {
  SPNB_NA = 0,        /// NaN behavior not applicable.
  SPNB_RETURNS_NAN,   /// Given one NaN input, returns the NaN.
  SPNB_RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
  SPNB_RETURNS_ANY    /// Given one NaN input, can return either (or
                      /// it has been determined that no operands can
                      /// be NaN).
};

struct SelectPatternResult {
  SelectPatternFlavor Flavor;
  SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is
                                        /// SPF_FMINNUM or SPF_FMAXNUM.
  bool Ordered; /// When implementing this min/max pattern as
                /// fcmp; select, does the fcmp have to be
                /// ordered?

  /// Return true if \p SPF is a min or a max pattern.
  static bool isMinOrMax(SelectPatternFlavor SPF) {
    return SPF != SPF_UNKNOWN && SPF != SPF_ABS && SPF != SPF_NABS;
  }
};

/// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
/// and providing the out parameter results if we successfully match.
///
/// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be
/// the negation instruction from the idiom.
///
/// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
/// not match that of the original select. If this is the case, the cast
/// operation (one of Trunc,SExt,Zext) that must be done to transform the
/// type of LHS and RHS into the type of V is returned in CastOp.
///
/// For example:
///   %1 = icmp slt i32 %a, 4
///   %2 = sext i32 %a to i64
///   %3 = select i1 %1, i64 %2, i64 4
///
/// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
///
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                       Instruction::CastOps *CastOp = nullptr,
                                       unsigned Depth = 0);

inline SelectPatternResult matchSelectPattern(const Value *V, const Value *&LHS,
                                              const Value *&RHS) {
  Value *L = const_cast<Value *>(LHS);
  Value *R = const_cast<Value *>(RHS);
  auto Result = matchSelectPattern(const_cast<Value *>(V), L, R);
  LHS = L;
  RHS = R;
  return Result;
}
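
// Example (sketch; `Sel` is an assumed select instruction):
//
//   Value *LHS, *RHS;
//   SelectPatternResult SPR = matchSelectPattern(Sel, LHS, RHS);
//   if (SPR.Flavor == SPF_SMAX)
//     ...; // Sel computes smax(LHS, RHS).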

/// Determine the pattern that a select with the given compare as its
/// predicate and given values as its true/false operands would match.
SelectPatternResult matchDecomposedSelectPattern(
    CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
    Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);

/// Return the canonical comparison predicate for the specified
/// minimum/maximum flavor.
CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered = false);

/// Return the inverse minimum/maximum flavor of the specified flavor.
/// For example, signed minimum is the inverse of signed maximum.
SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);

Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID);

/// Return the minimum or maximum constant value for the specified integer
/// min/max flavor and type.
APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth);

/// Check if the values in \p VL are select instructions that can be converted
/// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a
/// conversion is possible, together with a bool indicating whether all select
/// conditions are only used by the selects. Otherwise return
/// Intrinsic::not_intrinsic.
std::pair<Intrinsic::ID, bool>
canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);

/// Attempt to match a simple first order recurrence cycle of the form:
///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
///   %inc = binop %iv, %step
/// OR
///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
///   %inc = binop %step, %iv
///
/// A first order recurrence is a formula with the form: X_n = f(X_(n-1))
///
/// A couple of notes on subtleties in that definition:
/// * The Step does not have to be loop invariant.  In math terms, it can
///   be a free variable.  We allow recurrences with both constant and
///   variable coefficients. Callers may wish to filter cases where Step
///   does not dominate P.
/// * For non-commutative operators, we will match both forms.  This
///   results in some odd recurrence structures.  Callers may wish to filter
///   out recurrences where the phi is not the LHS of the returned operator.
/// * Because of the structure matched, the caller can assume as a
///   postcondition of the match the presence of a Loop with P's parent as its
///   header *except* in unreachable code.  (Dominance decays in unreachable
///   code.)
///
/// NOTE: This is intentionally simple.  If you want the ability to analyze
/// non-trivial loop conditions, see ScalarEvolution instead.
bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start,
                           Value *&Step);

/// Analogous to the above, but starting from the binary operator.
bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P, Value *&Start,
                           Value *&Step);
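
// Example (sketch; `Phi` is an assumed PHINode*):
//
//   BinaryOperator *BO;
//   Value *Start, *Step;
//   if (matchSimpleRecurrence(Phi, BO, Start, Step) &&
//       BO->getOpcode() == Instruction::Add)
//     ...; // Phi(0) = Start and Phi(n) = Phi(n-1) + Step.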

/// Return true if RHS is known to be implied true by LHS.  Return false if
/// RHS is known to be implied false by LHS.  Otherwise, return std::nullopt
/// if no implication can be made. A & B must be i1 (boolean) values or a
/// vector of such values. Note that the truth table for implication is the
/// same as <=u on i1 values (but not <=s!).  The truth table for both is:
///    | T | F (B)
///  T | T | F
///  F | T | T
/// (A)
std::optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
                                       const DataLayout &DL,
                                       bool LHSIsTrue = true,
                                       unsigned Depth = 0);
std::optional<bool> isImpliedCondition(const Value *LHS,
                                       CmpInst::Predicate RHSPred,
                                       const Value *RHSOp0, const Value *RHSOp1,
                                       const DataLayout &DL,
                                       bool LHSIsTrue = true,
                                       unsigned Depth = 0);
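
// Example (sketch; `A` and `B` are assumed i1 values, say `icmp ult %x, 5`
// and `icmp ult %x, 10`):
//
//   if (isImpliedCondition(A, B, DL) == true)
//     ...; // whenever A is true, B must be true as well.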

/// Return the boolean condition value in the context of the given instruction
/// if it is known based on dominating conditions.
std::optional<bool> isImpliedByDomCondition(const Value *Cond,
                                            const Instruction *ContextI,
                                            const DataLayout &DL);
std::optional<bool> isImpliedByDomCondition(CmpInst::Predicate Pred,
                                            const Value *LHS, const Value *RHS,
                                            const Instruction *ContextI,
                                            const DataLayout &DL);
} // end namespace llvm

#endif // LLVM_ANALYSIS_VALUETRACKING_H