1 //===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains routines that help analyze properties that chains of
10 // computations have.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_ANALYSIS_VALUETRACKING_H
15 #define LLVM_ANALYSIS_VALUETRACKING_H
16 
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Intrinsics.h"
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>
25 
26 namespace llvm {
27 
28 class Operator;
29 class AddOperator;
30 class AllocaInst;
31 class APInt;
32 class AssumptionCache;
33 class DominatorTree;
34 class GEPOperator;
35 class LoadInst;
36 class WithOverflowInst;
37 struct KnownBits;
38 class Loop;
39 class LoopInfo;
40 class MDNode;
41 struct SimplifyQuery;
42 class StringRef;
43 class TargetLibraryInfo;
44 class Value;
45 
46 constexpr unsigned MaxAnalysisRecursionDepth = 6;
47 
48 /// Determine which bits of V are known to be either zero or one and return
49 /// them in the KnownZero/KnownOne bit sets.
50 ///
51 /// This function is defined on values with integer type, values with pointer
52 /// type, and vectors of integers.  In the case
53 /// where V is a vector, the known zero and known one values are the
54 /// same width as the vector element, and the bit is set only if it is true
55 /// for all of the elements in the vector.
56 void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL,
57                       unsigned Depth = 0, AssumptionCache *AC = nullptr,
58                       const Instruction *CxtI = nullptr,
59                       const DominatorTree *DT = nullptr,
60                       bool UseInstrInfo = true);
61 
62 /// Determine which bits of V are known to be either zero or one and return
63 /// them in the KnownZero/KnownOne bit sets.
64 ///
65 /// This function is defined on values with integer type, values with pointer
66 /// type, and vectors of integers.  In the case
67 /// where V is a vector, the known zero and known one values are the
68 /// same width as the vector element, and the bit is set only if it is true
69 /// for all of the demanded elements in the vector.
70 void computeKnownBits(const Value *V, const APInt &DemandedElts,
71                       KnownBits &Known, const DataLayout &DL,
72                       unsigned Depth = 0, AssumptionCache *AC = nullptr,
73                       const Instruction *CxtI = nullptr,
74                       const DominatorTree *DT = nullptr,
75                       bool UseInstrInfo = true);
76 
77 /// Returns the known bits rather than passing by reference.
78 KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
79                            unsigned Depth = 0, AssumptionCache *AC = nullptr,
80                            const Instruction *CxtI = nullptr,
81                            const DominatorTree *DT = nullptr,
82                            bool UseInstrInfo = true);
83 
84 /// Returns the known bits rather than passing by reference.
85 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
86                            const DataLayout &DL, unsigned Depth = 0,
87                            AssumptionCache *AC = nullptr,
88                            const Instruction *CxtI = nullptr,
89                            const DominatorTree *DT = nullptr,
90                            bool UseInstrInfo = true);
91 
/// Compute known bits from the range metadata.
/// \p Ranges the !range metadata to analyze
/// \p Known the resulting known-zero/known-one bit sets
95 void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known);
96 
97 /// Merge bits known from assumes into Known.
98 void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
99                                 unsigned Depth, const SimplifyQuery &Q);
100 
101 /// Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or).
102 KnownBits analyzeKnownBitsFromAndXorOr(
103     const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS,
104     unsigned Depth, const DataLayout &DL, AssumptionCache *AC = nullptr,
105     const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
106     bool UseInstrInfo = true);
107 
108 /// Return true if LHS and RHS have no common bits set.
109 bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
110                          const DataLayout &DL, AssumptionCache *AC = nullptr,
111                          const Instruction *CxtI = nullptr,
112                          const DominatorTree *DT = nullptr,
113                          bool UseInstrInfo = true);
114 
115 /// Return true if the given value is known to have exactly one bit set when
116 /// defined. For vectors return true if every element is known to be a power
117 /// of two when defined. Supports values with integer or pointer type and
118 /// vectors of integers. If 'OrZero' is set, then return true if the given
119 /// value is either a power of two or zero.
120 bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
121                             bool OrZero = false, unsigned Depth = 0,
122                             AssumptionCache *AC = nullptr,
123                             const Instruction *CxtI = nullptr,
124                             const DominatorTree *DT = nullptr,
125                             bool UseInstrInfo = true);
126 
127 bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
128 
129 /// Return true if the given value is known to be non-zero when defined. For
130 /// vectors, return true if every element is known to be non-zero when
131 /// defined. For pointers, if the context instruction and dominator tree are
132 /// specified, perform context-sensitive analysis and return true if the
133 /// pointer couldn't possibly be null at the specified instruction.
134 /// Supports values with integer or pointer type and vectors of integers.
135 bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
136                     AssumptionCache *AC = nullptr,
137                     const Instruction *CxtI = nullptr,
138                     const DominatorTree *DT = nullptr,
139                     bool UseInstrInfo = true);
140 
/// Return true if the two given values are negations of each other.
/// Currently can recognize Value pair:
/// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
/// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
145 bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false);
146 
/// Returns true if the given value is known to be non-negative.
148 bool isKnownNonNegative(const Value *V, const DataLayout &DL,
149                         unsigned Depth = 0, AssumptionCache *AC = nullptr,
150                         const Instruction *CxtI = nullptr,
151                         const DominatorTree *DT = nullptr,
152                         bool UseInstrInfo = true);
153 
154 /// Returns true if the given value is known be positive (i.e. non-negative
155 /// and non-zero).
156 bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0,
157                      AssumptionCache *AC = nullptr,
158                      const Instruction *CxtI = nullptr,
159                      const DominatorTree *DT = nullptr,
160                      bool UseInstrInfo = true);
161 
162 /// Returns true if the given value is known be negative (i.e. non-positive
163 /// and non-zero).
164 bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0,
165                      AssumptionCache *AC = nullptr,
166                      const Instruction *CxtI = nullptr,
167                      const DominatorTree *DT = nullptr,
168                      bool UseInstrInfo = true);
169 
170 /// Return true if the given values are known to be non-equal when defined.
171 /// Supports scalar integer types only.
172 bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
173                      AssumptionCache *AC = nullptr,
174                      const Instruction *CxtI = nullptr,
175                      const DominatorTree *DT = nullptr,
176                      bool UseInstrInfo = true);
177 
178 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
179 /// simplify operations downstream. Mask is known to be zero for bits that V
180 /// cannot have.
181 ///
182 /// This function is defined on values with integer type, values with pointer
183 /// type, and vectors of integers.  In the case
184 /// where V is a vector, the mask, known zero, and known one values are the
185 /// same width as the vector element, and the bit is set only if it is true
186 /// for all of the elements in the vector.
187 bool MaskedValueIsZero(const Value *V, const APInt &Mask, const DataLayout &DL,
188                        unsigned Depth = 0, AssumptionCache *AC = nullptr,
189                        const Instruction *CxtI = nullptr,
190                        const DominatorTree *DT = nullptr,
191                        bool UseInstrInfo = true);
192 
193 /// Return the number of times the sign bit of the register is replicated into
194 /// the other bits. We know that at least 1 bit is always equal to the sign
195 /// bit (itself), but other cases can give us information. For example,
196 /// immediately after an "ashr X, 2", we know that the top 3 bits are all
197 /// equal to each other, so we return 3. For vectors, return the number of
/// sign bits for the vector element with the minimum number of known sign
199 /// bits.
200 unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
201                             unsigned Depth = 0, AssumptionCache *AC = nullptr,
202                             const Instruction *CxtI = nullptr,
203                             const DominatorTree *DT = nullptr,
204                             bool UseInstrInfo = true);
205 
206 /// Get the upper bound on bit size for this Value \p Op as a signed integer.
207 /// i.e.  x == sext(trunc(x to MaxSignificantBits) to bitwidth(x)).
208 /// Similar to the APInt::getSignificantBits function.
209 unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL,
210                                    unsigned Depth = 0,
211                                    AssumptionCache *AC = nullptr,
212                                    const Instruction *CxtI = nullptr,
213                                    const DominatorTree *DT = nullptr);
214 
215 /// Map a call instruction to an intrinsic ID.  Libcalls which have equivalent
216 /// intrinsics are treated as-if they were intrinsics.
217 Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
218                                       const TargetLibraryInfo *TLI);
219 
220 /// Returns a pair of values, which if passed to llvm.is.fpclass, returns the
221 /// same result as an fcmp with the given operands.
222 ///
223 /// If \p LookThroughSrc is true, consider the input value when computing the
224 /// mask.
225 ///
/// If \p LookThroughSrc is false, ignore the source value (i.e. the first pair
/// element will always be LHS).
228 std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
229                                                 const Function &F, Value *LHS,
230                                                 Value *RHS,
231                                                 bool LookThroughSrc = true);
232 
struct KnownFPClass {
  /// Floating-point classes the value could be one of.
  FPClassTest KnownFPClasses = fcAllFlags;

  /// std::nullopt if the sign bit is unknown, true if the sign bit is
  /// definitely set or false if the sign bit is definitely unset.
  std::optional<bool> SignBit;

  /// Return true if it's known this can never be one of the mask entries.
  bool isKnownNever(FPClassTest Mask) const {
    return (KnownFPClasses & Mask) == fcNone;
  }

  /// Return true if nothing at all is known about the value: every class is
  /// still possible and the sign bit is unknown.
  bool isUnknown() const {
    return KnownFPClasses == fcAllFlags && !SignBit;
  }

  /// Return true if it's known this can never be a nan.
  bool isKnownNeverNaN() const {
    return isKnownNever(fcNan);
  }

  /// Return true if it's known this can never be an infinity.
  bool isKnownNeverInfinity() const {
    return isKnownNever(fcInf);
  }

  /// Return true if it's known this can never be +infinity.
  bool isKnownNeverPosInfinity() const {
    return isKnownNever(fcPosInf);
  }

  /// Return true if it's known this can never be -infinity.
  bool isKnownNeverNegInfinity() const {
    return isKnownNever(fcNegInf);
  }

  /// Return true if it's known this can never be a subnormal
  bool isKnownNeverSubnormal() const {
    return isKnownNever(fcSubnormal);
  }

  /// Return true if it's known this can never be a positive subnormal
  bool isKnownNeverPosSubnormal() const {
    return isKnownNever(fcPosSubnormal);
  }

  /// Return true if it's known this can never be a negative subnormal
  bool isKnownNeverNegSubnormal() const {
    return isKnownNever(fcNegSubnormal);
  }

  /// Return true if it's known this can never be a zero. This means a literal
  /// [+-]0, and does not include denormal inputs implicitly treated as [+-]0.
  bool isKnownNeverZero() const {
    return isKnownNever(fcZero);
  }

  /// Return true if it's known this can never be a literal positive zero.
  bool isKnownNeverPosZero() const {
    return isKnownNever(fcPosZero);
  }

  /// Return true if it's known this can never be a negative zero. This means a
  /// literal -0 and does not include denormal inputs implicitly treated as -0.
  bool isKnownNeverNegZero() const {
    return isKnownNever(fcNegZero);
  }

  /// Return true if it's known this can never be interpreted as a zero. This
  /// extends isKnownNeverZero to cover the case where the assumed
  /// floating-point mode for the function interprets denormals as zero.
  bool isKnownNeverLogicalZero(const Function &F, Type *Ty) const;

  /// Return true if it's known this can never be interpreted as a negative
  /// zero.
  bool isKnownNeverLogicalNegZero(const Function &F, Type *Ty) const;

  /// Return true if it's known this can never be interpreted as a positive
  /// zero.
  bool isKnownNeverLogicalPosZero(const Function &F, Type *Ty) const;

  /// The classes that compare ordered-less-than zero (resp.
  /// ordered-greater-than zero). Note [+-]0 and nan are in neither mask.
  static constexpr FPClassTest OrderedLessThanZeroMask =
      fcNegSubnormal | fcNegNormal | fcNegInf;
  static constexpr FPClassTest OrderedGreaterThanZeroMask =
      fcPosSubnormal | fcPosNormal | fcPosInf;

  /// Return true if we can prove that the analyzed floating-point value is
  /// either NaN or never less than -0.0.
  ///
  ///      NaN --> true
  ///       +0 --> true
  ///       -0 --> true
  ///   x > +0 --> true
  ///   x < -0 --> false
  bool cannotBeOrderedLessThanZero() const {
    return isKnownNever(OrderedLessThanZeroMask);
  }

  /// Return true if we can prove that the analyzed floating-point value is
  /// either NaN or never greater than -0.0.
  ///      NaN --> true
  ///       +0 --> true
  ///       -0 --> true
  ///   x > +0 --> false
  ///   x < -0 --> true
  bool cannotBeOrderedGreaterThanZero() const {
    return isKnownNever(OrderedGreaterThanZeroMask);
  }

  /// Merge knowledge from another value the result could alternatively be
  /// (e.g. the arms of a select or the incoming values of a phi): the union of
  /// the possible classes, keeping the sign bit only if both sides agree.
  KnownFPClass &operator|=(const KnownFPClass &RHS) {
    KnownFPClasses = KnownFPClasses | RHS.KnownFPClasses;

    if (SignBit != RHS.SignBit)
      SignBit = std::nullopt;
    return *this;
  }

  /// Record that the value can never be any class in \p RuleOut.
  void knownNot(FPClassTest RuleOut) {
    KnownFPClasses = KnownFPClasses & ~RuleOut;
  }

  /// Apply the effect of a negation: swap each sign-paired class for its
  /// opposite and flip the sign bit if it is known.
  void fneg() {
    KnownFPClasses = llvm::fneg(KnownFPClasses);
    if (SignBit)
      SignBit = !*SignBit;
  }

  /// Apply the effect of fabs: each possibly-negative class implies its
  /// positive counterpart is possible, and the sign bit becomes known-zero
  /// (which also rules the negative classes back out).
  void fabs() {
    if (KnownFPClasses & fcNegZero)
      KnownFPClasses |= fcPosZero;

    if (KnownFPClasses & fcNegInf)
      KnownFPClasses |= fcPosInf;

    if (KnownFPClasses & fcNegSubnormal)
      KnownFPClasses |= fcPosSubnormal;

    if (KnownFPClasses & fcNegNormal)
      KnownFPClasses |= fcPosNormal;

    signBitMustBeZero();
  }

  /// Return true if the sign bit must be 0, ignoring the sign of nans.
  bool signBitIsZeroOrNaN() const {
    return isKnownNever(fcNegative);
  }

  /// Assume the sign bit is zero. Rules out all negative classes; nan is kept
  /// since its sign is tracked separately from its class.
  void signBitMustBeZero() {
    KnownFPClasses &= (fcPositive | fcNan);
    SignBit = false;
  }

  /// Apply the effect of a copysign from \p Sign onto this value.
  void copysign(const KnownFPClass &Sign) {
    // Don't know anything about the sign of the source. Expand the possible set
    // to its opposite sign pair.
    if (KnownFPClasses & fcZero)
      KnownFPClasses |= fcZero;
    if (KnownFPClasses & fcSubnormal)
      KnownFPClasses |= fcSubnormal;
    if (KnownFPClasses & fcNormal)
      KnownFPClasses |= fcNormal;
    if (KnownFPClasses & fcInf)
      KnownFPClasses |= fcInf;

    // Sign bit is exactly preserved even for nans.
    SignBit = Sign.SignBit;

    // Clear sign bits based on the input sign mask.
    if (Sign.isKnownNever(fcPositive | fcNan) || (SignBit && *SignBit))
      KnownFPClasses &= (fcNegative | fcNan);
    if (Sign.isKnownNever(fcNegative | fcNan) || (SignBit && !*SignBit))
      KnownFPClasses &= (fcPositive | fcNan);
  }

  // Propagate knowledge that a non-NaN source implies the result can also not
  // be a NaN. For unconstrained operations, signaling nans are not guaranteed
  // to be quieted but cannot be introduced.
  void propagateNaN(const KnownFPClass &Src, bool PreserveSign = false) {
    if (Src.isKnownNever(fcNan)) {
      knownNot(fcNan);
      if (PreserveSign)
        SignBit = Src.SignBit;
    } else if (Src.isKnownNever(fcSNan))
      knownNot(fcSNan);
  }

  /// Propagate knowledge from a source value that could be a denormal or
  /// zero. We have to be conservative since output flushing is not guaranteed,
  /// so known-never-zero may not hold.
  ///
  /// This assumes a copy-like operation and will replace any currently known
  /// information.
  void propagateDenormal(const KnownFPClass &Src, const Function &F, Type *Ty);

  /// Report known classes if \p Src is evaluated through a potentially
  /// canonicalizing operation. We can assume signaling nans will not be
  /// introduced, but cannot assume a denormal will be flushed under FTZ/DAZ.
  ///
  /// This assumes a copy-like operation and will replace any currently known
  /// information.
  void propagateCanonicalizingSrc(const KnownFPClass &Src, const Function &F,
                                  Type *Ty);

  /// Forget everything known about the value.
  void resetAll() { *this = KnownFPClass(); }
};
439 
440 inline KnownFPClass operator|(KnownFPClass LHS, const KnownFPClass &RHS) {
441   LHS |= RHS;
442   return LHS;
443 }
444 
/// Merge overload that reuses the rvalue operand's storage instead of
/// copying; |= is commutative so merging LHS into RHS is equivalent.
inline KnownFPClass operator|(const KnownFPClass &LHS, KnownFPClass &&RHS) {
  RHS |= LHS;
  return std::move(RHS);
}
449 
450 /// Determine which floating-point classes are valid for \p V, and return them
451 /// in KnownFPClass bit sets.
452 ///
/// This function is defined on values with floating-point type, vectors of
/// floating-point type, and arrays of floating-point type.
///
456 /// \p InterestedClasses is a compile time optimization hint for which floating
457 /// point classes should be queried. Queries not specified in \p
458 /// InterestedClasses should be reliable if they are determined during the
459 /// query.
460 KnownFPClass computeKnownFPClass(
461     const Value *V, const APInt &DemandedElts, const DataLayout &DL,
462     FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
463     const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
464     const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
465     bool UseInstrInfo = true);
466 
467 KnownFPClass computeKnownFPClass(
468     const Value *V, const DataLayout &DL,
469     FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
470     const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
471     const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
472     bool UseInstrInfo = true);
473 
474 /// Return true if we can prove that the specified FP value is never equal to
475 /// -0.0. Users should use caution when considering PreserveSign
476 /// denormal-fp-math.
477 inline bool cannotBeNegativeZero(const Value *V, const DataLayout &DL,
478                                  const TargetLibraryInfo *TLI = nullptr,
479                                  unsigned Depth = 0,
480                                  AssumptionCache *AC = nullptr,
481                                  const Instruction *CtxI = nullptr,
482                                  const DominatorTree *DT = nullptr,
483                                  bool UseInstrInfo = true) {
484   KnownFPClass Known = computeKnownFPClass(V, DL, fcNegZero, Depth, TLI, AC,
485                                            CtxI, DT, UseInstrInfo);
486   return Known.isKnownNeverNegZero();
487 }
488 
489 bool CannotBeOrderedLessThanZero(const Value *V, const DataLayout &DL,
490                                  const TargetLibraryInfo *TLI);
491 
492 /// Return true if we can prove that the specified FP value is either NaN or
493 /// never less than -0.0.
494 ///
495 ///      NaN --> true
496 ///       +0 --> true
497 ///       -0 --> true
498 ///   x > +0 --> true
499 ///   x < -0 --> false
500 inline bool cannotBeOrderedLessThanZero(const Value *V, const DataLayout &DL,
501                                         const TargetLibraryInfo *TLI = nullptr,
502                                         unsigned Depth = 0,
503                                         AssumptionCache *AC = nullptr,
504                                         const Instruction *CtxI = nullptr,
505                                         const DominatorTree *DT = nullptr,
506                                         bool UseInstrInfo = true) {
507   KnownFPClass Known =
508       computeKnownFPClass(V, DL, KnownFPClass::OrderedLessThanZeroMask, Depth,
509                           TLI, AC, CtxI, DT, UseInstrInfo);
510   return Known.cannotBeOrderedLessThanZero();
511 }
512 
513 /// Return true if the floating-point scalar value is not an infinity or if
514 /// the floating-point vector value has no infinities. Return false if a value
515 /// could ever be infinity.
516 inline bool isKnownNeverInfinity(const Value *V, const DataLayout &DL,
517                                  const TargetLibraryInfo *TLI = nullptr,
518                                  unsigned Depth = 0,
519                                  AssumptionCache *AC = nullptr,
520                                  const Instruction *CtxI = nullptr,
521                                  const DominatorTree *DT = nullptr,
522                                  bool UseInstrInfo = true) {
523   KnownFPClass Known = computeKnownFPClass(V, DL, fcInf, Depth, TLI, AC, CtxI,
524                                            DT, UseInstrInfo);
525   return Known.isKnownNeverInfinity();
526 }
527 
528 /// Return true if the floating-point value can never contain a NaN or infinity.
529 inline bool isKnownNeverInfOrNaN(
530     const Value *V, const DataLayout &DL, const TargetLibraryInfo *TLI,
531     unsigned Depth = 0, AssumptionCache *AC = nullptr,
532     const Instruction *CtxI = nullptr, const DominatorTree *DT = nullptr,
533     bool UseInstrInfo = true) {
534   KnownFPClass Known = computeKnownFPClass(V, DL, fcInf | fcNan, Depth, TLI, AC,
535                                            CtxI, DT, UseInstrInfo);
536   return Known.isKnownNeverNaN() && Known.isKnownNeverInfinity();
537 }
538 
539 /// Return true if the floating-point scalar value is not a NaN or if the
540 /// floating-point vector value has no NaN elements. Return false if a value
541 /// could ever be NaN.
542 inline bool isKnownNeverNaN(const Value *V, const DataLayout &DL,
543                             const TargetLibraryInfo *TLI, unsigned Depth = 0,
544                             AssumptionCache *AC = nullptr,
545                             const Instruction *CtxI = nullptr,
546                             const DominatorTree *DT = nullptr,
547                             bool UseInstrInfo = true) {
548   KnownFPClass Known = computeKnownFPClass(V, DL, fcNan, Depth, TLI, AC, CtxI,
549                                            DT, UseInstrInfo);
550   return Known.isKnownNeverNaN();
551 }
552 
553 /// Return true if we can prove that the specified FP value's sign bit is 0.
554 ///
555 ///      NaN --> true/false (depending on the NaN's sign bit)
556 ///       +0 --> true
557 ///       -0 --> false
558 ///   x > +0 --> true
559 ///   x < -0 --> false
560 bool SignBitMustBeZero(const Value *V, const DataLayout &DL,
561                        const TargetLibraryInfo *TLI);
562 
563 /// If the specified value can be set by repeating the same byte in memory,
564 /// return the i8 value that it is represented with. This is true for all i8
565 /// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
566 /// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
567 /// i16 0x1234), return null. If the value is entirely undef and padding,
568 /// return undef.
569 Value *isBytewiseValue(Value *V, const DataLayout &DL);
570 
/// Given an aggregate and a sequence of indices, see if the scalar value
572 /// indexed is already around as a register, for example if it were inserted
573 /// directly into the aggregate.
574 ///
575 /// If InsertBefore is not null, this function will duplicate (modified)
576 /// insertvalues when a part of a nested struct is extracted.
577 Value *FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
578                          Instruction *InsertBefore = nullptr);
579 
580 /// Analyze the specified pointer to see if it can be expressed as a base
581 /// pointer plus a constant offset. Return the base and offset to the caller.
582 ///
583 /// This is a wrapper around Value::stripAndAccumulateConstantOffsets that
584 /// creates and later unpacks the required APInt.
585 inline Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
586                                                const DataLayout &DL,
587                                                bool AllowNonInbounds = true) {
588   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
589   Value *Base =
590       Ptr->stripAndAccumulateConstantOffsets(DL, OffsetAPInt, AllowNonInbounds);
591 
592   Offset = OffsetAPInt.getSExtValue();
593   return Base;
594 }
595 inline const Value *
596 GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
597                                  const DataLayout &DL,
598                                  bool AllowNonInbounds = true) {
599   return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset, DL,
600                                           AllowNonInbounds);
601 }
602 
/// Returns true if the GEP is based on a pointer to a string (array of
/// \p CharSize integers) and is indexing into this string.
605 bool isGEPBasedOnPointerToString(const GEPOperator *GEP, unsigned CharSize = 8);
606 
607 /// Represents offset+length into a ConstantDataArray.
608 struct ConstantDataArraySlice {
609   /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
610   /// initializer, it just doesn't fit the ConstantDataArray interface).
611   const ConstantDataArray *Array;
612 
613   /// Slice starts at this Offset.
614   uint64_t Offset;
615 
616   /// Length of the slice.
617   uint64_t Length;
618 
619   /// Moves the Offset and adjusts Length accordingly.
620   void move(uint64_t Delta) {
621     assert(Delta < Length);
622     Offset += Delta;
623     Length -= Delta;
624   }
625 
626   /// Convenience accessor for elements in the slice.
627   uint64_t operator[](unsigned I) const {
628     return Array == nullptr ? 0 : Array->getElementAsInteger(I + Offset);
629   }
630 };
631 
632 /// Returns true if the value \p V is a pointer into a ConstantDataArray.
633 /// If successful \p Slice will point to a ConstantDataArray info object
634 /// with an appropriate offset.
635 bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
636                               unsigned ElementSize, uint64_t Offset = 0);
637 
638 /// This function computes the length of a null-terminated C string pointed to
639 /// by V. If successful, it returns true and returns the string in Str. If
640 /// unsuccessful, it returns false. This does not include the trailing null
641 /// character by default. If TrimAtNul is set to false, then this returns any
642 /// trailing null characters as well as any other characters that come after
643 /// it.
644 bool getConstantStringInfo(const Value *V, StringRef &Str,
645                            bool TrimAtNul = true);
646 
647 /// If we can compute the length of the string pointed to by the specified
648 /// pointer, return 'len+1'.  If we can't, return 0.
649 uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);
650 
651 /// This function returns call pointer argument that is considered the same by
652 /// aliasing rules. You CAN'T use it to replace one value with another. If
653 /// \p MustPreserveNullness is true, the call must preserve the nullness of
654 /// the pointer.
655 const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call,
656                                                   bool MustPreserveNullness);
657 inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call,
658                                                    bool MustPreserveNullness) {
659   return const_cast<Value *>(getArgumentAliasingToReturnedPointer(
660       const_cast<const CallBase *>(Call), MustPreserveNullness));
661 }
662 
663 /// {launder,strip}.invariant.group returns pointer that aliases its argument,
664 /// and it only captures pointer by returning it.
665 /// These intrinsics are not marked as nocapture, because returning is
666 /// considered as capture. The arguments are not marked as returned neither,
667 /// because it would make it useless. If \p MustPreserveNullness is true,
668 /// the intrinsic must preserve the nullness of the pointer.
669 bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
670     const CallBase *Call, bool MustPreserveNullness);
671 
672 /// This method strips off any GEP address adjustments and pointer casts from
673 /// the specified value, returning the original object being addressed. Note
674 /// that the returned value has pointer type if the specified value does. If
675 /// the MaxLookup value is non-zero, it limits the number of instructions to
676 /// be stripped off.
677 const Value *getUnderlyingObject(const Value *V, unsigned MaxLookup = 6);
678 inline Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6) {
679   // Force const to avoid infinite recursion.
680   const Value *VConst = V;
681   return const_cast<Value *>(getUnderlyingObject(VConst, MaxLookup));
682 }
683 
684 /// This method is similar to getUnderlyingObject except that it can
685 /// look through phi and select instructions and return multiple objects.
686 ///
687 /// If LoopInfo is passed, loop phis are further analyzed.  If a pointer
688 /// accesses different objects in each iteration, we don't look through the
689 /// phi node. E.g. consider this loop nest:
690 ///
691 ///   int **A;
692 ///   for (i)
693 ///     for (j) {
694 ///        A[i][j] = A[i-1][j] * B[j]
695 ///     }
696 ///
697 /// This is transformed by Load-PRE to stash away A[i] for the next iteration
698 /// of the outer loop:
699 ///
700 ///   Curr = A[0];          // Prev_0
701 ///   for (i: 1..N) {
702 ///     Prev = Curr;        // Prev = PHI (Prev_0, Curr)
703 ///     Curr = A[i];
704 ///     for (j: 0..N) {
705 ///        Curr[j] = Prev[j] * B[j]
706 ///     }
707 ///   }
708 ///
709 /// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
710 /// should not assume that Curr and Prev share the same underlying object thus
711 /// it shouldn't look through the phi above.
712 void getUnderlyingObjects(const Value *V,
713                           SmallVectorImpl<const Value *> &Objects,
714                           LoopInfo *LI = nullptr, unsigned MaxLookup = 6);
715 
716 /// This is a wrapper around getUnderlyingObjects and adds support for basic
717 /// ptrtoint+arithmetic+inttoptr sequences.
718 bool getUnderlyingObjectsForCodeGen(const Value *V,
719                                     SmallVectorImpl<Value *> &Objects);
720 
721 /// Returns unique alloca where the value comes from, or nullptr.
/// If OffsetZero is true, check that V points to the beginning of the alloca.
723 AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false);
724 inline const AllocaInst *findAllocaForValue(const Value *V,
725                                             bool OffsetZero = false) {
726   return findAllocaForValue(const_cast<Value *>(V), OffsetZero);
727 }
728 
729 /// Return true if the only users of this pointer are lifetime markers.
730 bool onlyUsedByLifetimeMarkers(const Value *V);
731 
732 /// Return true if the only users of this pointer are lifetime markers or
733 /// droppable instructions.
734 bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);
735 
736 /// Return true if speculation of the given load must be suppressed to avoid
737 /// ordering or interfering with an active sanitizer.  If not suppressed,
738 /// dereferenceability and alignment must be proven separately.  Note: This
739 /// is only needed for raw reasoning; if you use the interface below
740 /// (isSafeToSpeculativelyExecute), this is handled internally.
741 bool mustSuppressSpeculation(const LoadInst &LI);
742 
743 /// Return true if the instruction does not have any effects besides
744 /// calculating the result and does not have undefined behavior.
745 ///
746 /// This method never returns true for an instruction that returns true for
747 /// mayHaveSideEffects; however, this method also does some other checks in
748 /// addition. It checks for undefined behavior, like dividing by zero or
749 /// loading from an invalid pointer (but not for undefined results, like a
750 /// shift with a shift amount larger than the width of the result). It checks
751 /// for malloc and alloca because speculatively executing them might cause a
752 /// memory leak. It also returns false for instructions related to control
753 /// flow, specifically terminators and PHI nodes.
754 ///
755 /// If the CtxI is specified this method performs context-sensitive analysis
756 /// and returns true if it is safe to execute the instruction immediately
757 /// before the CtxI.
758 ///
759 /// If the CtxI is NOT specified this method only looks at the instruction
760 /// itself and its operands, so if this method returns true, it is safe to
761 /// move the instruction as long as the correct dominance relationships for
762 /// the operands and users hold.
763 ///
764 /// This method can return true for instructions that read memory;
765 /// for such instructions, moving them may change the resulting value.
766 bool isSafeToSpeculativelyExecute(const Instruction *I,
767                                   const Instruction *CtxI = nullptr,
768                                   AssumptionCache *AC = nullptr,
769                                   const DominatorTree *DT = nullptr,
770                                   const TargetLibraryInfo *TLI = nullptr);
771 
772 /// This returns the same result as isSafeToSpeculativelyExecute if Opcode is
773 /// the actual opcode of Inst. If the provided and actual opcode differ, the
774 /// function (virtually) overrides the opcode of Inst with the provided
/// Opcode. There are some constraints in this case:
776 /// * If Opcode has a fixed number of operands (eg, as binary operators do),
777 ///   then Inst has to have at least as many leading operands. The function
778 ///   will ignore all trailing operands beyond that number.
779 /// * If Opcode allows for an arbitrary number of operands (eg, as CallInsts
780 ///   do), then all operands are considered.
781 /// * The virtual instruction has to satisfy all typing rules of the provided
782 ///   Opcode.
783 /// * This function is pessimistic in the following sense: If one actually
784 ///   materialized the virtual instruction, then isSafeToSpeculativelyExecute
785 ///   may say that the materialized instruction is speculatable whereas this
786 ///   function may have said that the instruction wouldn't be speculatable.
787 ///   This behavior is a shortcoming in the current implementation and not
788 ///   intentional.
789 bool isSafeToSpeculativelyExecuteWithOpcode(
790     unsigned Opcode, const Instruction *Inst, const Instruction *CtxI = nullptr,
791     AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr,
792     const TargetLibraryInfo *TLI = nullptr);
793 
/// Returns true if the result or effects of the given instruction \p I
/// depend on values not reachable through the def-use graph.
796 /// * Memory dependence arises for example if the instruction reads from
797 ///   memory or may produce effects or undefined behaviour. Memory dependent
///   instructions generally cannot be reordered with respect to other memory
799 ///   dependent instructions.
800 /// * Control dependence arises for example if the instruction may fault
801 ///   if lifted above a throwing call or infinite loop.
802 bool mayHaveNonDefUseDependency(const Instruction &I);
803 
804 /// Return true if it is an intrinsic that cannot be speculated but also
805 /// cannot trap.
806 bool isAssumeLikeIntrinsic(const Instruction *I);
807 
808 /// Return true if it is valid to use the assumptions provided by an
809 /// assume intrinsic, I, at the point in the control-flow identified by the
810 /// context instruction, CxtI.
811 bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
812                              const DominatorTree *DT = nullptr);
813 
/// Possible outcomes of an arithmetic-overflow query: proven to always wrap
/// toward min or max, possibly overflowing, or proven never to overflow.
enum class OverflowResult {
  /// Always overflows in the direction of signed/unsigned min value.
  AlwaysOverflowsLow,
  /// Always overflows in the direction of signed/unsigned max value.
  AlwaysOverflowsHigh,
  /// May or may not overflow.
  MayOverflow,
  /// Never overflows.
  NeverOverflows,
};
824 
825 OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS,
826                                              const DataLayout &DL,
827                                              AssumptionCache *AC,
828                                              const Instruction *CxtI,
829                                              const DominatorTree *DT,
830                                              bool UseInstrInfo = true);
831 OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
832                                            const DataLayout &DL,
833                                            AssumptionCache *AC,
834                                            const Instruction *CxtI,
835                                            const DominatorTree *DT,
836                                            bool UseInstrInfo = true);
837 OverflowResult computeOverflowForUnsignedAdd(const Value *LHS, const Value *RHS,
838                                              const DataLayout &DL,
839                                              AssumptionCache *AC,
840                                              const Instruction *CxtI,
841                                              const DominatorTree *DT,
842                                              bool UseInstrInfo = true);
843 OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
844                                            const DataLayout &DL,
845                                            AssumptionCache *AC = nullptr,
846                                            const Instruction *CxtI = nullptr,
847                                            const DominatorTree *DT = nullptr);
848 /// This version also leverages the sign bit of Add if known.
849 OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
850                                            const DataLayout &DL,
851                                            AssumptionCache *AC = nullptr,
852                                            const Instruction *CxtI = nullptr,
853                                            const DominatorTree *DT = nullptr);
854 OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS,
855                                              const DataLayout &DL,
856                                              AssumptionCache *AC,
857                                              const Instruction *CxtI,
858                                              const DominatorTree *DT);
859 OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
860                                            const DataLayout &DL,
861                                            AssumptionCache *AC,
862                                            const Instruction *CxtI,
863                                            const DominatorTree *DT);
864 
865 /// Returns true if the arithmetic part of the \p WO 's result is
866 /// used only along the paths control dependent on the computation
867 /// not overflowing, \p WO being an <op>.with.overflow intrinsic.
868 bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
869                                const DominatorTree &DT);
870 
871 /// Determine the possible constant range of vscale with the given bit width,
872 /// based on the vscale_range function attribute.
873 ConstantRange getVScaleRange(const Function *F, unsigned BitWidth);
874 
875 /// Determine the possible constant range of an integer or vector of integer
876 /// value. This is intended as a cheap, non-recursive check.
877 ConstantRange computeConstantRange(const Value *V, bool ForSigned,
878                                    bool UseInstrInfo = true,
879                                    AssumptionCache *AC = nullptr,
880                                    const Instruction *CtxI = nullptr,
881                                    const DominatorTree *DT = nullptr,
882                                    unsigned Depth = 0);
883 
884 /// Return true if this function can prove that the instruction I will
885 /// always transfer execution to one of its successors (including the next
886 /// instruction that follows within a basic block). E.g. this is not
887 /// guaranteed for function calls that could loop infinitely.
888 ///
889 /// In other words, this function returns false for instructions that may
890 /// transfer execution or fail to transfer execution in a way that is not
891 /// captured in the CFG nor in the sequence of instructions within a basic
892 /// block.
893 ///
894 /// Undefined behavior is assumed not to happen, so e.g. division is
895 /// guaranteed to transfer execution to the following instruction even
896 /// though division by zero might cause undefined behavior.
897 bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);
898 
899 /// Returns true if this block does not contain a potential implicit exit.
/// This is equivalent to saying that all instructions within the basic block
901 /// are guaranteed to transfer execution to their successor within the basic
902 /// block. This has the same assumptions w.r.t. undefined behavior as the
903 /// instruction variant of this function.
904 bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);
905 
906 /// Return true if every instruction in the range (Begin, End) is
907 /// guaranteed to transfer execution to its static successor. \p ScanLimit
908 /// bounds the search to avoid scanning huge blocks.
909 bool isGuaranteedToTransferExecutionToSuccessor(
910     BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
911     unsigned ScanLimit = 32);
912 
913 /// Same as previous, but with range expressed via iterator_range.
914 bool isGuaranteedToTransferExecutionToSuccessor(
915     iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit = 32);
916 
917 /// Return true if this function can prove that the instruction I
918 /// is executed for every iteration of the loop L.
919 ///
920 /// Note that this currently only considers the loop header.
921 bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
922                                             const Loop *L);
923 
924 /// Return true if \p PoisonOp's user yields poison or raises UB if its
925 /// operand \p PoisonOp is poison.
926 ///
927 /// If \p PoisonOp is a vector or an aggregate and the operation's result is a
/// single value, any poison element in \p PoisonOp should make the result
929 /// poison or raise UB.
930 ///
931 /// To filter out operands that raise UB on poison, you can use
932 /// getGuaranteedNonPoisonOp.
933 bool propagatesPoison(const Use &PoisonOp);
934 
935 /// Insert operands of I into Ops such that I will trigger undefined behavior
936 /// if I is executed and that operand has a poison value.
937 void getGuaranteedNonPoisonOps(const Instruction *I,
938                                SmallVectorImpl<const Value *> &Ops);
939 
940 /// Insert operands of I into Ops such that I will trigger undefined behavior
941 /// if I is executed and that operand is not a well-defined value
942 /// (i.e. has undef bits or poison).
943 void getGuaranteedWellDefinedOps(const Instruction *I,
944                                  SmallVectorImpl<const Value *> &Ops);
945 
946 /// Return true if the given instruction must trigger undefined behavior
947 /// when I is executed with any operands which appear in KnownPoison holding
948 /// a poison value at the point of execution.
949 bool mustTriggerUB(const Instruction *I,
950                    const SmallPtrSetImpl<const Value *> &KnownPoison);
951 
952 /// Return true if this function can prove that if Inst is executed
953 /// and yields a poison value or undef bits, then that will trigger
954 /// undefined behavior.
955 ///
956 /// Note that this currently only considers the basic block that is
957 /// the parent of Inst.
958 bool programUndefinedIfUndefOrPoison(const Instruction *Inst);
959 bool programUndefinedIfPoison(const Instruction *Inst);
960 
961 /// canCreateUndefOrPoison returns true if Op can create undef or poison from
962 /// non-undef & non-poison operands.
963 /// For vectors, canCreateUndefOrPoison returns true if there is potential
964 /// poison or undef in any element of the result when vectors without
/// undef/poison are given as operands.
966 /// For example, given `Op = shl <2 x i32> %x, <0, 32>`, this function returns
967 /// true. If Op raises immediate UB but never creates poison or undef
968 /// (e.g. sdiv I, 0), canCreatePoison returns false.
969 ///
970 /// \p ConsiderFlagsAndMetadata controls whether poison producing flags and
971 /// metadata on the instruction are considered.  This can be used to see if the
972 /// instruction could still introduce undef or poison even without poison
973 /// generating flags and metadata which might be on the instruction.
974 /// (i.e. could the result of Op->dropPoisonGeneratingFlags() still create
975 /// poison or undef)
976 ///
977 /// canCreatePoison returns true if Op can create poison from non-poison
978 /// operands.
979 bool canCreateUndefOrPoison(const Operator *Op,
980                             bool ConsiderFlagsAndMetadata = true);
981 bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata = true);
982 
983 /// Return true if V is poison given that ValAssumedPoison is already poison.
984 /// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`,
985 /// impliesPoison returns true.
986 bool impliesPoison(const Value *ValAssumedPoison, const Value *V);
987 
988 /// Return true if this function can prove that V does not have undef bits
989 /// and is never poison. If V is an aggregate value or vector, check whether
990 /// all elements (except padding) are not undef or poison.
991 /// Note that this is different from canCreateUndefOrPoison because the
992 /// function assumes Op's operands are not poison/undef.
993 ///
994 /// If CtxI and DT are specified this method performs flow-sensitive analysis
995 /// and returns true if it is guaranteed to be never undef or poison
996 /// immediately before the CtxI.
997 bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
998                                       AssumptionCache *AC = nullptr,
999                                       const Instruction *CtxI = nullptr,
1000                                       const DominatorTree *DT = nullptr,
1001                                       unsigned Depth = 0);
1002 bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr,
1003                                const Instruction *CtxI = nullptr,
1004                                const DominatorTree *DT = nullptr,
1005                                unsigned Depth = 0);
1006 
/// Return true if undefined behavior would provably be executed on the path to
/// OnPathTo if Root produced a poison result.  Note that this doesn't say
1009 /// anything about whether OnPathTo is actually executed or whether Root is
1010 /// actually poison.  This can be used to assess whether a new use of Root can
1011 /// be added at a location which is control equivalent with OnPathTo (such as
1012 /// immediately before it) without introducing UB which didn't previously
1013 /// exist.  Note that a false result conveys no information.
1014 bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
1015                                    Instruction *OnPathTo,
1016                                    DominatorTree *DT);
1017 
1018 /// Specific patterns of select instructions we can match.
/// Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
  SPF_UNKNOWN = 0,
  SPF_SMIN,    ///< Signed minimum
  SPF_UMIN,    ///< Unsigned minimum
  SPF_SMAX,    ///< Signed maximum
  SPF_UMAX,    ///< Unsigned maximum
  SPF_FMINNUM, ///< Floating point minnum
  SPF_FMAXNUM, ///< Floating point maxnum
  SPF_ABS,     ///< Absolute value
  SPF_NABS     ///< Negated absolute value
};
1030 
1031 /// Behavior when a floating point min/max is given one NaN and one
1032 /// non-NaN as input.
enum SelectPatternNaNBehavior {
  SPNB_NA = 0,        ///< NaN behavior not applicable.
  SPNB_RETURNS_NAN,   ///< Given one NaN input, returns the NaN.
  SPNB_RETURNS_OTHER, ///< Given one NaN input, returns the non-NaN.
  SPNB_RETURNS_ANY    ///< Given one NaN input, can return either (or
                      ///< it has been determined that no operands can
                      ///< be NaN).
};
1041 
1042 struct SelectPatternResult {
1043   SelectPatternFlavor Flavor;
1044   SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is
1045                                         /// SPF_FMINNUM or SPF_FMAXNUM.
1046   bool Ordered; /// When implementing this min/max pattern as
1047                 /// fcmp; select, does the fcmp have to be
1048                 /// ordered?
1049 
1050   /// Return true if \p SPF is a min or a max pattern.
1051   static bool isMinOrMax(SelectPatternFlavor SPF) {
1052     return SPF != SPF_UNKNOWN && SPF != SPF_ABS && SPF != SPF_NABS;
1053   }
1054 };
1055 
1056 /// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
1057 /// and providing the out parameter results if we successfully match.
1058 ///
1059 /// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be
1060 /// the negation instruction from the idiom.
1061 ///
1062 /// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
1063 /// not match that of the original select. If this is the case, the cast
1064 /// operation (one of Trunc,SExt,Zext) that must be done to transform the
1065 /// type of LHS and RHS into the type of V is returned in CastOp.
1066 ///
1067 /// For example:
1068 ///   %1 = icmp slt i32 %a, i32 4
1069 ///   %2 = sext i32 %a to i64
1070 ///   %3 = select i1 %1, i64 %2, i64 4
1071 ///
1072 /// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
1073 ///
1074 SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
1075                                        Instruction::CastOps *CastOp = nullptr,
1076                                        unsigned Depth = 0);
1077 
1078 inline SelectPatternResult matchSelectPattern(const Value *V, const Value *&LHS,
1079                                               const Value *&RHS) {
1080   Value *L = const_cast<Value *>(LHS);
1081   Value *R = const_cast<Value *>(RHS);
1082   auto Result = matchSelectPattern(const_cast<Value *>(V), L, R);
1083   LHS = L;
1084   RHS = R;
1085   return Result;
1086 }
1087 
1088 /// Determine the pattern that a select with the given compare as its
1089 /// predicate and given values as its true/false operands would match.
1090 SelectPatternResult matchDecomposedSelectPattern(
1091     CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
1092     Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);
1093 
1094 /// Return the canonical comparison predicate for the specified
1095 /// minimum/maximum flavor.
1096 CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered = false);
1097 
1098 /// Return the inverse minimum/maximum flavor of the specified flavor.
1099 /// For example, signed minimum is the inverse of signed maximum.
1100 SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);
1101 
1102 Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID);
1103 
1104 /// Return the minimum or maximum constant value for the specified integer
1105 /// min/max flavor and type.
1106 APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth);
1107 
1108 /// Check if the values in \p VL are select instructions that can be converted
1109 /// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a
1110 /// conversion is possible, together with a bool indicating whether all select
1111 /// conditions are only used by the selects. Otherwise return
1112 /// Intrinsic::not_intrinsic.
1113 std::pair<Intrinsic::ID, bool>
1114 canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);
1115 
1116 /// Attempt to match a simple first order recurrence cycle of the form:
1117 ///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
1118 ///   %inc = binop %iv, %step
1119 /// OR
1120 ///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
1121 ///   %inc = binop %step, %iv
1122 ///
1123 /// A first order recurrence is a formula with the form: X_n = f(X_(n-1))
1124 ///
1125 /// A couple of notes on subtleties in that definition:
1126 /// * The Step does not have to be loop invariant.  In math terms, it can
1127 ///   be a free variable.  We allow recurrences with both constant and
1128 ///   variable coefficients. Callers may wish to filter cases where Step
1129 ///   does not dominate P.
1130 /// * For non-commutative operators, we will match both forms.  This
1131 ///   results in some odd recurrence structures.  Callers may wish to filter
1132 ///   out recurrences where the phi is not the LHS of the returned operator.
1133 /// * Because of the structure matched, the caller can assume as a post
///   condition of the match the presence of a Loop with P's parent as its
1135 ///   header *except* in unreachable code.  (Dominance decays in unreachable
1136 ///   code.)
1137 ///
/// NOTE: This is intentionally simple.  If you want the ability to analyze
/// non-trivial loop conditions, see ScalarEvolution instead.
1140 bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start,
1141                            Value *&Step);
1142 
1143 /// Analogous to the above, but starting from the binary operator
1144 bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P, Value *&Start,
1145                            Value *&Step);
1146 
1147 /// Return true if RHS is known to be implied true by LHS.  Return false if
1148 /// RHS is known to be implied false by LHS.  Otherwise, return std::nullopt if
1149 /// no implication can be made. A & B must be i1 (boolean) values or a vector of
1150 /// such values. Note that the truth table for implication is the same as <=u on
1151 /// i1 values (but not
1152 /// <=s!).  The truth table for both is:
1153 ///    | T | F (B)
1154 ///  T | T | F
1155 ///  F | T | T
1156 /// (A)
1157 std::optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
1158                                        const DataLayout &DL,
1159                                        bool LHSIsTrue = true,
1160                                        unsigned Depth = 0);
1161 std::optional<bool> isImpliedCondition(const Value *LHS,
1162                                        CmpInst::Predicate RHSPred,
1163                                        const Value *RHSOp0, const Value *RHSOp1,
1164                                        const DataLayout &DL,
1165                                        bool LHSIsTrue = true,
1166                                        unsigned Depth = 0);
1167 
1168 /// Return the boolean condition value in the context of the given instruction
1169 /// if it is known based on dominating conditions.
1170 std::optional<bool> isImpliedByDomCondition(const Value *Cond,
1171                                             const Instruction *ContextI,
1172                                             const DataLayout &DL);
1173 std::optional<bool> isImpliedByDomCondition(CmpInst::Predicate Pred,
1174                                             const Value *LHS, const Value *RHS,
1175                                             const Instruction *ContextI,
1176                                             const DataLayout &DL);
1177 } // end namespace llvm
1178 
1179 #endif // LLVM_ANALYSIS_VALUETRACKING_H
1180