1 //===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file contains routines that help analyze properties that chains of 10 // computations have. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #ifndef LLVM_ANALYSIS_VALUETRACKING_H 15 #define LLVM_ANALYSIS_VALUETRACKING_H 16 17 #include "llvm/ADT/ArrayRef.h" 18 #include "llvm/ADT/Optional.h" 19 #include "llvm/ADT/SmallSet.h" 20 #include "llvm/IR/Constants.h" 21 #include "llvm/IR/DataLayout.h" 22 #include "llvm/IR/InstrTypes.h" 23 #include "llvm/IR/Intrinsics.h" 24 #include <cassert> 25 #include <cstdint> 26 27 namespace llvm { 28 29 class AddOperator; 30 class APInt; 31 class AssumptionCache; 32 class DominatorTree; 33 class GEPOperator; 34 class IntrinsicInst; 35 class LoadInst; 36 class WithOverflowInst; 37 struct KnownBits; 38 class Loop; 39 class LoopInfo; 40 class MDNode; 41 class OptimizationRemarkEmitter; 42 class StringRef; 43 class TargetLibraryInfo; 44 class Value; 45 46 /// Determine which bits of V are known to be either zero or one and return 47 /// them in the KnownZero/KnownOne bit sets. 48 /// 49 /// This function is defined on values with integer type, values with pointer 50 /// type, and vectors of integers. In the case 51 /// where V is a vector, the known zero and known one values are the 52 /// same width as the vector element, and the bit is set only if it is true 53 /// for all of the elements in the vector. 
54 void computeKnownBits(const Value *V, KnownBits &Known, 55 const DataLayout &DL, unsigned Depth = 0, 56 AssumptionCache *AC = nullptr, 57 const Instruction *CxtI = nullptr, 58 const DominatorTree *DT = nullptr, 59 OptimizationRemarkEmitter *ORE = nullptr, 60 bool UseInstrInfo = true); 61 62 /// Determine which bits of V are known to be either zero or one and return 63 /// them in the KnownZero/KnownOne bit sets. 64 /// 65 /// This function is defined on values with integer type, values with pointer 66 /// type, and vectors of integers. In the case 67 /// where V is a vector, the known zero and known one values are the 68 /// same width as the vector element, and the bit is set only if it is true 69 /// for all of the demanded elements in the vector. 70 void computeKnownBits(const Value *V, const APInt &DemandedElts, 71 KnownBits &Known, const DataLayout &DL, 72 unsigned Depth = 0, AssumptionCache *AC = nullptr, 73 const Instruction *CxtI = nullptr, 74 const DominatorTree *DT = nullptr, 75 OptimizationRemarkEmitter *ORE = nullptr, 76 bool UseInstrInfo = true); 77 78 /// Returns the known bits rather than passing by reference. 79 KnownBits computeKnownBits(const Value *V, const DataLayout &DL, 80 unsigned Depth = 0, AssumptionCache *AC = nullptr, 81 const Instruction *CxtI = nullptr, 82 const DominatorTree *DT = nullptr, 83 OptimizationRemarkEmitter *ORE = nullptr, 84 bool UseInstrInfo = true); 85 86 /// Returns the known bits rather than passing by reference. 87 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts, 88 const DataLayout &DL, unsigned Depth = 0, 89 AssumptionCache *AC = nullptr, 90 const Instruction *CxtI = nullptr, 91 const DominatorTree *DT = nullptr, 92 OptimizationRemarkEmitter *ORE = nullptr, 93 bool UseInstrInfo = true); 94 95 /// Compute known bits from the range metadata. 
96 /// \p KnownZero the set of bits that are known to be zero 97 /// \p KnownOne the set of bits that are known to be one 98 void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, 99 KnownBits &Known); 100 101 /// Return true if LHS and RHS have no common bits set. 102 bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS, 103 const DataLayout &DL, 104 AssumptionCache *AC = nullptr, 105 const Instruction *CxtI = nullptr, 106 const DominatorTree *DT = nullptr, 107 bool UseInstrInfo = true); 108 109 /// Return true if the given value is known to have exactly one bit set when 110 /// defined. For vectors return true if every element is known to be a power 111 /// of two when defined. Supports values with integer or pointer type and 112 /// vectors of integers. If 'OrZero' is set, then return true if the given 113 /// value is either a power of two or zero. 114 bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, 115 bool OrZero = false, unsigned Depth = 0, 116 AssumptionCache *AC = nullptr, 117 const Instruction *CxtI = nullptr, 118 const DominatorTree *DT = nullptr, 119 bool UseInstrInfo = true); 120 121 bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI); 122 123 /// Return true if the given value is known to be non-zero when defined. For 124 /// vectors, return true if every element is known to be non-zero when 125 /// defined. For pointers, if the context instruction and dominator tree are 126 /// specified, perform context-sensitive analysis and return true if the 127 /// pointer couldn't possibly be null at the specified instruction. 128 /// Supports values with integer or pointer type and vectors of integers. 129 bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0, 130 AssumptionCache *AC = nullptr, 131 const Instruction *CxtI = nullptr, 132 const DominatorTree *DT = nullptr, 133 bool UseInstrInfo = true); 134 135 /// Return true if the two given values are negation. 
136 /// Currently can recoginze Value pair: 137 /// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X) 138 /// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A) 139 bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false); 140 141 /// Returns true if the give value is known to be non-negative. 142 bool isKnownNonNegative(const Value *V, const DataLayout &DL, 143 unsigned Depth = 0, 144 AssumptionCache *AC = nullptr, 145 const Instruction *CxtI = nullptr, 146 const DominatorTree *DT = nullptr, 147 bool UseInstrInfo = true); 148 149 /// Returns true if the given value is known be positive (i.e. non-negative 150 /// and non-zero). 151 bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0, 152 AssumptionCache *AC = nullptr, 153 const Instruction *CxtI = nullptr, 154 const DominatorTree *DT = nullptr, 155 bool UseInstrInfo = true); 156 157 /// Returns true if the given value is known be negative (i.e. non-positive 158 /// and non-zero). 159 bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0, 160 AssumptionCache *AC = nullptr, 161 const Instruction *CxtI = nullptr, 162 const DominatorTree *DT = nullptr, 163 bool UseInstrInfo = true); 164 165 /// Return true if the given values are known to be non-equal when defined. 166 /// Supports scalar integer types only. 167 bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, 168 AssumptionCache *AC = nullptr, 169 const Instruction *CxtI = nullptr, 170 const DominatorTree *DT = nullptr, 171 bool UseInstrInfo = true); 172 173 /// Return true if 'V & Mask' is known to be zero. We use this predicate to 174 /// simplify operations downstream. Mask is known to be zero for bits that V 175 /// cannot have. 176 /// 177 /// This function is defined on values with integer type, values with pointer 178 /// type, and vectors of integers. 
In the case 179 /// where V is a vector, the mask, known zero, and known one values are the 180 /// same width as the vector element, and the bit is set only if it is true 181 /// for all of the elements in the vector. 182 bool MaskedValueIsZero(const Value *V, const APInt &Mask, 183 const DataLayout &DL, 184 unsigned Depth = 0, AssumptionCache *AC = nullptr, 185 const Instruction *CxtI = nullptr, 186 const DominatorTree *DT = nullptr, 187 bool UseInstrInfo = true); 188 189 /// Return the number of times the sign bit of the register is replicated into 190 /// the other bits. We know that at least 1 bit is always equal to the sign 191 /// bit (itself), but other cases can give us information. For example, 192 /// immediately after an "ashr X, 2", we know that the top 3 bits are all 193 /// equal to each other, so we return 3. For vectors, return the number of 194 /// sign bits for the vector element with the mininum number of known sign 195 /// bits. 196 unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, 197 unsigned Depth = 0, AssumptionCache *AC = nullptr, 198 const Instruction *CxtI = nullptr, 199 const DominatorTree *DT = nullptr, 200 bool UseInstrInfo = true); 201 202 /// This function computes the integer multiple of Base that equals V. If 203 /// successful, it returns true and returns the multiple in Multiple. If 204 /// unsuccessful, it returns false. Also, if V can be simplified to an 205 /// integer, then the simplified V is returned in Val. Look through sext only 206 /// if LookThroughSExt=true. 207 bool ComputeMultiple(Value *V, unsigned Base, Value *&Multiple, 208 bool LookThroughSExt = false, 209 unsigned Depth = 0); 210 211 /// Map a call instruction to an intrinsic ID. Libcalls which have equivalent 212 /// intrinsics are treated as-if they were intrinsics. 
Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
                                      const TargetLibraryInfo *TLI);

/// Return true if we can prove that the specified FP value is never equal to
/// -0.0.
bool CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
                          unsigned Depth = 0);

/// Return true if we can prove that the specified FP value is either NaN or
/// never less than -0.0.
///
///      NaN --> true
///       +0 --> true
///       -0 --> true
///   x > +0 --> true
///   x < -0 --> false
bool CannotBeOrderedLessThanZero(const Value *V, const TargetLibraryInfo *TLI);

/// Return true if the floating-point scalar value is not an infinity or if
/// the floating-point vector value has no infinities. Return false if a value
/// could ever be infinity.
bool isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
                          unsigned Depth = 0);

/// Return true if the floating-point scalar value is not a NaN or if the
/// floating-point vector value has no NaN elements. Return false if a value
/// could ever be NaN.
bool isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
                     unsigned Depth = 0);

/// Return true if we can prove that the specified FP value's sign bit is 0.
///
///      NaN --> true/false (depending on the NaN's sign bit)
///       +0 --> true
///       -0 --> false
///   x > +0 --> true
///   x < -0 --> false
bool SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI);

/// If the specified value can be set by repeating the same byte in memory,
/// return the i8 value that it is represented with. This is true for all i8
/// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
/// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
/// i16 0x1234), return null. If the value is entirely undef and padding,
/// return undef.
258 Value *isBytewiseValue(Value *V, const DataLayout &DL); 259 260 /// Given an aggregate and an sequence of indices, see if the scalar value 261 /// indexed is already around as a register, for example if it were inserted 262 /// directly into the aggregate. 263 /// 264 /// If InsertBefore is not null, this function will duplicate (modified) 265 /// insertvalues when a part of a nested struct is extracted. 266 Value *FindInsertedValue(Value *V, 267 ArrayRef<unsigned> idx_range, 268 Instruction *InsertBefore = nullptr); 269 270 /// Analyze the specified pointer to see if it can be expressed as a base 271 /// pointer plus a constant offset. Return the base and offset to the caller. 272 /// 273 /// This is a wrapper around Value::stripAndAccumulateConstantOffsets that 274 /// creates and later unpacks the required APInt. 275 inline Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, 276 const DataLayout &DL, 277 bool AllowNonInbounds = true) { 278 APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0); 279 Value *Base = 280 Ptr->stripAndAccumulateConstantOffsets(DL, OffsetAPInt, AllowNonInbounds); 281 282 Offset = OffsetAPInt.getSExtValue(); 283 return Base; 284 } 285 inline const Value * 286 GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset, 287 const DataLayout &DL, 288 bool AllowNonInbounds = true) { 289 return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset, DL, 290 AllowNonInbounds); 291 } 292 293 /// Returns true if the GEP is based on a pointer to a string (array of 294 // \p CharSize integers) and is indexing into this string. 295 bool isGEPBasedOnPointerToString(const GEPOperator *GEP, 296 unsigned CharSize = 8); 297 298 /// Represents offset+length into a ConstantDataArray. 299 struct ConstantDataArraySlice { 300 /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid 301 /// initializer, it just doesn't fit the ConstantDataArray interface). 
302 const ConstantDataArray *Array; 303 304 /// Slice starts at this Offset. 305 uint64_t Offset; 306 307 /// Length of the slice. 308 uint64_t Length; 309 310 /// Moves the Offset and adjusts Length accordingly. moveConstantDataArraySlice311 void move(uint64_t Delta) { 312 assert(Delta < Length); 313 Offset += Delta; 314 Length -= Delta; 315 } 316 317 /// Convenience accessor for elements in the slice. 318 uint64_t operator[](unsigned I) const { 319 return Array==nullptr ? 0 : Array->getElementAsInteger(I + Offset); 320 } 321 }; 322 323 /// Returns true if the value \p V is a pointer into a ConstantDataArray. 324 /// If successful \p Slice will point to a ConstantDataArray info object 325 /// with an appropriate offset. 326 bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, 327 unsigned ElementSize, uint64_t Offset = 0); 328 329 /// This function computes the length of a null-terminated C string pointed to 330 /// by V. If successful, it returns true and returns the string in Str. If 331 /// unsuccessful, it returns false. This does not include the trailing null 332 /// character by default. If TrimAtNul is set to false, then this returns any 333 /// trailing null characters as well as any other characters that come after 334 /// it. 335 bool getConstantStringInfo(const Value *V, StringRef &Str, 336 uint64_t Offset = 0, bool TrimAtNul = true); 337 338 /// If we can compute the length of the string pointed to by the specified 339 /// pointer, return 'len+1'. If we can't, return 0. 340 uint64_t GetStringLength(const Value *V, unsigned CharSize = 8); 341 342 /// This function returns call pointer argument that is considered the same by 343 /// aliasing rules. You CAN'T use it to replace one value with another. If 344 /// \p MustPreserveNullness is true, the call must preserve the nullness of 345 /// the pointer. 
346 const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call, 347 bool MustPreserveNullness); 348 inline Value * getArgumentAliasingToReturnedPointer(CallBase * Call,bool MustPreserveNullness)349 getArgumentAliasingToReturnedPointer(CallBase *Call, 350 bool MustPreserveNullness) { 351 return const_cast<Value *>(getArgumentAliasingToReturnedPointer( 352 const_cast<const CallBase *>(Call), MustPreserveNullness)); 353 } 354 355 /// {launder,strip}.invariant.group returns pointer that aliases its argument, 356 /// and it only captures pointer by returning it. 357 /// These intrinsics are not marked as nocapture, because returning is 358 /// considered as capture. The arguments are not marked as returned neither, 359 /// because it would make it useless. If \p MustPreserveNullness is true, 360 /// the intrinsic must preserve the nullness of the pointer. 361 bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing( 362 const CallBase *Call, bool MustPreserveNullness); 363 364 /// This method strips off any GEP address adjustments and pointer casts from 365 /// the specified value, returning the original object being addressed. Note 366 /// that the returned value has pointer type if the specified value does. If 367 /// the MaxLookup value is non-zero, it limits the number of instructions to 368 /// be stripped off. 369 Value *GetUnderlyingObject(Value *V, const DataLayout &DL, 370 unsigned MaxLookup = 6); 371 inline const Value *GetUnderlyingObject(const Value *V, const DataLayout &DL, 372 unsigned MaxLookup = 6) { 373 return GetUnderlyingObject(const_cast<Value *>(V), DL, MaxLookup); 374 } 375 376 /// This method is similar to GetUnderlyingObject except that it can 377 /// look through phi and select instructions and return multiple objects. 378 /// 379 /// If LoopInfo is passed, loop phis are further analyzed. If a pointer 380 /// accesses different objects in each iteration, we don't look through the 381 /// phi node. E.g. 
consider this loop nest: 382 /// 383 /// int **A; 384 /// for (i) 385 /// for (j) { 386 /// A[i][j] = A[i-1][j] * B[j] 387 /// } 388 /// 389 /// This is transformed by Load-PRE to stash away A[i] for the next iteration 390 /// of the outer loop: 391 /// 392 /// Curr = A[0]; // Prev_0 393 /// for (i: 1..N) { 394 /// Prev = Curr; // Prev = PHI (Prev_0, Curr) 395 /// Curr = A[i]; 396 /// for (j: 0..N) { 397 /// Curr[j] = Prev[j] * B[j] 398 /// } 399 /// } 400 /// 401 /// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects 402 /// should not assume that Curr and Prev share the same underlying object thus 403 /// it shouldn't look through the phi above. 404 void GetUnderlyingObjects(const Value *V, 405 SmallVectorImpl<const Value *> &Objects, 406 const DataLayout &DL, LoopInfo *LI = nullptr, 407 unsigned MaxLookup = 6); 408 409 /// This is a wrapper around GetUnderlyingObjects and adds support for basic 410 /// ptrtoint+arithmetic+inttoptr sequences. 411 bool getUnderlyingObjectsForCodeGen(const Value *V, 412 SmallVectorImpl<Value *> &Objects, 413 const DataLayout &DL); 414 415 /// Return true if the only users of this pointer are lifetime markers. 416 bool onlyUsedByLifetimeMarkers(const Value *V); 417 418 /// Return true if speculation of the given load must be suppressed to avoid 419 /// ordering or interfering with an active sanitizer. If not suppressed, 420 /// dereferenceability and alignment must be proven separately. Note: This 421 /// is only needed for raw reasoning; if you use the interface below 422 /// (isSafeToSpeculativelyExecute), this is handled internally. 423 bool mustSuppressSpeculation(const LoadInst &LI); 424 425 /// Return true if the instruction does not have any effects besides 426 /// calculating the result and does not have undefined behavior. 427 /// 428 /// This method never returns true for an instruction that returns true for 429 /// mayHaveSideEffects; however, this method also does some other checks in 430 /// addition. 
It checks for undefined behavior, like dividing by zero or 431 /// loading from an invalid pointer (but not for undefined results, like a 432 /// shift with a shift amount larger than the width of the result). It checks 433 /// for malloc and alloca because speculatively executing them might cause a 434 /// memory leak. It also returns false for instructions related to control 435 /// flow, specifically terminators and PHI nodes. 436 /// 437 /// If the CtxI is specified this method performs context-sensitive analysis 438 /// and returns true if it is safe to execute the instruction immediately 439 /// before the CtxI. 440 /// 441 /// If the CtxI is NOT specified this method only looks at the instruction 442 /// itself and its operands, so if this method returns true, it is safe to 443 /// move the instruction as long as the correct dominance relationships for 444 /// the operands and users hold. 445 /// 446 /// This method can return true for instructions that read memory; 447 /// for such instructions, moving them may change the resulting value. 448 bool isSafeToSpeculativelyExecute(const Value *V, 449 const Instruction *CtxI = nullptr, 450 const DominatorTree *DT = nullptr); 451 452 /// Returns true if the result or effects of the given instructions \p I 453 /// depend on or influence global memory. 454 /// Memory dependence arises for example if the instruction reads from 455 /// memory or may produce effects or undefined behaviour. Memory dependent 456 /// instructions generally cannot be reorderd with respect to other memory 457 /// dependent instructions or moved into non-dominated basic blocks. 458 /// Instructions which just compute a value based on the values of their 459 /// operands are not memory dependent. 460 bool mayBeMemoryDependent(const Instruction &I); 461 462 /// Return true if it is an intrinsic that cannot be speculated but also 463 /// cannot trap. 
bool isAssumeLikeIntrinsic(const Instruction *I);

/// Return true if it is valid to use the assumptions provided by an
/// assume intrinsic, I, at the point in the control-flow identified by the
/// context instruction, CxtI.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
                             const DominatorTree *DT = nullptr);

/// Result kind returned by the computeOverflowFor* family of queries below.
enum class OverflowResult {
  /// Always overflows in the direction of signed/unsigned min value.
  AlwaysOverflowsLow,
  /// Always overflows in the direction of signed/unsigned max value.
  AlwaysOverflowsHigh,
  /// May or may not overflow.
  MayOverflow,
  /// Never overflows.
  NeverOverflows,
};

OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
                                             const Value *RHS,
                                             const DataLayout &DL,
                                             AssumptionCache *AC,
                                             const Instruction *CxtI,
                                             const DominatorTree *DT,
                                             bool UseInstrInfo = true);
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                           const DataLayout &DL,
                                           AssumptionCache *AC,
                                           const Instruction *CxtI,
                                           const DominatorTree *DT,
                                           bool UseInstrInfo = true);
OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
                                             const Value *RHS,
                                             const DataLayout &DL,
                                             AssumptionCache *AC,
                                             const Instruction *CxtI,
                                             const DominatorTree *DT,
                                             bool UseInstrInfo = true);
OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
                                           const DataLayout &DL,
                                           AssumptionCache *AC = nullptr,
                                           const Instruction *CxtI = nullptr,
                                           const DominatorTree *DT = nullptr);
/// This version also leverages the sign bit of Add if known.
509 OverflowResult computeOverflowForSignedAdd(const AddOperator *Add, 510 const DataLayout &DL, 511 AssumptionCache *AC = nullptr, 512 const Instruction *CxtI = nullptr, 513 const DominatorTree *DT = nullptr); 514 OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, 515 const DataLayout &DL, 516 AssumptionCache *AC, 517 const Instruction *CxtI, 518 const DominatorTree *DT); 519 OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, 520 const DataLayout &DL, 521 AssumptionCache *AC, 522 const Instruction *CxtI, 523 const DominatorTree *DT); 524 525 /// Returns true if the arithmetic part of the \p WO 's result is 526 /// used only along the paths control dependent on the computation 527 /// not overflowing, \p WO being an <op>.with.overflow intrinsic. 528 bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, 529 const DominatorTree &DT); 530 531 532 /// Determine the possible constant range of an integer or vector of integer 533 /// value. This is intended as a cheap, non-recursive check. 534 ConstantRange computeConstantRange(const Value *V, bool UseInstrInfo = true, 535 AssumptionCache *AC = nullptr, 536 const Instruction *CtxI = nullptr, 537 unsigned Depth = 0); 538 539 /// Return true if this function can prove that the instruction I will 540 /// always transfer execution to one of its successors (including the next 541 /// instruction that follows within a basic block). E.g. this is not 542 /// guaranteed for function calls that could loop infinitely. 543 /// 544 /// In other words, this function returns false for instructions that may 545 /// transfer execution or fail to transfer execution in a way that is not 546 /// captured in the CFG nor in the sequence of instructions within a basic 547 /// block. 548 /// 549 /// Undefined behavior is assumed not to happen, so e.g. 
division is 550 /// guaranteed to transfer execution to the following instruction even 551 /// though division by zero might cause undefined behavior. 552 bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I); 553 554 /// Returns true if this block does not contain a potential implicit exit. 555 /// This is equivelent to saying that all instructions within the basic block 556 /// are guaranteed to transfer execution to their successor within the basic 557 /// block. This has the same assumptions w.r.t. undefined behavior as the 558 /// instruction variant of this function. 559 bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB); 560 561 /// Return true if this function can prove that the instruction I 562 /// is executed for every iteration of the loop L. 563 /// 564 /// Note that this currently only considers the loop header. 565 bool isGuaranteedToExecuteForEveryIteration(const Instruction *I, 566 const Loop *L); 567 568 /// Return true if I yields poison or raises UB if any of its operands is 569 /// poison. 570 /// Formally, given I = `r = op v1 v2 .. vN`, propagatesPoison returns true 571 /// if, for all i, r is evaluated to poison or op raises UB if vi = poison. 572 /// To filter out operands that raise UB on poison, you can use 573 /// getGuaranteedNonPoisonOp. 574 bool propagatesPoison(const Instruction *I); 575 576 /// Return either nullptr or an operand of I such that I will trigger 577 /// undefined behavior if I is executed and that operand has a poison 578 /// value. 579 const Value *getGuaranteedNonPoisonOp(const Instruction *I); 580 581 /// Return true if the given instruction must trigger undefined behavior. 582 /// when I is executed with any operands which appear in KnownPoison holding 583 /// a poison value at the point of execution. 
584 bool mustTriggerUB(const Instruction *I, 585 const SmallSet<const Value *, 16>& KnownPoison); 586 587 /// Return true if this function can prove that if PoisonI is executed 588 /// and yields a poison value, then that will trigger undefined behavior. 589 /// 590 /// Note that this currently only considers the basic block that is 591 /// the parent of I. 592 bool programUndefinedIfPoison(const Instruction *PoisonI); 593 594 /// Return true if I can create poison from non-poison operands. 595 /// For vectors, canCreatePoison returns true if there is potential poison in 596 /// any element of the result when vectors without poison are given as 597 /// operands. 598 /// For example, given `I = shl <2 x i32> %x, <0, 32>`, this function returns 599 /// true. If I raises immediate UB but never creates poison (e.g. sdiv I, 0), 600 /// canCreatePoison returns false. 601 bool canCreatePoison(const Instruction *I); 602 603 /// Return true if this function can prove that V is never undef value 604 /// or poison value. 605 // 606 /// If CtxI and DT are specified this method performs flow-sensitive analysis 607 /// and returns true if it is guaranteed to be never undef or poison 608 /// immediately before the CtxI. 609 bool isGuaranteedNotToBeUndefOrPoison(const Value *V, 610 const Instruction *CtxI = nullptr, 611 const DominatorTree *DT = nullptr, 612 unsigned Depth = 0); 613 614 /// Specific patterns of select instructions we can match. 615 enum SelectPatternFlavor { 616 SPF_UNKNOWN = 0, 617 SPF_SMIN, /// Signed minimum 618 SPF_UMIN, /// Unsigned minimum 619 SPF_SMAX, /// Signed maximum 620 SPF_UMAX, /// Unsigned maximum 621 SPF_FMINNUM, /// Floating point minnum 622 SPF_FMAXNUM, /// Floating point maxnum 623 SPF_ABS, /// Absolute value 624 SPF_NABS /// Negated absolute value 625 }; 626 627 /// Behavior when a floating point min/max is given one NaN and one 628 /// non-NaN as input. 629 enum SelectPatternNaNBehavior { 630 SPNB_NA = 0, /// NaN behavior not applicable. 
631 SPNB_RETURNS_NAN, /// Given one NaN input, returns the NaN. 632 SPNB_RETURNS_OTHER, /// Given one NaN input, returns the non-NaN. 633 SPNB_RETURNS_ANY /// Given one NaN input, can return either (or 634 /// it has been determined that no operands can 635 /// be NaN). 636 }; 637 638 struct SelectPatternResult { 639 SelectPatternFlavor Flavor; 640 SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is 641 /// SPF_FMINNUM or SPF_FMAXNUM. 642 bool Ordered; /// When implementing this min/max pattern as 643 /// fcmp; select, does the fcmp have to be 644 /// ordered? 645 646 /// Return true if \p SPF is a min or a max pattern. isMinOrMaxSelectPatternResult647 static bool isMinOrMax(SelectPatternFlavor SPF) { 648 return SPF != SPF_UNKNOWN && SPF != SPF_ABS && SPF != SPF_NABS; 649 } 650 }; 651 652 /// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind 653 /// and providing the out parameter results if we successfully match. 654 /// 655 /// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be 656 /// the negation instruction from the idiom. 657 /// 658 /// If CastOp is not nullptr, also match MIN/MAX idioms where the type does 659 /// not match that of the original select. If this is the case, the cast 660 /// operation (one of Trunc,SExt,Zext) that must be done to transform the 661 /// type of LHS and RHS into the type of V is returned in CastOp. 
662 /// 663 /// For example: 664 /// %1 = icmp slt i32 %a, i32 4 665 /// %2 = sext i32 %a to i64 666 /// %3 = select i1 %1, i64 %2, i64 4 667 /// 668 /// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt 669 /// 670 SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, 671 Instruction::CastOps *CastOp = nullptr, 672 unsigned Depth = 0); 673 674 inline SelectPatternResult matchSelectPattern(const Value * V,const Value * & LHS,const Value * & RHS)675 matchSelectPattern(const Value *V, const Value *&LHS, const Value *&RHS) { 676 Value *L = const_cast<Value *>(LHS); 677 Value *R = const_cast<Value *>(RHS); 678 auto Result = matchSelectPattern(const_cast<Value *>(V), L, R); 679 LHS = L; 680 RHS = R; 681 return Result; 682 } 683 684 /// Determine the pattern that a select with the given compare as its 685 /// predicate and given values as its true/false operands would match. 686 SelectPatternResult matchDecomposedSelectPattern( 687 CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, 688 Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0); 689 690 /// Return the canonical comparison predicate for the specified 691 /// minimum/maximum flavor. 692 CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, 693 bool Ordered = false); 694 695 /// Return the inverse minimum/maximum flavor of the specified flavor. 696 /// For example, signed minimum is the inverse of signed maximum. 697 SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF); 698 699 /// Return the canonical inverse comparison predicate for the specified 700 /// minimum/maximum flavor. 701 CmpInst::Predicate getInverseMinMaxPred(SelectPatternFlavor SPF); 702 703 /// Return true if RHS is known to be implied true by LHS. Return false if 704 /// RHS is known to be implied false by LHS. Otherwise, return None if no 705 /// implication can be made. 706 /// A & B must be i1 (boolean) values or a vector of such values. 
Note that 707 /// the truth table for implication is the same as <=u on i1 values (but not 708 /// <=s!). The truth table for both is: 709 /// | T | F (B) 710 /// T | T | F 711 /// F | T | T 712 /// (A) 713 Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS, 714 const DataLayout &DL, bool LHSIsTrue = true, 715 unsigned Depth = 0); 716 Optional<bool> isImpliedCondition(const Value *LHS, 717 CmpInst::Predicate RHSPred, 718 const Value *RHSOp0, const Value *RHSOp1, 719 const DataLayout &DL, bool LHSIsTrue = true, 720 unsigned Depth = 0); 721 722 /// Return the boolean condition value in the context of the given instruction 723 /// if it is known based on dominating conditions. 724 Optional<bool> isImpliedByDomCondition(const Value *Cond, 725 const Instruction *ContextI, 726 const DataLayout &DL); 727 Optional<bool> isImpliedByDomCondition(CmpInst::Predicate Pred, 728 const Value *LHS, const Value *RHS, 729 const Instruction *ContextI, 730 const DataLayout &DL); 731 732 /// If Ptr1 is provably equal to Ptr2 plus a constant offset, return that 733 /// offset. For example, Ptr1 might be &A[42], and Ptr2 might be &A[40]. In 734 /// this case offset would be -8. 735 Optional<int64_t> isPointerOffset(const Value *Ptr1, const Value *Ptr2, 736 const DataLayout &DL); 737 } // end namespace llvm 738 739 #endif // LLVM_ANALYSIS_VALUETRACKING_H 740