1 //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file exposes the class definitions of all of the subclasses of the 10 // Instruction class. This is meant to be an easy way to get access to all 11 // instruction subclasses. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #ifndef LLVM_IR_INSTRUCTIONS_H 16 #define LLVM_IR_INSTRUCTIONS_H 17 18 #include "llvm/ADT/ArrayRef.h" 19 #include "llvm/ADT/Bitfields.h" 20 #include "llvm/ADT/None.h" 21 #include "llvm/ADT/STLExtras.h" 22 #include "llvm/ADT/SmallVector.h" 23 #include "llvm/ADT/StringRef.h" 24 #include "llvm/ADT/Twine.h" 25 #include "llvm/ADT/iterator.h" 26 #include "llvm/ADT/iterator_range.h" 27 #include "llvm/IR/Attributes.h" 28 #include "llvm/IR/BasicBlock.h" 29 #include "llvm/IR/CallingConv.h" 30 #include "llvm/IR/CFG.h" 31 #include "llvm/IR/Constant.h" 32 #include "llvm/IR/DerivedTypes.h" 33 #include "llvm/IR/Function.h" 34 #include "llvm/IR/InstrTypes.h" 35 #include "llvm/IR/Instruction.h" 36 #include "llvm/IR/OperandTraits.h" 37 #include "llvm/IR/Type.h" 38 #include "llvm/IR/Use.h" 39 #include "llvm/IR/User.h" 40 #include "llvm/IR/Value.h" 41 #include "llvm/Support/AtomicOrdering.h" 42 #include "llvm/Support/Casting.h" 43 #include "llvm/Support/ErrorHandling.h" 44 #include <cassert> 45 #include <cstddef> 46 #include <cstdint> 47 #include <iterator> 48 49 namespace llvm { 50 51 class APInt; 52 class ConstantInt; 53 class DataLayout; 54 class LLVMContext; 55 56 //===----------------------------------------------------------------------===// 57 // AllocaInst Class 58 
//===----------------------------------------------------------------------===// 59 60 /// an instruction to allocate memory on the stack 61 class AllocaInst : public UnaryInstruction { 62 Type *AllocatedType; 63 64 using AlignmentField = AlignmentBitfieldElementT<0>; 65 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>; 66 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>; 67 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField, 68 SwiftErrorField>(), 69 "Bitfields must be contiguous"); 70 71 protected: 72 // Note: Instruction needs to be a friend here to call cloneImpl. 73 friend class Instruction; 74 75 AllocaInst *cloneImpl() const; 76 77 public: 78 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 79 const Twine &Name, Instruction *InsertBefore); 80 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, 81 const Twine &Name, BasicBlock *InsertAtEnd); 82 83 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name, 84 Instruction *InsertBefore); 85 AllocaInst(Type *Ty, unsigned AddrSpace, 86 const Twine &Name, BasicBlock *InsertAtEnd); 87 88 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, 89 const Twine &Name = "", Instruction *InsertBefore = nullptr); 90 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, 91 const Twine &Name, BasicBlock *InsertAtEnd); 92 93 /// Return true if there is an allocation size parameter to the allocation 94 /// instruction that is not 1. 95 bool isArrayAllocation() const; 96 97 /// Get the number of elements allocated. For a simple allocation of a single 98 /// element, this will return a constant 1 value. getArraySize()99 const Value *getArraySize() const { return getOperand(0); } getArraySize()100 Value *getArraySize() { return getOperand(0); } 101 102 /// Overload to return most specific pointer type. 
getType()103 PointerType *getType() const { 104 return cast<PointerType>(Instruction::getType()); 105 } 106 107 /// Get allocation size in bits. Returns None if size can't be determined, 108 /// e.g. in case of a VLA. 109 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const; 110 111 /// Return the type that is being allocated by the instruction. getAllocatedType()112 Type *getAllocatedType() const { return AllocatedType; } 113 /// for use only in special circumstances that need to generically 114 /// transform a whole instruction (eg: IR linking and vectorization). setAllocatedType(Type * Ty)115 void setAllocatedType(Type *Ty) { AllocatedType = Ty; } 116 117 /// Return the alignment of the memory that is being allocated by the 118 /// instruction. getAlign()119 Align getAlign() const { 120 return Align(1ULL << getSubclassData<AlignmentField>()); 121 } 122 setAlignment(Align Align)123 void setAlignment(Align Align) { 124 setSubclassData<AlignmentField>(Log2(Align)); 125 } 126 127 // FIXME: Remove this one transition to Align is over. getAlignment()128 unsigned getAlignment() const { return getAlign().value(); } 129 130 /// Return true if this alloca is in the entry block of the function and is a 131 /// constant size. If so, the code generator will fold it into the 132 /// prolog/epilog code, so it is basically free. 133 bool isStaticAlloca() const; 134 135 /// Return true if this alloca is used as an inalloca argument to a call. Such 136 /// allocas are never considered static even if they are in the entry block. isUsedWithInAlloca()137 bool isUsedWithInAlloca() const { 138 return getSubclassData<UsedWithInAllocaField>(); 139 } 140 141 /// Specify whether this alloca is used to represent the arguments to a call. setUsedWithInAlloca(bool V)142 void setUsedWithInAlloca(bool V) { 143 setSubclassData<UsedWithInAllocaField>(V); 144 } 145 146 /// Return true if this alloca is used as a swifterror argument to a call. 
isSwiftError()147 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); } 148 /// Specify whether this alloca is used to represent a swifterror. setSwiftError(bool V)149 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); } 150 151 // Methods for support type inquiry through isa, cast, and dyn_cast: classof(const Instruction * I)152 static bool classof(const Instruction *I) { 153 return (I->getOpcode() == Instruction::Alloca); 154 } classof(const Value * V)155 static bool classof(const Value *V) { 156 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 157 } 158 159 private: 160 // Shadow Instruction::setInstructionSubclassData with a private forwarding 161 // method so that subclasses cannot accidentally use it. 162 template <typename Bitfield> setSubclassData(typename Bitfield::Type Value)163 void setSubclassData(typename Bitfield::Type Value) { 164 Instruction::setSubclassData<Bitfield>(Value); 165 } 166 }; 167 168 //===----------------------------------------------------------------------===// 169 // LoadInst Class 170 //===----------------------------------------------------------------------===// 171 172 /// An instruction for reading from memory. This uses the SubclassData field in 173 /// Value to store whether or not the load is volatile. 174 class LoadInst : public UnaryInstruction { 175 using VolatileField = BoolBitfieldElementT<0>; 176 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; 177 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; 178 static_assert( 179 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), 180 "Bitfields must be contiguous"); 181 182 void AssertOK(); 183 184 protected: 185 // Note: Instruction needs to be a friend here to call cloneImpl. 
186 friend class Instruction; 187 188 LoadInst *cloneImpl() const; 189 190 public: 191 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, 192 Instruction *InsertBefore); 193 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd); 194 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 195 Instruction *InsertBefore); 196 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 197 BasicBlock *InsertAtEnd); 198 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 199 Align Align, Instruction *InsertBefore = nullptr); 200 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 201 Align Align, BasicBlock *InsertAtEnd); 202 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 203 Align Align, AtomicOrdering Order, 204 SyncScope::ID SSID = SyncScope::System, 205 Instruction *InsertBefore = nullptr); 206 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, 207 Align Align, AtomicOrdering Order, SyncScope::ID SSID, 208 BasicBlock *InsertAtEnd); 209 210 /// Return true if this is a load from a volatile memory location. isVolatile()211 bool isVolatile() const { return getSubclassData<VolatileField>(); } 212 213 /// Specify whether this is a volatile load or not. setVolatile(bool V)214 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 215 216 /// Return the alignment of the access that is being performed. 217 /// FIXME: Remove this function once transition to Align is over. 218 /// Use getAlign() instead. getAlignment()219 unsigned getAlignment() const { return getAlign().value(); } 220 221 /// Return the alignment of the access that is being performed. 
getAlign()222 Align getAlign() const { 223 return Align(1ULL << (getSubclassData<AlignmentField>())); 224 } 225 setAlignment(Align Align)226 void setAlignment(Align Align) { 227 setSubclassData<AlignmentField>(Log2(Align)); 228 } 229 230 /// Returns the ordering constraint of this load instruction. getOrdering()231 AtomicOrdering getOrdering() const { 232 return getSubclassData<OrderingField>(); 233 } 234 /// Sets the ordering constraint of this load instruction. May not be Release 235 /// or AcquireRelease. setOrdering(AtomicOrdering Ordering)236 void setOrdering(AtomicOrdering Ordering) { 237 setSubclassData<OrderingField>(Ordering); 238 } 239 240 /// Returns the synchronization scope ID of this load instruction. getSyncScopeID()241 SyncScope::ID getSyncScopeID() const { 242 return SSID; 243 } 244 245 /// Sets the synchronization scope ID of this load instruction. setSyncScopeID(SyncScope::ID SSID)246 void setSyncScopeID(SyncScope::ID SSID) { 247 this->SSID = SSID; 248 } 249 250 /// Sets the ordering constraint and the synchronization scope ID of this load 251 /// instruction. 252 void setAtomic(AtomicOrdering Ordering, 253 SyncScope::ID SSID = SyncScope::System) { 254 setOrdering(Ordering); 255 setSyncScopeID(SSID); 256 } 257 isSimple()258 bool isSimple() const { return !isAtomic() && !isVolatile(); } 259 isUnordered()260 bool isUnordered() const { 261 return (getOrdering() == AtomicOrdering::NotAtomic || 262 getOrdering() == AtomicOrdering::Unordered) && 263 !isVolatile(); 264 } 265 getPointerOperand()266 Value *getPointerOperand() { return getOperand(0); } getPointerOperand()267 const Value *getPointerOperand() const { return getOperand(0); } getPointerOperandIndex()268 static unsigned getPointerOperandIndex() { return 0U; } getPointerOperandType()269 Type *getPointerOperandType() const { return getPointerOperand()->getType(); } 270 271 /// Returns the address space of the pointer operand. 
getPointerAddressSpace()272 unsigned getPointerAddressSpace() const { 273 return getPointerOperandType()->getPointerAddressSpace(); 274 } 275 276 // Methods for support type inquiry through isa, cast, and dyn_cast: classof(const Instruction * I)277 static bool classof(const Instruction *I) { 278 return I->getOpcode() == Instruction::Load; 279 } classof(const Value * V)280 static bool classof(const Value *V) { 281 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 282 } 283 284 private: 285 // Shadow Instruction::setInstructionSubclassData with a private forwarding 286 // method so that subclasses cannot accidentally use it. 287 template <typename Bitfield> setSubclassData(typename Bitfield::Type Value)288 void setSubclassData(typename Bitfield::Type Value) { 289 Instruction::setSubclassData<Bitfield>(Value); 290 } 291 292 /// The synchronization scope ID of this load instruction. Not quite enough 293 /// room in SubClassData for everything, so synchronization scope ID gets its 294 /// own field. 295 SyncScope::ID SSID; 296 }; 297 298 //===----------------------------------------------------------------------===// 299 // StoreInst Class 300 //===----------------------------------------------------------------------===// 301 302 /// An instruction for storing to memory. 303 class StoreInst : public Instruction { 304 using VolatileField = BoolBitfieldElementT<0>; 305 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; 306 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; 307 static_assert( 308 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), 309 "Bitfields must be contiguous"); 310 311 void AssertOK(); 312 313 protected: 314 // Note: Instruction needs to be a friend here to call cloneImpl. 
315 friend class Instruction; 316 317 StoreInst *cloneImpl() const; 318 319 public: 320 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore); 321 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd); 322 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore); 323 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd); 324 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 325 Instruction *InsertBefore = nullptr); 326 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 327 BasicBlock *InsertAtEnd); 328 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 329 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System, 330 Instruction *InsertBefore = nullptr); 331 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, 332 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd); 333 334 // allocate space for exactly two operands new(size_t s)335 void *operator new(size_t s) { 336 return User::operator new(s, 2); 337 } 338 339 /// Return true if this is a store to a volatile memory location. isVolatile()340 bool isVolatile() const { return getSubclassData<VolatileField>(); } 341 342 /// Specify whether this is a volatile store or not. setVolatile(bool V)343 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 344 345 /// Transparently provide more efficient getOperand methods. 346 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 347 348 /// Return the alignment of the access that is being performed 349 /// FIXME: Remove this function once transition to Align is over. 350 /// Use getAlign() instead. 
getAlignment()351 unsigned getAlignment() const { return getAlign().value(); } 352 getAlign()353 Align getAlign() const { 354 return Align(1ULL << (getSubclassData<AlignmentField>())); 355 } 356 setAlignment(Align Align)357 void setAlignment(Align Align) { 358 setSubclassData<AlignmentField>(Log2(Align)); 359 } 360 361 /// Returns the ordering constraint of this store instruction. getOrdering()362 AtomicOrdering getOrdering() const { 363 return getSubclassData<OrderingField>(); 364 } 365 366 /// Sets the ordering constraint of this store instruction. May not be 367 /// Acquire or AcquireRelease. setOrdering(AtomicOrdering Ordering)368 void setOrdering(AtomicOrdering Ordering) { 369 setSubclassData<OrderingField>(Ordering); 370 } 371 372 /// Returns the synchronization scope ID of this store instruction. getSyncScopeID()373 SyncScope::ID getSyncScopeID() const { 374 return SSID; 375 } 376 377 /// Sets the synchronization scope ID of this store instruction. setSyncScopeID(SyncScope::ID SSID)378 void setSyncScopeID(SyncScope::ID SSID) { 379 this->SSID = SSID; 380 } 381 382 /// Sets the ordering constraint and the synchronization scope ID of this 383 /// store instruction. 
384 void setAtomic(AtomicOrdering Ordering, 385 SyncScope::ID SSID = SyncScope::System) { 386 setOrdering(Ordering); 387 setSyncScopeID(SSID); 388 } 389 isSimple()390 bool isSimple() const { return !isAtomic() && !isVolatile(); } 391 isUnordered()392 bool isUnordered() const { 393 return (getOrdering() == AtomicOrdering::NotAtomic || 394 getOrdering() == AtomicOrdering::Unordered) && 395 !isVolatile(); 396 } 397 getValueOperand()398 Value *getValueOperand() { return getOperand(0); } getValueOperand()399 const Value *getValueOperand() const { return getOperand(0); } 400 getPointerOperand()401 Value *getPointerOperand() { return getOperand(1); } getPointerOperand()402 const Value *getPointerOperand() const { return getOperand(1); } getPointerOperandIndex()403 static unsigned getPointerOperandIndex() { return 1U; } getPointerOperandType()404 Type *getPointerOperandType() const { return getPointerOperand()->getType(); } 405 406 /// Returns the address space of the pointer operand. getPointerAddressSpace()407 unsigned getPointerAddressSpace() const { 408 return getPointerOperandType()->getPointerAddressSpace(); 409 } 410 411 // Methods for support type inquiry through isa, cast, and dyn_cast: classof(const Instruction * I)412 static bool classof(const Instruction *I) { 413 return I->getOpcode() == Instruction::Store; 414 } classof(const Value * V)415 static bool classof(const Value *V) { 416 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 417 } 418 419 private: 420 // Shadow Instruction::setInstructionSubclassData with a private forwarding 421 // method so that subclasses cannot accidentally use it. 422 template <typename Bitfield> setSubclassData(typename Bitfield::Type Value)423 void setSubclassData(typename Bitfield::Type Value) { 424 Instruction::setSubclassData<Bitfield>(Value); 425 } 426 427 /// The synchronization scope ID of this store instruction. 
Not quite enough 428 /// room in SubClassData for everything, so synchronization scope ID gets its 429 /// own field. 430 SyncScope::ID SSID; 431 }; 432 433 template <> 434 struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> { 435 }; 436 437 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value) 438 439 //===----------------------------------------------------------------------===// 440 // FenceInst Class 441 //===----------------------------------------------------------------------===// 442 443 /// An instruction for ordering other memory operations. 444 class FenceInst : public Instruction { 445 using OrderingField = AtomicOrderingBitfieldElementT<0>; 446 447 void Init(AtomicOrdering Ordering, SyncScope::ID SSID); 448 449 protected: 450 // Note: Instruction needs to be a friend here to call cloneImpl. 451 friend class Instruction; 452 453 FenceInst *cloneImpl() const; 454 455 public: 456 // Ordering may only be Acquire, Release, AcquireRelease, or 457 // SequentiallyConsistent. 458 FenceInst(LLVMContext &C, AtomicOrdering Ordering, 459 SyncScope::ID SSID = SyncScope::System, 460 Instruction *InsertBefore = nullptr); 461 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID, 462 BasicBlock *InsertAtEnd); 463 464 // allocate space for exactly zero operands 465 void *operator new(size_t s) { 466 return User::operator new(s, 0); 467 } 468 469 /// Returns the ordering constraint of this fence instruction. 470 AtomicOrdering getOrdering() const { 471 return getSubclassData<OrderingField>(); 472 } 473 474 /// Sets the ordering constraint of this fence instruction. May only be 475 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent. 476 void setOrdering(AtomicOrdering Ordering) { 477 setSubclassData<OrderingField>(Ordering); 478 } 479 480 /// Returns the synchronization scope ID of this fence instruction. 
481 SyncScope::ID getSyncScopeID() const { 482 return SSID; 483 } 484 485 /// Sets the synchronization scope ID of this fence instruction. 486 void setSyncScopeID(SyncScope::ID SSID) { 487 this->SSID = SSID; 488 } 489 490 // Methods for support type inquiry through isa, cast, and dyn_cast: 491 static bool classof(const Instruction *I) { 492 return I->getOpcode() == Instruction::Fence; 493 } 494 static bool classof(const Value *V) { 495 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 496 } 497 498 private: 499 // Shadow Instruction::setInstructionSubclassData with a private forwarding 500 // method so that subclasses cannot accidentally use it. 501 template <typename Bitfield> 502 void setSubclassData(typename Bitfield::Type Value) { 503 Instruction::setSubclassData<Bitfield>(Value); 504 } 505 506 /// The synchronization scope ID of this fence instruction. Not quite enough 507 /// room in SubClassData for everything, so synchronization scope ID gets its 508 /// own field. 509 SyncScope::ID SSID; 510 }; 511 512 //===----------------------------------------------------------------------===// 513 // AtomicCmpXchgInst Class 514 //===----------------------------------------------------------------------===// 515 516 /// An instruction that atomically checks whether a 517 /// specified value is in a memory location, and, if it is, stores a new value 518 /// there. The value returned by this instruction is a pair containing the 519 /// original value as first element, and an i1 indicating success (true) or 520 /// failure (false) as second element. 
521 /// 522 class AtomicCmpXchgInst : public Instruction { 523 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align, 524 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, 525 SyncScope::ID SSID); 526 527 template <unsigned Offset> 528 using AtomicOrderingBitfieldElement = 529 typename Bitfield::Element<AtomicOrdering, Offset, 3, 530 AtomicOrdering::LAST>; 531 532 protected: 533 // Note: Instruction needs to be a friend here to call cloneImpl. 534 friend class Instruction; 535 536 AtomicCmpXchgInst *cloneImpl() const; 537 538 public: 539 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, 540 AtomicOrdering SuccessOrdering, 541 AtomicOrdering FailureOrdering, SyncScope::ID SSID, 542 Instruction *InsertBefore = nullptr); 543 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, 544 AtomicOrdering SuccessOrdering, 545 AtomicOrdering FailureOrdering, SyncScope::ID SSID, 546 BasicBlock *InsertAtEnd); 547 548 // allocate space for exactly three operands 549 void *operator new(size_t s) { 550 return User::operator new(s, 3); 551 } 552 553 using VolatileField = BoolBitfieldElementT<0>; 554 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>; 555 using SuccessOrderingField = 556 AtomicOrderingBitfieldElementT<WeakField::NextBit>; 557 using FailureOrderingField = 558 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>; 559 using AlignmentField = 560 AlignmentBitfieldElementT<FailureOrderingField::NextBit>; 561 static_assert( 562 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField, 563 FailureOrderingField, AlignmentField>(), 564 "Bitfields must be contiguous"); 565 566 /// Return the alignment of the memory that is being allocated by the 567 /// instruction. 
568 Align getAlign() const { 569 return Align(1ULL << getSubclassData<AlignmentField>()); 570 } 571 572 void setAlignment(Align Align) { 573 setSubclassData<AlignmentField>(Log2(Align)); 574 } 575 576 /// Return true if this is a cmpxchg from a volatile memory 577 /// location. 578 /// 579 bool isVolatile() const { return getSubclassData<VolatileField>(); } 580 581 /// Specify whether this is a volatile cmpxchg. 582 /// 583 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 584 585 /// Return true if this cmpxchg may spuriously fail. 586 bool isWeak() const { return getSubclassData<WeakField>(); } 587 588 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); } 589 590 /// Transparently provide more efficient getOperand methods. 591 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 592 593 static bool isValidSuccessOrdering(AtomicOrdering Ordering) { 594 return Ordering != AtomicOrdering::NotAtomic && 595 Ordering != AtomicOrdering::Unordered; 596 } 597 598 static bool isValidFailureOrdering(AtomicOrdering Ordering) { 599 return Ordering != AtomicOrdering::NotAtomic && 600 Ordering != AtomicOrdering::Unordered && 601 Ordering != AtomicOrdering::AcquireRelease && 602 Ordering != AtomicOrdering::Release; 603 } 604 605 /// Returns the success ordering constraint of this cmpxchg instruction. 606 AtomicOrdering getSuccessOrdering() const { 607 return getSubclassData<SuccessOrderingField>(); 608 } 609 610 /// Sets the success ordering constraint of this cmpxchg instruction. 611 void setSuccessOrdering(AtomicOrdering Ordering) { 612 assert(isValidSuccessOrdering(Ordering) && 613 "invalid CmpXchg success ordering"); 614 setSubclassData<SuccessOrderingField>(Ordering); 615 } 616 617 /// Returns the failure ordering constraint of this cmpxchg instruction. 618 AtomicOrdering getFailureOrdering() const { 619 return getSubclassData<FailureOrderingField>(); 620 } 621 622 /// Sets the failure ordering constraint of this cmpxchg instruction. 
623 void setFailureOrdering(AtomicOrdering Ordering) { 624 assert(isValidFailureOrdering(Ordering) && 625 "invalid CmpXchg failure ordering"); 626 setSubclassData<FailureOrderingField>(Ordering); 627 } 628 629 /// Returns the synchronization scope ID of this cmpxchg instruction. 630 SyncScope::ID getSyncScopeID() const { 631 return SSID; 632 } 633 634 /// Sets the synchronization scope ID of this cmpxchg instruction. 635 void setSyncScopeID(SyncScope::ID SSID) { 636 this->SSID = SSID; 637 } 638 639 Value *getPointerOperand() { return getOperand(0); } 640 const Value *getPointerOperand() const { return getOperand(0); } 641 static unsigned getPointerOperandIndex() { return 0U; } 642 643 Value *getCompareOperand() { return getOperand(1); } 644 const Value *getCompareOperand() const { return getOperand(1); } 645 646 Value *getNewValOperand() { return getOperand(2); } 647 const Value *getNewValOperand() const { return getOperand(2); } 648 649 /// Returns the address space of the pointer operand. 650 unsigned getPointerAddressSpace() const { 651 return getPointerOperand()->getType()->getPointerAddressSpace(); 652 } 653 654 /// Returns the strongest permitted ordering on failure, given the 655 /// desired ordering on success. 656 /// 657 /// If the comparison in a cmpxchg operation fails, there is no atomic store 658 /// so release semantics cannot be provided. So this function drops explicit 659 /// Release requests from the AtomicOrdering. A SequentiallyConsistent 660 /// operation would remain SequentiallyConsistent. 
661 static AtomicOrdering 662 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) { 663 switch (SuccessOrdering) { 664 default: 665 llvm_unreachable("invalid cmpxchg success ordering"); 666 case AtomicOrdering::Release: 667 case AtomicOrdering::Monotonic: 668 return AtomicOrdering::Monotonic; 669 case AtomicOrdering::AcquireRelease: 670 case AtomicOrdering::Acquire: 671 return AtomicOrdering::Acquire; 672 case AtomicOrdering::SequentiallyConsistent: 673 return AtomicOrdering::SequentiallyConsistent; 674 } 675 } 676 677 // Methods for support type inquiry through isa, cast, and dyn_cast: 678 static bool classof(const Instruction *I) { 679 return I->getOpcode() == Instruction::AtomicCmpXchg; 680 } 681 static bool classof(const Value *V) { 682 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 683 } 684 685 private: 686 // Shadow Instruction::setInstructionSubclassData with a private forwarding 687 // method so that subclasses cannot accidentally use it. 688 template <typename Bitfield> 689 void setSubclassData(typename Bitfield::Type Value) { 690 Instruction::setSubclassData<Bitfield>(Value); 691 } 692 693 /// The synchronization scope ID of this cmpxchg instruction. Not quite 694 /// enough room in SubClassData for everything, so synchronization scope ID 695 /// gets its own field. 696 SyncScope::ID SSID; 697 }; 698 699 template <> 700 struct OperandTraits<AtomicCmpXchgInst> : 701 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> { 702 }; 703 704 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value) 705 706 //===----------------------------------------------------------------------===// 707 // AtomicRMWInst Class 708 //===----------------------------------------------------------------------===// 709 710 /// an instruction that atomically reads a memory location, 711 /// combines it with another value, and then stores the result back. Returns 712 /// the old value. 
713 /// 714 class AtomicRMWInst : public Instruction { 715 protected: 716 // Note: Instruction needs to be a friend here to call cloneImpl. 717 friend class Instruction; 718 719 AtomicRMWInst *cloneImpl() const; 720 721 public: 722 /// This enumeration lists the possible modifications atomicrmw can make. In 723 /// the descriptions, 'p' is the pointer to the instruction's memory location, 724 /// 'old' is the initial value of *p, and 'v' is the other value passed to the 725 /// instruction. These instructions always return 'old'. 726 enum BinOp : unsigned { 727 /// *p = v 728 Xchg, 729 /// *p = old + v 730 Add, 731 /// *p = old - v 732 Sub, 733 /// *p = old & v 734 And, 735 /// *p = ~(old & v) 736 Nand, 737 /// *p = old | v 738 Or, 739 /// *p = old ^ v 740 Xor, 741 /// *p = old >signed v ? old : v 742 Max, 743 /// *p = old <signed v ? old : v 744 Min, 745 /// *p = old >unsigned v ? old : v 746 UMax, 747 /// *p = old <unsigned v ? old : v 748 UMin, 749 750 /// *p = old + v 751 FAdd, 752 753 /// *p = old - v 754 FSub, 755 756 FIRST_BINOP = Xchg, 757 LAST_BINOP = FSub, 758 BAD_BINOP 759 }; 760 761 private: 762 template <unsigned Offset> 763 using AtomicOrderingBitfieldElement = 764 typename Bitfield::Element<AtomicOrdering, Offset, 3, 765 AtomicOrdering::LAST>; 766 767 template <unsigned Offset> 768 using BinOpBitfieldElement = 769 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>; 770 771 public: 772 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, 773 AtomicOrdering Ordering, SyncScope::ID SSID, 774 Instruction *InsertBefore = nullptr); 775 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, 776 AtomicOrdering Ordering, SyncScope::ID SSID, 777 BasicBlock *InsertAtEnd); 778 779 // allocate space for exactly two operands 780 void *operator new(size_t s) { 781 return User::operator new(s, 2); 782 } 783 784 using VolatileField = BoolBitfieldElementT<0>; 785 using AtomicOrderingField = 786 
AtomicOrderingBitfieldElementT<VolatileField::NextBit>; 787 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>; 788 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>; 789 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField, 790 OperationField, AlignmentField>(), 791 "Bitfields must be contiguous"); 792 793 BinOp getOperation() const { return getSubclassData<OperationField>(); } 794 795 static StringRef getOperationName(BinOp Op); 796 797 static bool isFPOperation(BinOp Op) { 798 switch (Op) { 799 case AtomicRMWInst::FAdd: 800 case AtomicRMWInst::FSub: 801 return true; 802 default: 803 return false; 804 } 805 } 806 807 void setOperation(BinOp Operation) { 808 setSubclassData<OperationField>(Operation); 809 } 810 811 /// Return the alignment of the memory that is being allocated by the 812 /// instruction. 813 Align getAlign() const { 814 return Align(1ULL << getSubclassData<AlignmentField>()); 815 } 816 817 void setAlignment(Align Align) { 818 setSubclassData<AlignmentField>(Log2(Align)); 819 } 820 821 /// Return true if this is a RMW on a volatile memory location. 822 /// 823 bool isVolatile() const { return getSubclassData<VolatileField>(); } 824 825 /// Specify whether this is a volatile RMW or not. 826 /// 827 void setVolatile(bool V) { setSubclassData<VolatileField>(V); } 828 829 /// Transparently provide more efficient getOperand methods. 830 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 831 832 /// Returns the ordering constraint of this rmw instruction. 833 AtomicOrdering getOrdering() const { 834 return getSubclassData<AtomicOrderingField>(); 835 } 836 837 /// Sets the ordering constraint of this rmw instruction. 838 void setOrdering(AtomicOrdering Ordering) { 839 assert(Ordering != AtomicOrdering::NotAtomic && 840 "atomicrmw instructions can only be atomic."); 841 setSubclassData<AtomicOrderingField>(Ordering); 842 } 843 844 /// Returns the synchronization scope ID of this rmw instruction. 
  /// Returns the synchronization scope ID of this rmw instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this rmw instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Operand 0 is the memory location the RMW operates on.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  /// Operand 1 is the value combined with (or stored to) the memory location.
  Value *getValOperand() { return getOperand(1); }
  const Value *getValOperand() const { return getOperand(1); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns true if the operation is one of the floating-point binops
  /// (see isFPOperation, e.g. FAdd/FSub).
  bool isFloatingPointOperation() const {
    return isFPOperation(getOperation());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicRMW;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
            AtomicOrdering Ordering, SyncScope::ID SSID);

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this rmw instruction.  Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};

template <>
struct OperandTraits<AtomicRMWInst>
    : public FixedNumOperandTraits<AtomicRMWInst, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)

//===----------------------------------------------------------------------===//
//                             GetElementPtrInst Class
//===----------------------------------------------------------------------===//

// checkGEPType - Simple wrapper function to give a better assertion failure
// message on bad indexes for a gep instruction.
//
inline Type *checkGEPType(Type *Ty) {
  assert(Ty && "Invalid GetElementPtrInst indices for type!");
  return Ty;
}

/// an instruction for type-safe pointer arithmetic to
/// access elements of arrays and structs
///
class GetElementPtrInst : public Instruction {
  // Element type the pointer operand points at; kept explicitly because it
  // cannot always be recovered from the (possibly opaque) pointer type.
  Type *SourceElementType;
  // Element type the GEP result points at (cached result of getIndexedType).
  Type *ResultElementType;

  GetElementPtrInst(const GetElementPtrInst &GEPI);

  /// Constructors - Create a getelementptr instruction with a base pointer and
  /// a list of indices.  The first ctor can optionally insert before an
  /// existing instruction, the second appends the new instruction to the
  /// specified BasicBlock.
  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
                           ArrayRef<Value *> IdxList, unsigned Values,
                           const Twine &NameStr, Instruction *InsertBefore);
  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
                           ArrayRef<Value *> IdxList, unsigned Values,
                           const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  GetElementPtrInst *cloneImpl() const;

public:
  /// Create a getelementptr, inserted before \p InsertBefore. \p Values
  /// counts the pointer operand plus all indices and sizes the hung-off
  /// operand allocation (placement new). A null \p PointeeType is derived
  /// from the pointer operand's (non-opaque) pointee type; otherwise it must
  /// match the pointer's pointee type when that type is known.
  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    unsigned Values = 1 + unsigned(IdxList.size());
    if (!PointeeType) {
      PointeeType =
          cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
    } else {
      assert(cast<PointerType>(Ptr->getType()->getScalarType())
                 ->isOpaqueOrPointeeTypeMatches(PointeeType));
    }
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertBefore);
  }

  /// As above, but appends the new instruction to \p InsertAtEnd.
  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    unsigned Values = 1 + unsigned(IdxList.size());
    if (!PointeeType) {
      PointeeType =
          cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
    } else {
      assert(cast<PointerType>(Ptr->getType()->getScalarType())
                 ->isOpaqueOrPointeeTypeMatches(PointeeType));
    }
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertAtEnd);
  }

  /// Create an "inbounds" getelementptr. See the documentation for the
  /// "inbounds" flag in LangRef.html for details.
  static GetElementPtrInst *CreateInBounds(Value *Ptr,
                                           ArrayRef<Value *> IdxList,
                                           const Twine &NameStr = "",
                                           Instruction *InsertBefore = nullptr){
    // Delegate with a null pointee type; Create() derives it from Ptr.
    return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
  }

  static GetElementPtrInst *
  CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
                 const Twine &NameStr = "",
                 Instruction *InsertBefore = nullptr) {
    GetElementPtrInst *GEP =
        Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
    GEP->setIsInBounds(true);
    return GEP;
  }

  static GetElementPtrInst *CreateInBounds(Value *Ptr,
                                           ArrayRef<Value *> IdxList,
                                           const Twine &NameStr,
                                           BasicBlock *InsertAtEnd) {
    return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
  }

  static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
                                           ArrayRef<Value *> IdxList,
                                           const Twine &NameStr,
                                           BasicBlock *InsertAtEnd) {
    GetElementPtrInst *GEP =
        Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
    GEP->setIsInBounds(true);
    return GEP;
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  Type *getSourceElementType() const { return SourceElementType; }

  void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
  void setResultElementType(Type *Ty) { ResultElementType = Ty; }

  Type *getResultElementType() const {
    // Cached value must agree with the pointee type of the result pointer.
    assert(ResultElementType ==
           cast<PointerType>(getType()->getScalarType())->getElementType());
    return ResultElementType;
  }

  /// Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    // Note that this is always the same as the pointer operand's address space
    // and that is cheaper to compute, so cheat here.
    return getPointerAddressSpace();
  }

  /// Returns the result type of a getelementptr with the given source
  /// element type and indexes.
  ///
  /// Null is returned if the indices are invalid for the specified
  /// source element type.
  static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);

  /// Return the type of the element at the given index of an indexable
  /// type.  This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
  ///
  /// Returns null if the type can't be indexed, or the given index is not
  /// legal for the given type.
  static Type *getTypeAtIndex(Type *Ty, Value *Idx);
  static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);

  // Iterators over just the index operands (operand 0 is the base pointer,
  // so the index range starts at op_begin()+1).
  inline op_iterator idx_begin() { return op_begin()+1; }
  inline const_op_iterator idx_begin() const { return op_begin()+1; }
  inline op_iterator idx_end() { return op_end(); }
  inline const_op_iterator idx_end() const { return op_end(); }

  inline iterator_range<op_iterator> indices() {
    return make_range(idx_begin(), idx_end());
  }

  inline iterator_range<const_op_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getPointerOperand() {
    return getOperand(0);
  }
  const Value *getPointerOperand() const {
    return getOperand(0);
  }
  static unsigned getPointerOperandIndex() {
    return 0U; // get index for modifying correct operand.
  }

  /// Method to return the pointer operand as a
  /// PointerType.
  Type *getPointerOperandType() const {
    return getPointerOperand()->getType();
  }

  /// Returns the address space of the pointer operand.
1079 unsigned getPointerAddressSpace() const { 1080 return getPointerOperandType()->getPointerAddressSpace(); 1081 } 1082 1083 /// Returns the pointer type returned by the GEP 1084 /// instruction, which may be a vector of pointers. 1085 static Type *getGEPReturnType(Type *ElTy, Value *Ptr, 1086 ArrayRef<Value *> IdxList) { 1087 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)), 1088 Ptr->getType()->getPointerAddressSpace()); 1089 // Vector GEP 1090 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) { 1091 ElementCount EltCount = PtrVTy->getElementCount(); 1092 return VectorType::get(PtrTy, EltCount); 1093 } 1094 for (Value *Index : IdxList) 1095 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) { 1096 ElementCount EltCount = IndexVTy->getElementCount(); 1097 return VectorType::get(PtrTy, EltCount); 1098 } 1099 // Scalar GEP 1100 return PtrTy; 1101 } 1102 1103 unsigned getNumIndices() const { // Note: always non-negative 1104 return getNumOperands() - 1; 1105 } 1106 1107 bool hasIndices() const { 1108 return getNumOperands() > 1; 1109 } 1110 1111 /// Return true if all of the indices of this GEP are 1112 /// zeros. If so, the result pointer and the first operand have the same 1113 /// value, just potentially different types. 1114 bool hasAllZeroIndices() const; 1115 1116 /// Return true if all of the indices of this GEP are 1117 /// constant integers. If so, the result pointer and the first operand have 1118 /// a constant offset between them. 1119 bool hasAllConstantIndices() const; 1120 1121 /// Set or clear the inbounds flag on this GEP instruction. 1122 /// See LangRef.html for the meaning of inbounds on a getelementptr. 1123 void setIsInBounds(bool b = true); 1124 1125 /// Determine whether the GEP has the inbounds flag. 1126 bool isInBounds() const; 1127 1128 /// Accumulate the constant address offset of this GEP if possible. 
  ///
  /// This routine accepts an APInt into which it will accumulate the constant
  /// offset of this GEP if the GEP is in fact constant. If the GEP is not
  /// all-constant, it returns false and the value of the offset APInt is
  /// undefined (it is *not* preserved!). The APInt passed into this routine
  /// must be at least as wide as the IntPtr type for the address space of
  /// the base GEP pointer.
  bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
  // Decompose this GEP into a constant offset plus a map from each variable
  // index to its (constant) scale; widths are \p BitWidth bits.
  bool collectOffset(const DataLayout &DL, unsigned BitWidth,
                     SmallDenseMap<Value *, APInt, 8> &VariableOffsets,
                     APInt &ConstantOffset) const;
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::GetElementPtr);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<GetElementPtrInst> :
  public VariadicOperandTraits<GetElementPtrInst, 1> {
};

GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     Instruction *InsertBefore)
    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertBefore),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  assert(ResultElementType ==
         cast<PointerType>(getType()->getScalarType())->getElementType());
  init(Ptr, IdxList, NameStr);
}

GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     BasicBlock *InsertAtEnd)
    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertAtEnd),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  assert(ResultElementType ==
         cast<PointerType>(getType()->getScalarType())->getElementType());
  init(Ptr, IdxList, NameStr);
}

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)

//===----------------------------------------------------------------------===//
//                               ICmpInst Class
//===----------------------------------------------------------------------===//

/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on integers or pointers. The operands
/// must be identical types.
/// Represent an integer comparison operator.
class ICmpInst: public CmpInst {
  // Sanity-check the predicate and operand types; compiled out with NDEBUG.
  void AssertOK() {
    assert(isIntPredicate() &&
           "Invalid ICmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
          "Both operands to ICmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
            getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
           "Invalid operand types for ICmp instruction");
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical ICmpInst
  ICmpInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  ICmpInst(
    Instruction *InsertBefore,  ///< Where to insert
    Predicate pred,             ///< The predicate to use for the comparison
    Value *LHS,                 ///< The left-hand-side of the expression
    Value *RHS,                 ///< The right-hand-side of the expression
    const Twine &NameStr = ""   ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr,
              InsertBefore) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// Constructor with insert-at-end semantics.
  ICmpInst(
    BasicBlock &InsertAtEnd,    ///< Block to insert into.
    Predicate pred,             ///< The predicate to use for the comparison
    Value *LHS,                 ///< The left-hand-side of the expression
    Value *RHS,                 ///< The right-hand-side of the expression
    const Twine &NameStr = ""   ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr,
              &InsertAtEnd) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// Constructor with no-insertion semantics
  ICmpInst(
    Predicate pred,             ///< The predicate to use for the comparison
    Value *LHS,                 ///< The left-hand-side of the expression
    Value *RHS,                 ///< The right-hand-side of the expression
    const Twine &NameStr = ""   ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
  /// @returns the predicate that would be the result if the operand were
  /// regarded as signed.
  /// Return the signed version of the predicate
  Predicate getSignedPredicate() const {
    return getSignedPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction.
  /// Return the signed version of the predicate.
1265 static Predicate getSignedPredicate(Predicate pred); 1266 1267 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc. 1268 /// @returns the predicate that would be the result if the operand were 1269 /// regarded as unsigned. 1270 /// Return the unsigned version of the predicate 1271 Predicate getUnsignedPredicate() const { 1272 return getUnsignedPredicate(getPredicate()); 1273 } 1274 1275 /// This is a static version that you can use without an instruction. 1276 /// Return the unsigned version of the predicate. 1277 static Predicate getUnsignedPredicate(Predicate pred); 1278 1279 /// Return true if this predicate is either EQ or NE. This also 1280 /// tests for commutativity. 1281 static bool isEquality(Predicate P) { 1282 return P == ICMP_EQ || P == ICMP_NE; 1283 } 1284 1285 /// Return true if this predicate is either EQ or NE. This also 1286 /// tests for commutativity. 1287 bool isEquality() const { 1288 return isEquality(getPredicate()); 1289 } 1290 1291 /// @returns true if the predicate of this ICmpInst is commutative 1292 /// Determine if this relation is commutative. 1293 bool isCommutative() const { return isEquality(); } 1294 1295 /// Return true if the predicate is relational (not EQ or NE). 1296 /// 1297 bool isRelational() const { 1298 return !isEquality(); 1299 } 1300 1301 /// Return true if the predicate is relational (not EQ or NE). 1302 /// 1303 static bool isRelational(Predicate P) { 1304 return !isEquality(P); 1305 } 1306 1307 /// Return true if the predicate is SGT or UGT. 1308 /// 1309 static bool isGT(Predicate P) { 1310 return P == ICMP_SGT || P == ICMP_UGT; 1311 } 1312 1313 /// Return true if the predicate is SLT or ULT. 1314 /// 1315 static bool isLT(Predicate P) { 1316 return P == ICMP_SLT || P == ICMP_ULT; 1317 } 1318 1319 /// Return true if the predicate is SGE or UGE. 1320 /// 1321 static bool isGE(Predicate P) { 1322 return P == ICMP_SGE || P == ICMP_UGE; 1323 } 1324 1325 /// Return true if the predicate is SLE or ULE. 
  ///
  static bool isLE(Predicate P) {
    return P == ICMP_SLE || P == ICMP_ULE;
  }

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// Swap operands and adjust predicate.
  void swapOperands() {
    setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ICmp;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                               FCmpInst Class
//===----------------------------------------------------------------------===//

/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on floating point values or packed
/// vectors of floating point values. The operands must be identical types.
/// Represents a floating point comparison operator.
class FCmpInst: public CmpInst {
  // Sanity-check the predicate and operand types; compiled out with NDEBUG.
  void AssertOK() {
    assert(isFPPredicate() && "Invalid FCmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
           "Both operands to FCmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
           "Invalid operand types for FCmp instruction");
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FCmpInst
  FCmpInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  FCmpInst(
    Instruction *InsertBefore, ///< Where to insert
    Predicate pred,            ///< The predicate to use for the comparison
    Value *LHS,                ///< The left-hand-side of the expression
    Value *RHS,                ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::FCmp, pred, LHS, RHS, NameStr,
              InsertBefore) {
    AssertOK();
  }

  /// Constructor with insert-at-end semantics.
  FCmpInst(
    BasicBlock &InsertAtEnd,   ///< Block to insert into.
    Predicate pred,            ///< The predicate to use for the comparison
    Value *LHS,                ///< The left-hand-side of the expression
    Value *RHS,                ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::FCmp, pred, LHS, RHS, NameStr,
              &InsertAtEnd) {
    AssertOK();
  }

  /// Constructor with no-insertion semantics. \p FlagsSource, when non-null,
  /// supplies the fast-math flags copied onto the new instruction.
  FCmpInst(
    Predicate Pred, ///< The predicate to use for the comparison
    Value *LHS,     ///< The left-hand-side of the expression
    Value *RHS,     ///< The right-hand-side of the expression
    const Twine &NameStr = "", ///< Name of the instruction
    Instruction *FlagsSource = nullptr
  ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
              RHS, NameStr, nullptr, FlagsSource) {
    AssertOK();
  }

  /// @returns true if the predicate of this instruction is EQ or NE.
  /// Determine if this is an equality predicate.
  static bool isEquality(Predicate Pred) {
    // Equality covers the ordered and unordered variants of EQ and NE.
    return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
           Pred == FCMP_UNE;
  }

  /// @returns true if the predicate of this instruction is EQ or NE.
  /// Determine if this is an equality predicate.
  bool isEquality() const { return isEquality(getPredicate()); }

  /// @returns true if the predicate of this instruction is commutative.
  /// Determine if this is a commutative predicate.
  bool isCommutative() const {
    // Besides the equality predicates, FALSE/TRUE (constant results) and
    // ORD/UNO (both-operand NaN tests) are insensitive to operand order.
    return isEquality() ||
           getPredicate() == FCMP_FALSE ||
           getPredicate() == FCMP_TRUE ||
           getPredicate() == FCMP_ORD ||
           getPredicate() == FCMP_UNO;
  }

  /// @returns true if the predicate is relational (not EQ or NE).
  /// Determine if this a relational predicate.
  bool isRelational() const { return !isEquality(); }

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// Swap operands and adjust predicate.
  void swapOperands() {
    setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::FCmp;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
/// This class represents a function call, abstracting a target
/// machine's calling convention.  This class uses low bit of the SubClassData
/// field to indicate whether or not this is a tail call.  The rest of the bits
/// hold the calling convention of the call.
///
class CallInst : public CallBase {
  CallInst(const CallInst &CI);

  /// Construct a CallInst from a range of arguments, with operand bundles;
  /// inserted before \p InsertBefore.
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                  Instruction *InsertBefore);

  // Bundle-less convenience form of the above.
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  const Twine &NameStr, Instruction *InsertBefore)
      : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}

  /// Construct a CallInst from a range of arguments, with operand bundles;
  /// appended to \p InsertAtEnd.
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
                    Instruction *InsertBefore);

  CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
           BasicBlock *InsertAtEnd);

  void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
  void init(FunctionType *FTy, Value *Func, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
    // We need one operand for the called function, plus the input operand
    // counts provided.
    return 1 + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CallInst *cloneImpl() const;

public:
  // Factories taking an explicit FunctionType. Each allocates exactly enough
  // hung-off operand space via ComputeNumOperands (callee + args + bundle
  // inputs), plus descriptor bytes for any operand-bundle metadata.
  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr,
                          Instruction *InsertBefore = nullptr) {
    return new (ComputeNumOperands(Args.size()))
        CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles = None,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    const int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return new (ComputeNumOperands(Args.size()))
        CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    const int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
  }

  // Convenience factories taking a FunctionCallee, which pairs the callee
  // value with its function type; all forward to the FunctionType overloads.
  static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles = None,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          const Twine &NameStr,
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertAtEnd);
  }

  /// Create a clone of \p CI with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned call instruction is identical \p CI in every way except that
  /// the operand bundles for the new instruction are set to the operand bundles
  /// in \p Bundles.
  static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
                          Instruction *InsertPt = nullptr);

  /// Generate the IR for a call to malloc:
  /// 1. Compute the malloc call's argument as the specified type's size,
  ///    possibly multiplied by the array size if the array size is not
  ///    constant 1.
  /// 2. Call malloc with that argument.
  /// 3. Bitcast the result of the malloc call to the specified type.
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  // Overloads that additionally attach the given operand bundles to the
  // generated malloc call.
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   ArrayRef<OperandBundleDef> Bundles = None,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   ArrayRef<OperandBundleDef> Bundles = None,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");

  /// Generate the IR for a call to the builtin free function.
  static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
  static Instruction *CreateFree(Value *Source,
                                 ArrayRef<OperandBundleDef> Bundles,
                                 Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source,
                                 ArrayRef<OperandBundleDef> Bundles,
                                 BasicBlock *InsertAtEnd);

  // Note that 'musttail' implies 'tail'.
  enum TailCallKind : unsigned {
    TCK_None = 0,
    TCK_Tail = 1,
    TCK_MustTail = 2,
    TCK_NoTail = 3,
    TCK_LAST = TCK_NoTail
  };

  // The tail-call kind occupies the two low bits of SubclassData, directly
  // below CallBase's calling-convention bits (checked by the static_assert).
  using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
  static_assert(
      Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
      "Bitfields must be contiguous");

  TailCallKind getTailCallKind() const {
    return getSubclassData<TailCallKindField>();
  }

  bool isTailCall() const {
    TailCallKind Kind = getTailCallKind();
    return Kind == TCK_Tail || Kind == TCK_MustTail;
  }

  bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }

  bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }

  void setTailCallKind(TailCallKind TCK) {
    setSubclassData<TailCallKindField>(TCK);
  }

  void setTailCall(bool IsTc = true) {
    setTailCallKind(IsTc ? TCK_Tail : TCK_None);
  }

  /// Return true if the call can return twice
  bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
  void setCanReturnTwice() {
    addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Call;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Updates profile metadata by scaling it by \p S / \p T.
  void updateProfWeight(uint64_t S, uint64_t T);

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
1695 template <typename Bitfield> 1696 void setSubclassData(typename Bitfield::Type Value) { 1697 Instruction::setSubclassData<Bitfield>(Value); 1698 } 1699 }; 1700 1701 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1702 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1703 BasicBlock *InsertAtEnd) 1704 : CallBase(Ty->getReturnType(), Instruction::Call, 1705 OperandTraits<CallBase>::op_end(this) - 1706 (Args.size() + CountBundleInputs(Bundles) + 1), 1707 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), 1708 InsertAtEnd) { 1709 init(Ty, Func, Args, Bundles, NameStr); 1710 } 1711 1712 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, 1713 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, 1714 Instruction *InsertBefore) 1715 : CallBase(Ty->getReturnType(), Instruction::Call, 1716 OperandTraits<CallBase>::op_end(this) - 1717 (Args.size() + CountBundleInputs(Bundles) + 1), 1718 unsigned(Args.size() + CountBundleInputs(Bundles) + 1), 1719 InsertBefore) { 1720 init(Ty, Func, Args, Bundles, NameStr); 1721 } 1722 1723 //===----------------------------------------------------------------------===// 1724 // SelectInst Class 1725 //===----------------------------------------------------------------------===// 1726 1727 /// This class represents the LLVM 'select' instruction. 
///
class SelectInst : public Instruction {
  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             Instruction *InsertBefore)
    : Instruction(S1->getType(), Instruction::Select,
                  &Op<0>(), 3, InsertBefore) {
    init(C, S1, S2);
    setName(NameStr);
  }

  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             BasicBlock *InsertAtEnd)
    : Instruction(S1->getType(), Instruction::Select,
                  &Op<0>(), 3, InsertAtEnd) {
    init(C, S1, S2);
    setName(NameStr);
  }

  // Wire up the three operands: condition, true value, false value.
  void init(Value *C, Value *S1, Value *S2) {
    assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
    Op<0>() = C;
    Op<1>() = S1;
    Op<2>() = S2;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  SelectInst *cloneImpl() const;

public:
  /// Create a select. If \p MDFrom is non-null, all metadata attached to
  /// \p MDFrom is copied onto the new instruction.
  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr,
                            Instruction *MDFrom = nullptr) {
    // new(3): allocate space for exactly three operands.
    SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
    if (MDFrom)
      Sel->copyMetadata(*MDFrom);
    return Sel;
  }

  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
  }

  // Operand 0 is the condition, operand 1 the value selected when the
  // condition is true, operand 2 the value selected when it is false.
  const Value *getCondition() const { return Op<0>(); }
  const Value *getTrueValue() const { return Op<1>(); }
  const Value *getFalseValue() const { return Op<2>(); }
  Value *getCondition() { return Op<0>(); }
  Value *getTrueValue() { return Op<1>(); }
  Value *getFalseValue() { return Op<2>(); }

  void setCondition(Value *V) { Op<0>() = V; }
  void setTrueValue(Value *V) { Op<1>() = V; }
  void setFalseValue(Value *V) { Op<2>() = V; }

  /// Swap the true and false values of the select instruction.
  /// This doesn't swap prof metadata.
  void swapValues() { Op<1>().swap(Op<2>()); }

  /// Return a string if the specified operands are invalid
  /// for a select operation, otherwise return null.
  static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return the opcode as a member of the OtherOps enumeration.
  OtherOps getOpcode() const {
    return static_cast<OtherOps>(Instruction::getOpcode());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Select;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)

//===----------------------------------------------------------------------===//
//                                VAArgInst Class
//===----------------------------------------------------------------------===//

/// This class represents the va_arg llvm instruction, which returns
/// an argument of the specified type given a va_list and increments that list
///
class VAArgInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  VAArgInst *cloneImpl() const;

public:
  VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
            Instruction *InsertBefore = nullptr)
    : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
    setName(NameStr);
  }

  VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
            BasicBlock *InsertAtEnd)
    : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
    setName(NameStr);
  }

  // Accessors for the single operand: the va_list pointer passed to the
  // constructor as \p List.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == VAArg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                ExtractElementInst Class
//===----------------------------------------------------------------------===//

/// This instruction extracts a single (scalar)
/// element from a VectorType value
///
class ExtractElementInst : public Instruction {
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
                     Instruction *InsertBefore = nullptr);
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
                     BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ExtractElementInst *cloneImpl() const;

public:
  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    // new(2): allocate space for exactly two operands (vector, index).
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
  }

  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
  }

  /// Return true if an extractelement instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *Vec, const Value *Idx);

  // Operand 0 is the source vector, operand 1 the element index.
  Value *getVectorOperand() { return Op<0>(); }
  Value *getIndexOperand() { return Op<1>(); }
  const Value *getVectorOperand() const { return Op<0>(); }
  const Value *getIndexOperand() const { return Op<1>(); }

  /// Return the vector operand's type as the most specific VectorType.
  VectorType *getVectorOperandType() const {
    return cast<VectorType>(getVectorOperand()->getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractElement;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<ExtractElementInst> :
  public FixedNumOperandTraits<ExtractElementInst, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)

//===----------------------------------------------------------------------===//
//                                InsertElementInst Class
//===----------------------------------------------------------------------===//

/// This instruction inserts a single (scalar)
/// element into a VectorType value
///
class InsertElementInst : public Instruction {
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
                    const Twine &NameStr = "",
                    Instruction *InsertBefore = nullptr);
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
                    BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InsertElementInst *cloneImpl() const;

public:
  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    // new(3): allocate space for exactly three operands (vector, elt, index).
    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
  }

  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
  }

  /// Return true if an insertelement instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *Vec, const Value *NewElt,
                              const Value *Idx);

  /// Overload to return most specific vector type.
  ///
  VectorType *getType() const {
    return cast<VectorType>(Instruction::getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::InsertElement;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<InsertElementInst> :
  public FixedNumOperandTraits<InsertElementInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)

//===----------------------------------------------------------------------===//
//                           ShuffleVectorInst Class
//===----------------------------------------------------------------------===//

constexpr int UndefMaskElem = -1;

/// This instruction constructs a fixed permutation of two
/// input vectors.
///
/// For each element of the result vector, the shuffle mask selects an element
/// from one of the input vectors to copy to the result. Non-negative elements
/// in the mask represent an index into the concatenated pair of input vectors.
/// UndefMaskElem (-1) specifies that the result element is undefined.
///
/// For scalable vectors, all the elements of the mask must be 0 or -1. This
/// requirement may be relaxed in the future.
class ShuffleVectorInst : public Instruction {
  // The mask is stored out-of-line (not as an operand); see
  // getShuffleMaskForBitcode for the constant form kept for bitcode.
  SmallVector<int, 4> ShuffleMask;
  Constant *ShuffleMaskForBitcode;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
2006 friend class Instruction; 2007 2008 ShuffleVectorInst *cloneImpl() const; 2009 2010 public: 2011 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 2012 const Twine &NameStr = "", 2013 Instruction *InsertBefor = nullptr); 2014 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, 2015 const Twine &NameStr, BasicBlock *InsertAtEnd); 2016 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, 2017 const Twine &NameStr = "", 2018 Instruction *InsertBefor = nullptr); 2019 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, 2020 const Twine &NameStr, BasicBlock *InsertAtEnd); 2021 2022 void *operator new(size_t s) { return User::operator new(s, 2); } 2023 2024 /// Swap the operands and adjust the mask to preserve the semantics 2025 /// of the instruction. 2026 void commute(); 2027 2028 /// Return true if a shufflevector instruction can be 2029 /// formed with the specified operands. 2030 static bool isValidOperands(const Value *V1, const Value *V2, 2031 const Value *Mask); 2032 static bool isValidOperands(const Value *V1, const Value *V2, 2033 ArrayRef<int> Mask); 2034 2035 /// Overload to return most specific vector type. 2036 /// 2037 VectorType *getType() const { 2038 return cast<VectorType>(Instruction::getType()); 2039 } 2040 2041 /// Transparently provide more efficient getOperand methods. 2042 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 2043 2044 /// Return the shuffle mask value of this instruction for the given element 2045 /// index. Return UndefMaskElem if the element is undef. 2046 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; } 2047 2048 /// Convert the input shuffle mask operand to a vector of integers. Undefined 2049 /// elements of the mask are returned as UndefMaskElem. 2050 static void getShuffleMask(const Constant *Mask, 2051 SmallVectorImpl<int> &Result); 2052 2053 /// Return the mask for this instruction as a vector of integers. Undefined 2054 /// elements of the mask are returned as UndefMaskElem. 
  void getShuffleMask(SmallVectorImpl<int> &Result) const {
    Result.assign(ShuffleMask.begin(), ShuffleMask.end());
  }

  /// Return the mask for this instruction, for use in bitcode.
  ///
  /// TODO: This is temporary until we decide a new bitcode encoding for
  /// shufflevector.
  Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }

  /// Build the constant form of \p Mask (element type derived from
  /// \p ResultTy) used when serializing to bitcode.
  static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
                                                Type *ResultTy);

  /// Replace the mask of this shuffle with \p Mask.
  void setShuffleMask(ArrayRef<int> Mask);

  ArrayRef<int> getShuffleMask() const { return ShuffleMask; }

  /// Return true if this shuffle returns a vector with a different number of
  /// elements than its source vectors.
  /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
  ///           shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
  bool changesLength() const {
    // Use the known-minimum element count so this also works for scalable
    // vectors.
    unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
                                 ->getElementCount()
                                 .getKnownMinValue();
    unsigned NumMaskElts = ShuffleMask.size();
    return NumSourceElts != NumMaskElts;
  }

  /// Return true if this shuffle returns a vector with a greater number of
  /// elements than its source vectors.
  /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
  bool increasesLength() const {
    unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
                                 ->getElementCount()
                                 .getKnownMinValue();
    unsigned NumMaskElts = ShuffleMask.size();
    return NumSourceElts < NumMaskElts;
  }

  /// Return true if this shuffle mask chooses elements from exactly one source
  /// vector.
  /// Example: <7,5,undef,7>
  /// This assumes that vector operands are the same length as the mask.
  static bool isSingleSourceMask(ArrayRef<int> Mask);
  static bool isSingleSourceMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isSingleSourceMask(MaskAsInts);
  }

  /// Return true if this shuffle chooses elements from exactly one source
  /// vector without changing the length of that vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
  /// TODO: Optionally allow length-changing shuffles.
  bool isSingleSource() const {
    return !changesLength() && isSingleSourceMask(ShuffleMask);
  }

  /// Return true if this shuffle mask chooses elements from exactly one source
  /// vector without lane crossings. A shuffle using this mask is not
  /// necessarily a no-op because it may change the number of elements from its
  /// input vectors or it may provide demanded bits knowledge via undef lanes.
  /// Example: <undef,undef,2,3>
  static bool isIdentityMask(ArrayRef<int> Mask);
  static bool isIdentityMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isIdentityMask(MaskAsInts);
  }

  /// Return true if this shuffle chooses elements from exactly one source
  /// vector without lane crossings and does not change the number of elements
  /// from its input vectors.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
  bool isIdentity() const {
    return !changesLength() && isIdentityMask(ShuffleMask);
  }

  /// Return true if this shuffle lengthens exactly one source vector with
  /// undefs in the high elements.
  bool isIdentityWithPadding() const;

  /// Return true if this shuffle extracts the first N elements of exactly one
  /// source vector.
  bool isIdentityWithExtract() const;

  /// Return true if this shuffle concatenates its 2 source vectors. This
  /// returns false if either input is undefined. In that case, the shuffle is
  /// better classified as an identity with padding operation.
  bool isConcat() const;

  /// Return true if this shuffle mask chooses elements from its source vectors
  /// without lane crossings. A shuffle using this mask would be
  /// equivalent to a vector select with a constant condition operand.
  /// Example: <4,1,6,undef>
  /// This returns false if the mask does not choose from both input vectors.
  /// In that case, the shuffle is better classified as an identity shuffle.
  /// This assumes that vector operands are the same length as the mask
  /// (a length-changing shuffle can never be equivalent to a vector select).
  static bool isSelectMask(ArrayRef<int> Mask);
  static bool isSelectMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isSelectMask(MaskAsInts);
  }

  /// Return true if this shuffle chooses elements from its source vectors
  /// without lane crossings and all operands have the same number of elements.
  /// In other words, this shuffle is equivalent to a vector select with a
  /// constant condition operand.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
  /// This returns false if the mask does not choose from both input vectors.
  /// In that case, the shuffle is better classified as an identity shuffle.
  /// TODO: Optionally allow length-changing shuffles.
  bool isSelect() const {
    return !changesLength() && isSelectMask(ShuffleMask);
  }

  /// Return true if this shuffle mask swaps the order of elements from exactly
  /// one source vector.
  /// Example: <7,6,undef,4>
  /// This assumes that vector operands are the same length as the mask.
  static bool isReverseMask(ArrayRef<int> Mask);
  static bool isReverseMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isReverseMask(MaskAsInts);
  }

  /// Return true if this shuffle swaps the order of elements from exactly
  /// one source vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
  /// TODO: Optionally allow length-changing shuffles.
  bool isReverse() const {
    return !changesLength() && isReverseMask(ShuffleMask);
  }

  /// Return true if this shuffle mask chooses all elements with the same value
  /// as the first element of exactly one source vector.
  /// Example: <4,undef,undef,4>
  /// This assumes that vector operands are the same length as the mask.
  static bool isZeroEltSplatMask(ArrayRef<int> Mask);
  static bool isZeroEltSplatMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isZeroEltSplatMask(MaskAsInts);
  }

  /// Return true if all elements of this shuffle are the same value as the
  /// first element of exactly one source vector without changing the length
  /// of that vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
  /// TODO: Optionally allow length-changing shuffles.
  /// TODO: Optionally allow splats from other elements.
  bool isZeroEltSplat() const {
    return !changesLength() && isZeroEltSplatMask(ShuffleMask);
  }

  /// Return true if this shuffle mask is a transpose mask.
  /// Transpose vector masks transpose a 2xn matrix. They read corresponding
  /// even- or odd-numbered vector elements from two n-dimensional source
  /// vectors and write each result into consecutive elements of an
  /// n-dimensional destination vector. Two shuffles are necessary to complete
  /// the transpose, one for the even elements and another for the odd elements.
  /// This description closely follows how the TRN1 and TRN2 AArch64
  /// instructions operate.
  ///
  /// For example, a simple 2x2 matrix can be transposed with:
  ///
  ///   ; Original matrix
  ///   m0 = < a, b >
  ///   m1 = < c, d >
  ///
  ///   ; Transposed matrix
  ///   t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
  ///   t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
  ///
  /// For matrices having greater than n columns, the resulting nx2 transposed
  /// matrix is stored in two result vectors such that one vector contains
  /// interleaved elements from all the even-numbered rows and the other vector
  /// contains interleaved elements from all the odd-numbered rows. For example,
  /// a 2x4 matrix can be transposed with:
  ///
  ///   ; Original matrix
  ///   m0 = < a, b, c, d >
  ///   m1 = < e, f, g, h >
  ///
  ///   ; Transposed matrix
  ///   t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
  ///   t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
  static bool isTransposeMask(ArrayRef<int> Mask);
  static bool isTransposeMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isTransposeMask(MaskAsInts);
  }

  /// Return true if this shuffle transposes the elements of its inputs without
  /// changing the length of the vectors. This operation may also be known as a
  /// merge or interleave. See the description for isTransposeMask() for the
  /// exact specification.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
  bool isTranspose() const {
    return !changesLength() && isTransposeMask(ShuffleMask);
  }

  /// Return true if this shuffle mask is an extract subvector mask.
  /// A valid extract subvector mask returns a smaller vector from a single
  /// source operand. The base extraction index is returned as well.
  static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
                                     int &Index);
  static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
                                     int &Index) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
  }

  /// Return true if this shuffle mask is an extract subvector mask.
  bool isExtractSubvectorMask(int &Index) const {
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(getType()))
      return false;

    int NumSrcElts =
        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
    return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
  }

  /// Change values in a shuffle permute mask assuming the two vector operands
  /// of length InVecNumElts have swapped position.
  static void commuteShuffleMask(MutableArrayRef<int> Mask,
                                 unsigned InVecNumElts) {
    for (int &Idx : Mask) {
      // -1 (undef) lanes are source-independent and stay as-is.
      if (Idx == -1)
        continue;
      Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
      assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
             "shufflevector mask index out of range");
    }
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ShuffleVector;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<ShuffleVectorInst>
    : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)

//===----------------------------------------------------------------------===//
//                                ExtractValueInst Class
//===----------------------------------------------------------------------===//

/// This instruction extracts a struct member or array
/// element value from an aggregate value.
///
class ExtractValueInst : public UnaryInstruction {
  SmallVector<unsigned, 4> Indices;

  ExtractValueInst(const ExtractValueInst &EVI);

  /// Constructors - Create an extractvalue instruction with a base aggregate
  /// value and a list of indices. The first ctor can optionally insert before
  /// an existing instruction, the second appends the new instruction to the
  /// specified BasicBlock.
  inline ExtractValueInst(Value *Agg,
                          ArrayRef<unsigned> Idxs,
                          const Twine &NameStr,
                          Instruction *InsertBefore);
  inline ExtractValueInst(Value *Agg,
                          ArrayRef<unsigned> Idxs,
                          const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ExtractValueInst *cloneImpl() const;

public:
  static ExtractValueInst *Create(Value *Agg,
                                  ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr = "",
                                  Instruction *InsertBefore = nullptr) {
    return new
      ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
  }

  static ExtractValueInst *Create(Value *Agg,
                                  ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr,
                                  BasicBlock *InsertAtEnd) {
    return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
  }

  /// Returns the type of the element that would be extracted
  /// with an extractvalue instruction with the specified parameters.
  ///
  /// Null is returned if the indices are invalid for the specified type.
  static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);

  using idx_iterator = const unsigned*;

  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end() const { return Indices.end(); }
  inline iterator_range<idx_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getAggregateOperand() {
    return getOperand(0);
  }
  const Value *getAggregateOperand() const {
    return getOperand(0);
  }
  static unsigned getAggregateOperandIndex() {
    return 0U; // get index for modifying correct operand
  }

  ArrayRef<unsigned> getIndices() const {
    return Indices;
  }

  unsigned getNumIndices() const {
    return (unsigned)Indices.size();
  }

  // An extractvalue instruction always carries at least one index, so this
  // is unconditionally true.
  bool hasIndices() const {
    return true;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractValue;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

// The result type is computed from the aggregate type and the index list;
// checkGEPType asserts that the indices were valid (non-null result).
ExtractValueInst::ExtractValueInst(Value *Agg,
                                   ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   Instruction *InsertBefore)
  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                     ExtractValue, Agg, InsertBefore) {
  init(Idxs, NameStr);
}

ExtractValueInst::ExtractValueInst(Value *Agg,
                                   ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd)
  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                     ExtractValue, Agg, InsertAtEnd) {
  init(Idxs, NameStr);
}

//===----------------------------------------------------------------------===//
//                                InsertValueInst Class
//===----------------------------------------------------------------------===//

/// This instruction inserts a struct member or array element
/// value into an aggregate value.
///
class InsertValueInst : public Instruction {
  SmallVector<unsigned, 4> Indices;

  InsertValueInst(const InsertValueInst &IVI);

  /// Constructors - Create an insertvalue instruction with a base aggregate
  /// value, a value to insert, and a list of indices. The first ctor can
  /// optionally insert before an existing instruction, the second appends
  /// the new instruction to the specified BasicBlock.
  inline InsertValueInst(Value *Agg, Value *Val,
                         ArrayRef<unsigned> Idxs,
                         const Twine &NameStr,
                         Instruction *InsertBefore);
  inline InsertValueInst(Value *Agg, Value *Val,
                         ArrayRef<unsigned> Idxs,
                         const Twine &NameStr, BasicBlock *InsertAtEnd);

  /// Constructors - These two constructors are convenience methods because one
  /// and two index insertvalue instructions are so common.
  InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
                  const Twine &NameStr = "",
                  Instruction *InsertBefore = nullptr);
  InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
            const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InsertValueInst *cloneImpl() const;

public:
  // allocate space for exactly two operands
  void *operator new(size_t s) {
    return User::operator new(s, 2);
  }

  static InsertValueInst *Create(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr = "",
                                 Instruction *InsertBefore = nullptr) {
    return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
  }

  static InsertValueInst *Create(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 BasicBlock *InsertAtEnd) {
    return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  using idx_iterator = const unsigned*;

  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end() const { return Indices.end(); }
  inline iterator_range<idx_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  // Operand 0 is the aggregate being updated, operand 1 the value to insert.
  Value *getAggregateOperand() {
    return getOperand(0);
  }
  const Value *getAggregateOperand() const {
    return getOperand(0);
  }
  static unsigned getAggregateOperandIndex() {
    return 0U; // get index for modifying correct operand
  }

  Value *getInsertedValueOperand() {
    return getOperand(1);
  }
  const Value *getInsertedValueOperand() const {
    return getOperand(1);
  }
  static unsigned getInsertedValueOperandIndex() {
    return 1U; // get index for modifying correct operand
  }

  ArrayRef<unsigned> getIndices() const {
    return Indices;
  }

  unsigned getNumIndices() const {
    return (unsigned)Indices.size();
  }

  // An insertvalue instruction always carries at least one index, so this
  // is unconditionally true.
  bool hasIndices() const {
    return true;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::InsertValue;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<InsertValueInst> :
  public FixedNumOperandTraits<InsertValueInst, 2> {
};

// The result type is the aggregate's own type: insertvalue yields a copy of
// the aggregate with one element replaced.
InsertValueInst::InsertValueInst(Value *Agg,
                                 Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
  : Instruction(Agg->getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this),
                2, InsertBefore) {
  init(Agg, Val, Idxs, NameStr);
}

InsertValueInst::InsertValueInst(Value *Agg,
                                 Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 BasicBlock *InsertAtEnd)
  : Instruction(Agg->getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this),
                2, InsertAtEnd) {
  init(Agg, Val, Idxs, NameStr);
}

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)

//===----------------------------------------------------------------------===//
//                               PHINode Class
//===----------------------------------------------------------------------===//

// PHINode - The PHINode class is used to represent the magical mystical PHI
// node, that can not exist in nature, but can be synthesized in a computer
// scientist's overactive imagination.
//
class PHINode : public Instruction {
  /// The number of operands actually allocated.  NumOperands is
  /// the number actually in use.
  /// NumOperands is the number actually in use; ReservedSpace is the
  /// capacity of the hung-off operand allocation.
  unsigned ReservedSpace;

  PHINode(const PHINode &PN);

  explicit PHINode(Type *Ty, unsigned NumReservedValues,
                   const Twine &NameStr = "",
                   Instruction *InsertBefore = nullptr)
    : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
      ReservedSpace(NumReservedValues) {
    assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
    setName(NameStr);
    allocHungoffUses(ReservedSpace);
  }

  PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
          BasicBlock *InsertAtEnd)
    : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
      ReservedSpace(NumReservedValues) {
    assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
    setName(NameStr);
    allocHungoffUses(ReservedSpace);
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  PHINode *cloneImpl() const;

  // allocHungoffUses - this is more complicated than the generic
  // User::allocHungoffUses, because we have to allocate Uses for the incoming
  // values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }

public:
  /// Constructors - NumReservedValues is a hint for the number of incoming
  /// edges that this phi node will have (use 0 if you really have no idea).
  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
                         const Twine &NameStr = "",
                         Instruction *InsertBefore = nullptr) {
    return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
  }

  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
                         const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Block iterator interface. This provides access to the list of incoming
  // basic blocks, which parallels the list of incoming values.

  using block_iterator = BasicBlock **;
  using const_block_iterator = BasicBlock * const *;

  block_iterator block_begin() {
    // The incoming block pointers are laid out immediately after the
    // ReservedSpace incoming-value Uses in the same hung-off allocation
    // (see allocHungoffUses above).
    return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
  }

  const_block_iterator block_begin() const {
    return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
  }

  block_iterator block_end() {
    return block_begin() + getNumOperands();
  }

  const_block_iterator block_end() const {
    return block_begin() + getNumOperands();
  }

  iterator_range<block_iterator> blocks() {
    return make_range(block_begin(), block_end());
  }

  iterator_range<const_block_iterator> blocks() const {
    return make_range(block_begin(), block_end());
  }

  op_range incoming_values() { return operands(); }

  const_op_range incoming_values() const { return operands(); }

  /// Return the number of incoming edges
  ///
  unsigned getNumIncomingValues() const { return getNumOperands(); }

  /// Return incoming value number x
  ///
  Value *getIncomingValue(unsigned i) const {
    return getOperand(i);
  }
  void setIncomingValue(unsigned i, Value *V) {
    assert(V && "PHI node got a null value!");
    assert(getType() == V->getType() &&
           "All operands to PHI node must be the same type as the PHI node!");
    setOperand(i, V);
  }

  static unsigned getOperandNumForIncomingValue(unsigned i) {
    return i;
  }

  static unsigned getIncomingValueNumForOperand(unsigned i) {
    return i;
  }

  /// Return incoming basic block number @p i.
  ///
  BasicBlock *getIncomingBlock(unsigned i) const {
    return block_begin()[i];
  }

  /// Return incoming basic block corresponding
  /// to an operand of the PHI.
  ///
  BasicBlock *getIncomingBlock(const Use &U) const {
    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    return getIncomingBlock(unsigned(&U - op_begin()));
  }

  /// Return incoming basic block corresponding
  /// to value use iterator.
  ///
  BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
    return getIncomingBlock(I.getUse());
  }

  void setIncomingBlock(unsigned i, BasicBlock *BB) {
    assert(BB && "PHI node got a null basic block!");
    block_begin()[i] = BB;
  }

  /// Replace every incoming basic block \p Old to basic block \p New.
  void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
    assert(New && Old && "PHI node got a null basic block!");
    for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
      if (getIncomingBlock(Op) == Old)
        setIncomingBlock(Op, New);
  }

  /// Add an incoming value to the end of the PHI list
  ///
  void addIncoming(Value *V, BasicBlock *BB) {
    if (getNumOperands() == ReservedSpace)
      growOperands();  // Get more space!
    // Initialize some new operands.
    setNumHungOffUseOperands(getNumOperands() + 1);
    setIncomingValue(getNumOperands() - 1, V);
    setIncomingBlock(getNumOperands() - 1, BB);
  }

  /// Remove an incoming value.  This is useful if a
  /// predecessor basic block is deleted.  The value removed is returned.
  ///
  /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
  /// is true), the PHI node is destroyed and any uses of it are replaced with
  /// dummy values.  The only time there should be zero incoming values to a PHI
  /// node is when the block is dead, so this strategy is sound.
  ///
  Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);

  Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument to remove!");
    return removeIncomingValue(Idx, DeletePHIIfEmpty);
  }

  /// Return the first index of the specified basic
  /// block in the value list for this PHI.  Returns -1 if no instance.
  ///
  int getBasicBlockIndex(const BasicBlock *BB) const {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (block_begin()[i] == BB)
        return i;
    return -1;
  }

  Value *getIncomingValueForBlock(const BasicBlock *BB) const {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument!");
    return getIncomingValue(Idx);
  }

  /// Set every incoming value(s) for block \p BB to \p V.
  void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
    assert(BB && "PHI node got a null basic block!");
    bool Found = false;
    for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
      if (getIncomingBlock(Op) == BB) {
        Found = true;
        setIncomingValue(Op, V);
      }
    (void)Found; // Silence the unused-variable warning in release builds.
    assert(Found && "Invalid basic block argument to set!");
  }

  /// If the specified PHI node always merges together the
  /// same value, return the value, otherwise return null.
  Value *hasConstantValue() const;

  /// Whether the specified PHI node always merges
  /// together the same value, assuming undefs are equal to a unique
  /// non-undef value.
  bool hasConstantOrUndefValue() const;

  /// If the PHI node is complete which means all of its parent's predecessors
  /// have incoming value in this PHI, return true, otherwise return false.
  bool isComplete() const {
    return llvm::all_of(predecessors(getParent()),
                        [this](const BasicBlock *Pred) {
                          return getBasicBlockIndex(Pred) >= 0;
                        });
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::PHI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  void growOperands();
};

template <>
struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)

//===----------------------------------------------------------------------===//
//                           LandingPadInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// The landingpad instruction holds all of the information
/// necessary to generate correct exception handling. The landingpad instruction
/// cannot be moved from the top of a landing pad block, which itself is
/// accessible only from the 'unwind' edge of an invoke. This uses the
/// SubclassData field in Value to store whether or not the landingpad is a
/// cleanup.
///
class LandingPadInst : public Instruction {
  using CleanupField = BoolBitfieldElementT<0>;

  /// The number of operands actually allocated.
  /// NumOperands is the number actually in use.
  unsigned ReservedSpace;

  LandingPadInst(const LandingPadInst &LP);

public:
  enum ClauseType { Catch, Filter };

private:
  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                          const Twine &NameStr, Instruction *InsertBefore);
  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                          const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Allocate space for exactly zero operands. Clauses are stored as
  // hung-off uses and grown on demand (see growOperands).
  void *operator new(size_t s) {
    return User::operator new(s);
  }

  void growOperands(unsigned Size);
  void init(unsigned NumReservedValues, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LandingPadInst *cloneImpl() const;

public:
  /// Constructors - NumReservedClauses is a hint for the number of incoming
  /// clauses that this landingpad will have (use 0 if you really have no idea).
  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
                                const Twine &NameStr = "",
                                Instruction *InsertBefore = nullptr);
  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
                                const Twine &NameStr, BasicBlock *InsertAtEnd);

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return 'true' if this landingpad instruction is a
  /// cleanup. I.e., it should be run when unwinding even if its landing pad
  /// doesn't catch the exception.
  bool isCleanup() const { return getSubclassData<CleanupField>(); }

  /// Indicate that this landingpad instruction is a cleanup.
  void setCleanup(bool V) { setSubclassData<CleanupField>(V); }

  /// Add a catch or filter clause to the landing pad.
  void addClause(Constant *ClauseVal);

  /// Get the value of the clause at index Idx. Use isCatch/isFilter to
  /// determine what type of clause this is.
  Constant *getClause(unsigned Idx) const {
    return cast<Constant>(getOperandList()[Idx]);
  }

  /// Return 'true' if the clause and index Idx is a catch clause.
  /// Filter clauses have array type, catch clauses do not.
  bool isCatch(unsigned Idx) const {
    return !isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// Return 'true' if the clause and index Idx is a filter clause.
  bool isFilter(unsigned Idx) const {
    return isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// Get the number of clauses for this landing pad.
  unsigned getNumClauses() const { return getNumOperands(); }

  /// Grow the size of the operand list to accommodate the new
  /// number of clauses.
  void reserveClauses(unsigned Size) { growOperands(Size); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::LandingPad;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)

//===----------------------------------------------------------------------===//
//                               ReturnInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Return a value (possibly void), from a function.  Execution
/// does not continue in this function any longer.
///
class ReturnInst : public Instruction {
  ReturnInst(const ReturnInst &RI);

private:
  // ReturnInst constructors:
  // ReturnInst()                  - 'ret void' instruction
  // ReturnInst(    null)          - 'ret void' instruction
  // ReturnInst(Value* X)          - 'ret X'    instruction
  // ReturnInst(    null, Inst *I) - 'ret void' instruction, insert before I
  // ReturnInst(Value* X, Inst *I) - 'ret X'    instruction, insert before I
  // ReturnInst(    null, BB *B)   - 'ret void' instruction, insert @ end of B
  // ReturnInst(Value* X, BB *B)   - 'ret X'    instruction, insert @ end of B
  //
  // NOTE: If the Value* passed is of type void then the constructor behaves as
  // if it was passed NULL.
  explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
                      Instruction *InsertBefore = nullptr);
  ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
  explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ReturnInst *cloneImpl() const;

public:
  // Note: `new(!!retVal)` allocates one operand slot for 'ret X' and zero
  // operand slots for 'ret void'.
  static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
                            Instruction *InsertBefore = nullptr) {
    return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
  }

  static ReturnInst* Create(LLVMContext &C, Value *retVal,
                            BasicBlock *InsertAtEnd) {
    return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
  }

  static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
    return new(0) ReturnInst(C, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Convenience accessor. Returns null if there is no return value.
  Value *getReturnValue() const {
    return getNumOperands() != 0 ? getOperand(0) : nullptr;
  }

  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Ret);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Private because a return instruction terminates the function; these exist
  // only to satisfy the generic terminator interface and must never be called.
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("ReturnInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *B) {
    llvm_unreachable("ReturnInst has no successors!");
  }
};

template <>
struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)

//===----------------------------------------------------------------------===//
//                               BranchInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Conditional or Unconditional Branch instruction.
///
class BranchInst : public Instruction {
  /// Ops list - Branches are strange.  The operands are ordered:
  ///  [Cond, FalseDest,] TrueDest.  This makes some accessors faster because
  /// they don't have to check for cond/uncond branchness. These are mostly
  /// accessed relative from op_end().
  BranchInst(const BranchInst &BI);

  // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
  // BranchInst(BB *B)                           - 'br B'
  // BranchInst(BB* T, BB *F, Value *C)          - 'br C, T, F'
  // BranchInst(BB* B, Inst *I)                  - 'br B'        insert before I
  // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
  // BranchInst(BB* B, BB *I)                    - 'br B'        insert at end
  // BranchInst(BB* T, BB *F, Value *C, BB *I)   - 'br C, T, F', insert at end
  explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
             Instruction *InsertBefore = nullptr);
  BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
             BasicBlock *InsertAtEnd);

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  BranchInst *cloneImpl() const;

public:
  /// Iterator type that casts an operand to a basic block.
  ///
  /// This only makes sense because the successors are stored as adjacent
  /// operands for branch instructions.
  struct succ_op_iterator
      : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
                              std::random_access_iterator_tag, BasicBlock *,
                              ptrdiff_t, BasicBlock *, BasicBlock *> {
    explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}

    BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    BasicBlock *operator->() const { return operator*(); }
  };

  /// The const version of `succ_op_iterator`.
  struct const_succ_op_iterator
      : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
                              std::random_access_iterator_tag,
                              const BasicBlock *, ptrdiff_t, const BasicBlock *,
                              const BasicBlock *> {
    explicit const_succ_op_iterator(const_value_op_iterator I)
        : iterator_adaptor_base(I) {}

    const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    const BasicBlock *operator->() const { return operator*(); }
  };

  // Note: the `new(1)`/`new(3)` placement forms allocate exactly the number
  // of operands needed for an unconditional/conditional branch respectively.
  static BranchInst *Create(BasicBlock *IfTrue,
                            Instruction *InsertBefore = nullptr) {
    return new(1) BranchInst(IfTrue, InsertBefore);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
                            Value *Cond, Instruction *InsertBefore = nullptr) {
    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
    return new(1) BranchInst(IfTrue, InsertAtEnd);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
                            Value *Cond, BasicBlock *InsertAtEnd) {
    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  bool isUnconditional() const { return getNumOperands() == 1; }
  bool isConditional() const { return getNumOperands() == 3; }

  // Operands are addressed relative to op_end() (see the Ops list comment on
  // the class): Op<-1> is TrueDest, Op<-2> is FalseDest, Op<-3> is Cond.
  Value *getCondition() const {
    assert(isConditional() && "Cannot get condition of an uncond branch!");
    return Op<-3>();
  }

  void setCondition(Value *V) {
    assert(isConditional() && "Cannot set condition of unconditional branch!");
    Op<-3>() = V;
  }

  unsigned getNumSuccessors() const { return 1+isConditional(); }

  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
    return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
  }

  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
    *(&Op<-1>() - idx) = NewSucc;
  }

  /// Swap the successors of this branch instruction.
  ///
  /// Swaps the successors of the branch instruction. This also swaps any
  /// branch weight metadata associated with the instruction so that it
  /// continues to map correctly to each operand.
  void swapSuccessors();

  iterator_range<succ_op_iterator> successors() {
    return make_range(
        succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
        succ_op_iterator(value_op_end()));
  }

  iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(
                          std::next(value_op_begin(), isConditional() ? 1 : 0)),
                      const_succ_op_iterator(value_op_end()));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Br);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)

//===----------------------------------------------------------------------===//
//                               SwitchInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Multiway switch
///
class SwitchInst : public Instruction {
  unsigned ReservedSpace;

  // Operand[0]    = Value to switch on
  // Operand[1]    = Default basic block destination
  // Operand[2n  ] = Value to match
  // Operand[2n+1] = BasicBlock to go to on match
  SwitchInst(const SwitchInst &SI);

  /// Create a new switch instruction, specifying a value to switch on and a
  /// default destination. The number of additional cases can be specified here
  /// to make memory allocation more efficient. This constructor can also
  /// auto-insert before another instruction.
  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
             Instruction *InsertBefore);

  /// Create a new switch instruction, specifying a value to switch on and a
  /// default destination. The number of additional cases can be specified here
  /// to make memory allocation more efficient. This constructor also
  /// auto-inserts at the end of the specified BasicBlock.
3185 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, 3186 BasicBlock *InsertAtEnd); 3187 3188 // allocate space for exactly zero operands 3189 void *operator new(size_t s) { 3190 return User::operator new(s); 3191 } 3192 3193 void init(Value *Value, BasicBlock *Default, unsigned NumReserved); 3194 void growOperands(); 3195 3196 protected: 3197 // Note: Instruction needs to be a friend here to call cloneImpl. 3198 friend class Instruction; 3199 3200 SwitchInst *cloneImpl() const; 3201 3202 public: 3203 // -2 3204 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1); 3205 3206 template <typename CaseHandleT> class CaseIteratorImpl; 3207 3208 /// A handle to a particular switch case. It exposes a convenient interface 3209 /// to both the case value and the successor block. 3210 /// 3211 /// We define this as a template and instantiate it to form both a const and 3212 /// non-const handle. 3213 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT> 3214 class CaseHandleImpl { 3215 // Directly befriend both const and non-const iterators. 3216 friend class SwitchInst::CaseIteratorImpl< 3217 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>; 3218 3219 protected: 3220 // Expose the switch type we're parameterized with to the iterator. 3221 using SwitchInstType = SwitchInstT; 3222 3223 SwitchInstT *SI; 3224 ptrdiff_t Index; 3225 3226 CaseHandleImpl() = default; 3227 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {} 3228 3229 public: 3230 /// Resolves case value for current case. 3231 ConstantIntT *getCaseValue() const { 3232 assert((unsigned)Index < SI->getNumCases() && 3233 "Index out the number of cases."); 3234 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2)); 3235 } 3236 3237 /// Resolves successor for current case. 
3238 BasicBlockT *getCaseSuccessor() const { 3239 assert(((unsigned)Index < SI->getNumCases() || 3240 (unsigned)Index == DefaultPseudoIndex) && 3241 "Index out the number of cases."); 3242 return SI->getSuccessor(getSuccessorIndex()); 3243 } 3244 3245 /// Returns number of current case. 3246 unsigned getCaseIndex() const { return Index; } 3247 3248 /// Returns successor index for current case successor. 3249 unsigned getSuccessorIndex() const { 3250 assert(((unsigned)Index == DefaultPseudoIndex || 3251 (unsigned)Index < SI->getNumCases()) && 3252 "Index out the number of cases."); 3253 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0; 3254 } 3255 3256 bool operator==(const CaseHandleImpl &RHS) const { 3257 assert(SI == RHS.SI && "Incompatible operators."); 3258 return Index == RHS.Index; 3259 } 3260 }; 3261 3262 using ConstCaseHandle = 3263 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>; 3264 3265 class CaseHandle 3266 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> { 3267 friend class SwitchInst::CaseIteratorImpl<CaseHandle>; 3268 3269 public: 3270 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {} 3271 3272 /// Sets the new value for current case. 3273 void setValue(ConstantInt *V) { 3274 assert((unsigned)Index < SI->getNumCases() && 3275 "Index out the number of cases."); 3276 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V)); 3277 } 3278 3279 /// Sets the new successor for current case. 
3280 void setSuccessor(BasicBlock *S) { 3281 SI->setSuccessor(getSuccessorIndex(), S); 3282 } 3283 }; 3284 3285 template <typename CaseHandleT> 3286 class CaseIteratorImpl 3287 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>, 3288 std::random_access_iterator_tag, 3289 CaseHandleT> { 3290 using SwitchInstT = typename CaseHandleT::SwitchInstType; 3291 3292 CaseHandleT Case; 3293 3294 public: 3295 /// Default constructed iterator is in an invalid state until assigned to 3296 /// a case for a particular switch. 3297 CaseIteratorImpl() = default; 3298 3299 /// Initializes case iterator for given SwitchInst and for given 3300 /// case number. 3301 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {} 3302 3303 /// Initializes case iterator for given SwitchInst and for given 3304 /// successor index. 3305 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI, 3306 unsigned SuccessorIndex) { 3307 assert(SuccessorIndex < SI->getNumSuccessors() && 3308 "Successor index # out of range!"); 3309 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1) 3310 : CaseIteratorImpl(SI, DefaultPseudoIndex); 3311 } 3312 3313 /// Support converting to the const variant. This will be a no-op for const 3314 /// variant. 3315 operator CaseIteratorImpl<ConstCaseHandle>() const { 3316 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index); 3317 } 3318 3319 CaseIteratorImpl &operator+=(ptrdiff_t N) { 3320 // Check index correctness after addition. 3321 // Note: Index == getNumCases() means end(). 3322 assert(Case.Index + N >= 0 && 3323 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && 3324 "Case.Index out the number of cases."); 3325 Case.Index += N; 3326 return *this; 3327 } 3328 CaseIteratorImpl &operator-=(ptrdiff_t N) { 3329 // Check index correctness after subtraction. 3330 // Note: Case.Index == getNumCases() means end(). 
3331 assert(Case.Index - N >= 0 && 3332 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && 3333 "Case.Index out the number of cases."); 3334 Case.Index -= N; 3335 return *this; 3336 } 3337 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const { 3338 assert(Case.SI == RHS.Case.SI && "Incompatible operators."); 3339 return Case.Index - RHS.Case.Index; 3340 } 3341 bool operator==(const CaseIteratorImpl &RHS) const { 3342 return Case == RHS.Case; 3343 } 3344 bool operator<(const CaseIteratorImpl &RHS) const { 3345 assert(Case.SI == RHS.Case.SI && "Incompatible operators."); 3346 return Case.Index < RHS.Case.Index; 3347 } 3348 CaseHandleT &operator*() { return Case; } 3349 const CaseHandleT &operator*() const { return Case; } 3350 }; 3351 3352 using CaseIt = CaseIteratorImpl<CaseHandle>; 3353 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>; 3354 3355 static SwitchInst *Create(Value *Value, BasicBlock *Default, 3356 unsigned NumCases, 3357 Instruction *InsertBefore = nullptr) { 3358 return new SwitchInst(Value, Default, NumCases, InsertBefore); 3359 } 3360 3361 static SwitchInst *Create(Value *Value, BasicBlock *Default, 3362 unsigned NumCases, BasicBlock *InsertAtEnd) { 3363 return new SwitchInst(Value, Default, NumCases, InsertAtEnd); 3364 } 3365 3366 /// Provide fast operand accessors 3367 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 3368 3369 // Accessor Methods for Switch stmt 3370 Value *getCondition() const { return getOperand(0); } 3371 void setCondition(Value *V) { setOperand(0, V); } 3372 3373 BasicBlock *getDefaultDest() const { 3374 return cast<BasicBlock>(getOperand(1)); 3375 } 3376 3377 void setDefaultDest(BasicBlock *DefaultCase) { 3378 setOperand(1, reinterpret_cast<Value*>(DefaultCase)); 3379 } 3380 3381 /// Return the number of 'cases' in this switch instruction, excluding the 3382 /// default case. 
3383 unsigned getNumCases() const { 3384 return getNumOperands()/2 - 1; 3385 } 3386 3387 /// Returns a read/write iterator that points to the first case in the 3388 /// SwitchInst. 3389 CaseIt case_begin() { 3390 return CaseIt(this, 0); 3391 } 3392 3393 /// Returns a read-only iterator that points to the first case in the 3394 /// SwitchInst. 3395 ConstCaseIt case_begin() const { 3396 return ConstCaseIt(this, 0); 3397 } 3398 3399 /// Returns a read/write iterator that points one past the last in the 3400 /// SwitchInst. 3401 CaseIt case_end() { 3402 return CaseIt(this, getNumCases()); 3403 } 3404 3405 /// Returns a read-only iterator that points one past the last in the 3406 /// SwitchInst. 3407 ConstCaseIt case_end() const { 3408 return ConstCaseIt(this, getNumCases()); 3409 } 3410 3411 /// Iteration adapter for range-for loops. 3412 iterator_range<CaseIt> cases() { 3413 return make_range(case_begin(), case_end()); 3414 } 3415 3416 /// Constant iteration adapter for range-for loops. 3417 iterator_range<ConstCaseIt> cases() const { 3418 return make_range(case_begin(), case_end()); 3419 } 3420 3421 /// Returns an iterator that points to the default case. 3422 /// Note: this iterator allows to resolve successor only. Attempt 3423 /// to resolve case value causes an assertion. 3424 /// Also note, that increment and decrement also causes an assertion and 3425 /// makes iterator invalid. 3426 CaseIt case_default() { 3427 return CaseIt(this, DefaultPseudoIndex); 3428 } 3429 ConstCaseIt case_default() const { 3430 return ConstCaseIt(this, DefaultPseudoIndex); 3431 } 3432 3433 /// Search all of the case values for the specified constant. If it is 3434 /// explicitly handled, return the case iterator of it, otherwise return 3435 /// default case iterator to indicate that it is handled by the default 3436 /// handler. 
  // Linear scan over the cases; pointer equality on the ConstantInt is
  // sufficient because constants are uniqued per LLVMContext.
  CaseIt findCaseValue(const ConstantInt *C) {
    CaseIt I = llvm::find_if(
        cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
    if (I != case_end())
      return I;

    return case_default();
  }
  ConstCaseIt findCaseValue(const ConstantInt *C) const {
    ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) {
      return Case.getCaseValue() == C;
    });
    if (I != case_end())
      return I;

    return case_default();
  }

  /// Finds the unique case value for a given successor. Returns null if the
  /// successor is not found, not unique, or is the default case.
  ConstantInt *findCaseDest(BasicBlock *BB) {
    if (BB == getDefaultDest())
      return nullptr;

    ConstantInt *CI = nullptr;
    for (auto Case : cases()) {
      if (Case.getCaseSuccessor() != BB)
        continue;

      if (CI)
        return nullptr; // Multiple cases lead to BB.

      CI = Case.getCaseValue();
    }

    return CI;
  }

  /// Add an entry to the switch instruction.
  /// Note:
  /// This action invalidates case_end(). Old case_end() iterator will
  /// point to the added case.
  void addCase(ConstantInt *OnVal, BasicBlock *Dest);

  /// This method removes the specified case and its successor from the switch
  /// instruction. Note that this operation may reorder the remaining cases at
  /// index idx and above.
  /// Note:
  /// This action invalidates iterators for all cases following the one removed,
  /// including the case_end() iterator. It returns an iterator for the next
  /// case.
  CaseIt removeCase(CaseIt I);

  // Successor 0 is the default destination; successor i+1 is the i-th case
  // destination (operands hold (value, dest) pairs after the condition).
  unsigned getNumSuccessors() const { return getNumOperands()/2; }
  BasicBlock *getSuccessor(unsigned idx) const {
    assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
    return cast<BasicBlock>(getOperand(idx*2+1));
  }
  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
    setOperand(idx * 2 + 1, NewSucc);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Switch;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

/// A wrapper class to simplify modification of SwitchInst cases along with
/// their prof branch_weights metadata.
class SwitchInstProfUpdateWrapper {
  SwitchInst &SI;
  // Lazily-populated copy of the branch weights; None when the switch has no
  // (or malformed) prof metadata, in which case all updates are no-ops.
  Optional<SmallVector<uint32_t, 8> > Weights = None;
  // Tracks whether the cached weights diverged from the metadata; the
  // destructor writes them back only if so.
  bool Changed = false;

protected:
  /// Fetch the !prof branch_weights metadata node of \p SI, if any.
  static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);

  /// Materialize a branch_weights MDNode from the cached Weights.
  MDNode *buildProfBranchWeightsMD();

  void init();

public:
  using CaseWeightOpt = Optional<uint32_t>;
  // The wrapper is a drop-in stand-in for the SwitchInst itself.
  SwitchInst *operator->() { return &SI; }
  SwitchInst &operator*() { return SI; }
  operator SwitchInst *() { return &SI; }

  SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }

  ~SwitchInstProfUpdateWrapper() {
    if (Changed)
      SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
  }

  /// Delegate the call to the underlying SwitchInst::removeCase() and remove
  /// correspondent branch weight.
  SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);

  /// Delegate the call to the underlying SwitchInst::addCase() and set the
  /// specified branch weight for the added case.
  void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);

  /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
  /// this object to not touch the underlying SwitchInst in destructor.
  SymbolTableList<Instruction>::iterator eraseFromParent();

  void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
  CaseWeightOpt getSuccessorWeight(unsigned idx);

  /// Static variant that reads the weight directly from \p SI's metadata
  /// without constructing a wrapper.
  static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
};

template <>
struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)

//===----------------------------------------------------------------------===//
//                            IndirectBrInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Indirect Branch Instruction.
///
class IndirectBrInst : public Instruction {
  // Number of operand slots allocated; getNumOperands() is the number in use.
  unsigned ReservedSpace;

  // Operand[0]   = Address to jump to
  // Operand[n+1] = n-th destination
  IndirectBrInst(const IndirectBrInst &IBI);

  /// Create a new indirectbr instruction, specifying an
  /// Address to jump to. The number of expected destinations can be specified
  /// here to make memory allocation more efficient. This constructor can also
  /// autoinsert before another instruction.
  IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);

  /// Create a new indirectbr instruction, specifying an
  /// Address to jump to. The number of expected destinations can be specified
  /// here to make memory allocation more efficient. This constructor also
  /// autoinserts at the end of the specified BasicBlock.
  IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands; the operand list is "hung off"
  // the instruction and grown on demand (see growOperands).
  void *operator new(size_t s) {
    return User::operator new(s);
  }

  void init(Value *Address, unsigned NumDests);
  void growOperands();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  IndirectBrInst *cloneImpl() const;

public:
  /// Iterator type that casts an operand to a basic block.
  ///
  /// This only makes sense because the successors are stored as adjacent
  /// operands for indirectbr instructions.
  struct succ_op_iterator
      : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
                              std::random_access_iterator_tag, BasicBlock *,
                              ptrdiff_t, BasicBlock *, BasicBlock *> {
    explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}

    BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    BasicBlock *operator->() const { return operator*(); }
  };

  /// The const version of `succ_op_iterator`.
  struct const_succ_op_iterator
      : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
                              std::random_access_iterator_tag,
                              const BasicBlock *, ptrdiff_t, const BasicBlock *,
                              const BasicBlock *> {
    explicit const_succ_op_iterator(const_value_op_iterator I)
        : iterator_adaptor_base(I) {}

    const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    const BasicBlock *operator->() const { return operator*(); }
  };

  /// Create an indirectbr to \p Address; \p NumDests is only an allocation
  /// hint, destinations are added later via addDestination().
  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                Instruction *InsertBefore = nullptr) {
    return new IndirectBrInst(Address, NumDests, InsertBefore);
  }

  /// As above, but inserts at the end of \p InsertAtEnd.
  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                BasicBlock *InsertAtEnd) {
    return new IndirectBrInst(Address, NumDests, InsertAtEnd);
  }

  /// Provide fast operand accessors.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for IndirectBrInst instruction.
  // Operand 0 is the (blockaddress-typed) jump target value.
  Value *getAddress() { return getOperand(0); }
  const Value *getAddress() const { return getOperand(0); }
  void setAddress(Value *V) { setOperand(0, V); }

  /// return the number of possible destinations in this
  /// indirectbr instruction.
  unsigned getNumDestinations() const { return getNumOperands()-1; }

  /// Return the specified destination.
  BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
  const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }

  /// Add a destination.
  ///
  void addDestination(BasicBlock *Dest);

  /// This method removes the specified successor from the
  /// indirectbr instruction.
  void removeDestination(unsigned i);

  // Destinations double as successors; operand 0 (the address) is skipped.
  unsigned getNumSuccessors() const { return getNumOperands()-1; }
  BasicBlock *getSuccessor(unsigned i) const {
    return cast<BasicBlock>(getOperand(i+1));
  }
  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    setOperand(i + 1, NewSucc);
  }

  iterator_range<succ_op_iterator> successors() {
    return make_range(succ_op_iterator(std::next(value_op_begin())),
                      succ_op_iterator(value_op_end()));
  }

  iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(std::next(value_op_begin())),
                      const_succ_op_iterator(value_op_end()));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::IndirectBr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)

//===----------------------------------------------------------------------===//
//                               InvokeInst Class
//===----------------------------------------------------------------------===//

/// Invoke instruction.  The SubclassData field is used to hold the
/// calling convention of the call.
///
class InvokeInst : public CallBase {
  /// The number of operands for this call beyond the called function,
  /// arguments, and operand bundles.
  static constexpr int NumExtraOperands = 2;

  /// The index from the end of the operand array to the normal destination.
  static constexpr int NormalDestOpEndIdx = -3;

  /// The index from the end of the operand array to the unwind destination.
  static constexpr int UnwindDestOpEndIdx = -2;

  InvokeInst(const InvokeInst &BI);

  /// Construct an InvokeInst given a range of arguments.
  ///
  /// Construct an InvokeInst from a range of arguments
  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                    BasicBlock *IfException, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, Instruction *InsertBefore);

  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                    BasicBlock *IfException, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
            BasicBlock *IfException, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
    // We need one operand for the called function, plus our extra operands and
    // the input operand counts provided.
    return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InvokeInst *cloneImpl() const;

public:
  /// Create an invoke of \p Func with normal destination \p IfNormal and
  /// unwind destination \p IfException; no operand bundles.
  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size());
    return new (NumOperands)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
                   NameStr, InsertBefore);
  }

  /// As above, with operand bundles. The bundle descriptors are co-allocated
  /// with the instruction (hence the extra DescriptorBytes argument to new).
  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = None,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
                   NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands = ComputeNumOperands(Args.size());
    return new (NumOperands)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
                   NameStr, InsertAtEnd);
  }

  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
                   NameStr, InsertAtEnd);
  }

  // FunctionCallee convenience overloads: derive the function type from the
  // callee and forward to the FunctionType-based Create above.
  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, None, NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = None,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, NameStr, InsertAtEnd);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertAtEnd);
  }

  /// Create a clone of \p II with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned invoke instruction is identical to \p II in every way except
  /// that the operand bundles for the new instruction are set to the operand
  /// bundles in \p Bundles.
  static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
                            Instruction *InsertPt = nullptr);

  // get*Dest - Return the destination basic blocks...
  // The two destinations live at fixed offsets from the end of the operand
  // array (see NormalDestOpEndIdx / UnwindDestOpEndIdx).
  BasicBlock *getNormalDest() const {
    return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
  }
  BasicBlock *getUnwindDest() const {
    return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
  }
  void setNormalDest(BasicBlock *B) {
    Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
  }
  void setUnwindDest(BasicBlock *B) {
    Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
  }

  /// Get the landingpad instruction from the landing pad
  /// block (the unwind destination).
  LandingPadInst *getLandingPadInst() const;

  // Successor 0 is the normal destination, successor 1 the unwind destination.
  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < 2 && "Successor # out of range for invoke!");
    return i == 0 ? getNormalDest() : getUnwindDest();
  }

  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    assert(i < 2 && "Successor # out of range for invoke!");
    if (i == 0)
      setNormalDest(NewSucc);
    else
      setUnwindDest(NewSucc);
  }

  unsigned getNumSuccessors() const { return 2; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Invoke);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

// Out-of-line inline constructor definitions. The operand array is allocated
// co-located before the instruction, so op_end(this) - NumOperands locates
// its start.
InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                       BasicBlock *IfException, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertBefore) {
  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
}

InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                       BasicBlock *IfException, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertAtEnd) {
  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
}

//===----------------------------------------------------------------------===//
//                              CallBrInst Class
//===----------------------------------------------------------------------===//

/// CallBr instruction, tracking function calls that may not return control but
/// instead transfer it to a third location. The SubclassData field is used to
/// hold the calling convention of the call.
///
class CallBrInst : public CallBase {

  unsigned NumIndirectDests;

  CallBrInst(const CallBrInst &BI);

  /// Construct a CallBrInst given a range of arguments.
  ///
  /// Construct a CallBrInst from a range of arguments
  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                    ArrayRef<BasicBlock *> IndirectDests,
                    ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, Instruction *InsertBefore);

  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                    ArrayRef<BasicBlock *> IndirectDests,
                    ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
            ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);

  /// Should the Indirect Destinations change, scan + update the Arg list.
  void updateArgBlockAddresses(unsigned i, BasicBlock *B);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
                                int NumBundleInputs = 0) {
    // We need one operand for the called function, plus our extra operands and
    // the input operand counts provided.
    return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CallBrInst *cloneImpl() const;

public:
  /// Create a callbr to \p Func with fallthrough destination \p DefaultDest
  /// and the given indirect destinations; no operand bundles.
  static CallBrInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
    return new (NumOperands)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
                   NumOperands, NameStr, InsertBefore);
  }

  /// As above, with operand bundles; descriptors are co-allocated with the
  /// instruction.
  static CallBrInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = None,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
                                         CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
                   NumOperands, NameStr, InsertBefore);
  }

  static CallBrInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
    return new (NumOperands)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
                   NumOperands, NameStr, InsertAtEnd);
  }

  static CallBrInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
                                         CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
                   NumOperands, NameStr, InsertAtEnd);
  }

  // FunctionCallee convenience overloads: derive the function type from the
  // callee and forward to the FunctionType-based Create above.
  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, NameStr, InsertBefore);
  }

  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = None,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, Bundles, NameStr, InsertBefore);
  }

  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, NameStr, InsertAtEnd);
  }

  static CallBrInst *Create(FunctionCallee Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
  }

  /// Create a clone of \p CBI with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned callbr instruction is identical to \p CBI in every way
  /// except that the operand bundles for the new instruction are set to the
  /// operand bundles in \p Bundles.
  static CallBrInst *Create(CallBrInst *CBI,
                            ArrayRef<OperandBundleDef> Bundles,
                            Instruction *InsertPt = nullptr);

  /// Return the number of callbr indirect dest labels.
  ///
  unsigned getNumIndirectDests() const { return NumIndirectDests; }

  /// getIndirectDestLabel - Return the i-th indirect dest label.
  ///
  Value *getIndirectDestLabel(unsigned i) const {
    assert(i < getNumIndirectDests() && "Out of bounds!");
    return getOperand(i + getNumArgOperands() + getNumTotalBundleOperands() +
                      1);
  }

  Value *getIndirectDestLabelUse(unsigned i) const {
    assert(i < getNumIndirectDests() && "Out of bounds!");
    return getOperandUse(i + getNumArgOperands() + getNumTotalBundleOperands() +
                         1);
  }

  // Return the destination basic blocks...
4072 BasicBlock *getDefaultDest() const { 4073 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1)); 4074 } 4075 BasicBlock *getIndirectDest(unsigned i) const { 4076 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i)); 4077 } 4078 SmallVector<BasicBlock *, 16> getIndirectDests() const { 4079 SmallVector<BasicBlock *, 16> IndirectDests; 4080 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i) 4081 IndirectDests.push_back(getIndirectDest(i)); 4082 return IndirectDests; 4083 } 4084 void setDefaultDest(BasicBlock *B) { 4085 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B); 4086 } 4087 void setIndirectDest(unsigned i, BasicBlock *B) { 4088 updateArgBlockAddresses(i, B); 4089 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B); 4090 } 4091 4092 BasicBlock *getSuccessor(unsigned i) const { 4093 assert(i < getNumSuccessors() + 1 && 4094 "Successor # out of range for callbr!"); 4095 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1); 4096 } 4097 4098 void setSuccessor(unsigned i, BasicBlock *NewSucc) { 4099 assert(i < getNumIndirectDests() + 1 && 4100 "Successor # out of range for callbr!"); 4101 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc); 4102 } 4103 4104 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; } 4105 4106 // Methods for support type inquiry through isa, cast, and dyn_cast: 4107 static bool classof(const Instruction *I) { 4108 return (I->getOpcode() == Instruction::CallBr); 4109 } 4110 static bool classof(const Value *V) { 4111 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4112 } 4113 4114 private: 4115 // Shadow Instruction::setInstructionSubclassData with a private forwarding 4116 // method so that subclasses cannot accidentally use it. 
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

// Out-of-line inline constructor definitions; the operand array is
// co-allocated immediately before the instruction.
CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                       ArrayRef<BasicBlock *> IndirectDests,
                       ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertBefore) {
  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
}

CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                       ArrayRef<BasicBlock *> IndirectDests,
                       ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertAtEnd) {
  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
}

//===----------------------------------------------------------------------===//
//                              ResumeInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Resume the propagation of an exception.
///
class ResumeInst : public Instruction {
  ResumeInst(const ResumeInst &RI);

  explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
  ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ResumeInst *cloneImpl() const;

public:
  // new(1): allocate room for the single exception-value operand.
  static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
    return new(1) ResumeInst(Exn, InsertBefore);
  }

  static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
    return new(1) ResumeInst(Exn, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Convenience accessor.
  Value *getValue() const { return Op<0>(); }

  // resume is a terminator but transfers control out of the function, so it
  // has no successors.
  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Resume;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("ResumeInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    llvm_unreachable("ResumeInst has no successors!");
  }
};

template <>
struct OperandTraits<ResumeInst> :
    public FixedNumOperandTraits<ResumeInst, 1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)

//===----------------------------------------------------------------------===//
//                         CatchSwitchInst Class
//===----------------------------------------------------------------------===//
class CatchSwitchInst : public Instruction {
  using UnwindDestField = BoolBitfieldElementT<0>;

  /// The number of operands actually allocated.  NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;

  // Operand[0] = Outer scope
  // Operand[1] = Unwind block destination
  // Operand[n] = BasicBlock to go to on match
  CatchSwitchInst(const CatchSwitchInst &CSI);

  /// Create a new switch instruction, specifying a
  /// default destination. The number of additional handlers can be specified
  /// here to make memory allocation more efficient.
  /// This constructor can also autoinsert before another instruction.
  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                  unsigned NumHandlers, const Twine &NameStr,
                  Instruction *InsertBefore);

  /// Create a new switch instruction, specifying a
  /// default destination. The number of additional handlers can be specified
  /// here to make memory allocation more efficient.
  /// This constructor also autoinserts at the end of the specified BasicBlock.
  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                  unsigned NumHandlers, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands; the operand list is hung off
  // the instruction and grown on demand (see growOperands).
  void *operator new(size_t s) { return User::operator new(s); }

  void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
  void growOperands(unsigned Size);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CatchSwitchInst *cloneImpl() const;

public:
  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumHandlers,
                                 const Twine &NameStr = "",
                                 Instruction *InsertBefore = nullptr) {
    return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
                               InsertBefore);
  }

  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumHandlers, const Twine &NameStr,
                                 BasicBlock *InsertAtEnd) {
    return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
                               InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for CatchSwitch stmt
  Value *getParentPad() const { return getOperand(0); }
  void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }

  // Accessor Methods for CatchSwitch stmt
  // Whether operand 1 is an unwind destination is recorded in subclass data,
  // not inferable from the operand count alone.
  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
  bool unwindsToCaller() const { return !hasUnwindDest(); }
  BasicBlock *getUnwindDest() const {
    if (hasUnwindDest())
      return cast<BasicBlock>(getOperand(1));
    return nullptr;
  }
  void setUnwindDest(BasicBlock *UnwindDest) {
    assert(UnwindDest);
    assert(hasUnwindDest());
    setOperand(1, UnwindDest);
  }

  /// return the number of 'handlers' in this catchswitch
  /// instruction, except the default handler
  unsigned getNumHandlers() const {
    if (hasUnwindDest())
      return getNumOperands() - 2;
    return getNumOperands() - 1;
  }

private:
  // Used by the mapped handler iterators below to turn operands into blocks.
  static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
  static const BasicBlock *handler_helper(const Value *V) {
    return cast<BasicBlock>(V);
  }

public:
  using DerefFnTy = BasicBlock *(*)(Value *);
  using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4303 using handler_range = iterator_range<handler_iterator>; 4304 using ConstDerefFnTy = const BasicBlock *(*)(const Value *); 4305 using const_handler_iterator = 4306 mapped_iterator<const_op_iterator, ConstDerefFnTy>; 4307 using const_handler_range = iterator_range<const_handler_iterator>; 4308 4309 /// Returns an iterator that points to the first handler in CatchSwitchInst. 4310 handler_iterator handler_begin() { 4311 op_iterator It = op_begin() + 1; 4312 if (hasUnwindDest()) 4313 ++It; 4314 return handler_iterator(It, DerefFnTy(handler_helper)); 4315 } 4316 4317 /// Returns an iterator that points to the first handler in the 4318 /// CatchSwitchInst. 4319 const_handler_iterator handler_begin() const { 4320 const_op_iterator It = op_begin() + 1; 4321 if (hasUnwindDest()) 4322 ++It; 4323 return const_handler_iterator(It, ConstDerefFnTy(handler_helper)); 4324 } 4325 4326 /// Returns a read-only iterator that points one past the last 4327 /// handler in the CatchSwitchInst. 4328 handler_iterator handler_end() { 4329 return handler_iterator(op_end(), DerefFnTy(handler_helper)); 4330 } 4331 4332 /// Returns an iterator that points one past the last handler in the 4333 /// CatchSwitchInst. 4334 const_handler_iterator handler_end() const { 4335 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper)); 4336 } 4337 4338 /// iteration adapter for range-for loops. 4339 handler_range handlers() { 4340 return make_range(handler_begin(), handler_end()); 4341 } 4342 4343 /// iteration adapter for range-for loops. 4344 const_handler_range handlers() const { 4345 return make_range(handler_begin(), handler_end()); 4346 } 4347 4348 /// Add an entry to the switch instruction... 4349 /// Note: 4350 /// This action invalidates handler_end(). Old handler_end() iterator will 4351 /// point to the added handler. 
4352 void addHandler(BasicBlock *Dest); 4353 4354 void removeHandler(handler_iterator HI); 4355 4356 unsigned getNumSuccessors() const { return getNumOperands() - 1; } 4357 BasicBlock *getSuccessor(unsigned Idx) const { 4358 assert(Idx < getNumSuccessors() && 4359 "Successor # out of range for catchswitch!"); 4360 return cast<BasicBlock>(getOperand(Idx + 1)); 4361 } 4362 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) { 4363 assert(Idx < getNumSuccessors() && 4364 "Successor # out of range for catchswitch!"); 4365 setOperand(Idx + 1, NewSucc); 4366 } 4367 4368 // Methods for support type inquiry through isa, cast, and dyn_cast: 4369 static bool classof(const Instruction *I) { 4370 return I->getOpcode() == Instruction::CatchSwitch; 4371 } 4372 static bool classof(const Value *V) { 4373 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4374 } 4375 }; 4376 4377 template <> 4378 struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {}; 4379 4380 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value) 4381 4382 //===----------------------------------------------------------------------===// 4383 // CleanupPadInst Class 4384 //===----------------------------------------------------------------------===// 4385 class CleanupPadInst : public FuncletPadInst { 4386 private: 4387 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, 4388 unsigned Values, const Twine &NameStr, 4389 Instruction *InsertBefore) 4390 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, 4391 NameStr, InsertBefore) {} 4392 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, 4393 unsigned Values, const Twine &NameStr, 4394 BasicBlock *InsertAtEnd) 4395 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, 4396 NameStr, InsertAtEnd) {} 4397 4398 public: 4399 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None, 4400 const Twine &NameStr = "", 4401 Instruction *InsertBefore = nullptr) { 
4402 unsigned Values = 1 + Args.size(); 4403 return new (Values) 4404 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore); 4405 } 4406 4407 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args, 4408 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4409 unsigned Values = 1 + Args.size(); 4410 return new (Values) 4411 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd); 4412 } 4413 4414 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4415 static bool classof(const Instruction *I) { 4416 return I->getOpcode() == Instruction::CleanupPad; 4417 } 4418 static bool classof(const Value *V) { 4419 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4420 } 4421 }; 4422 4423 //===----------------------------------------------------------------------===// 4424 // CatchPadInst Class 4425 //===----------------------------------------------------------------------===// 4426 class CatchPadInst : public FuncletPadInst { 4427 private: 4428 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, 4429 unsigned Values, const Twine &NameStr, 4430 Instruction *InsertBefore) 4431 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, 4432 NameStr, InsertBefore) {} 4433 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, 4434 unsigned Values, const Twine &NameStr, 4435 BasicBlock *InsertAtEnd) 4436 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, 4437 NameStr, InsertAtEnd) {} 4438 4439 public: 4440 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, 4441 const Twine &NameStr = "", 4442 Instruction *InsertBefore = nullptr) { 4443 unsigned Values = 1 + Args.size(); 4444 return new (Values) 4445 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore); 4446 } 4447 4448 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, 4449 const Twine &NameStr, BasicBlock *InsertAtEnd) { 4450 unsigned Values = 1 + Args.size(); 4451 return 
new (Values) 4452 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd); 4453 } 4454 4455 /// Convenience accessors 4456 CatchSwitchInst *getCatchSwitch() const { 4457 return cast<CatchSwitchInst>(Op<-1>()); 4458 } 4459 void setCatchSwitch(Value *CatchSwitch) { 4460 assert(CatchSwitch); 4461 Op<-1>() = CatchSwitch; 4462 } 4463 4464 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4465 static bool classof(const Instruction *I) { 4466 return I->getOpcode() == Instruction::CatchPad; 4467 } 4468 static bool classof(const Value *V) { 4469 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4470 } 4471 }; 4472 4473 //===----------------------------------------------------------------------===// 4474 // CatchReturnInst Class 4475 //===----------------------------------------------------------------------===// 4476 4477 class CatchReturnInst : public Instruction { 4478 CatchReturnInst(const CatchReturnInst &RI); 4479 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore); 4480 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd); 4481 4482 void init(Value *CatchPad, BasicBlock *BB); 4483 4484 protected: 4485 // Note: Instruction needs to be a friend here to call cloneImpl. 4486 friend class Instruction; 4487 4488 CatchReturnInst *cloneImpl() const; 4489 4490 public: 4491 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, 4492 Instruction *InsertBefore = nullptr) { 4493 assert(CatchPad); 4494 assert(BB); 4495 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore); 4496 } 4497 4498 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, 4499 BasicBlock *InsertAtEnd) { 4500 assert(CatchPad); 4501 assert(BB); 4502 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd); 4503 } 4504 4505 /// Provide fast operand accessors 4506 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4507 4508 /// Convenience accessors. 
4509 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); } 4510 void setCatchPad(CatchPadInst *CatchPad) { 4511 assert(CatchPad); 4512 Op<0>() = CatchPad; 4513 } 4514 4515 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); } 4516 void setSuccessor(BasicBlock *NewSucc) { 4517 assert(NewSucc); 4518 Op<1>() = NewSucc; 4519 } 4520 unsigned getNumSuccessors() const { return 1; } 4521 4522 /// Get the parentPad of this catchret's catchpad's catchswitch. 4523 /// The successor block is implicitly a member of this funclet. 4524 Value *getCatchSwitchParentPad() const { 4525 return getCatchPad()->getCatchSwitch()->getParentPad(); 4526 } 4527 4528 // Methods for support type inquiry through isa, cast, and dyn_cast: 4529 static bool classof(const Instruction *I) { 4530 return (I->getOpcode() == Instruction::CatchRet); 4531 } 4532 static bool classof(const Value *V) { 4533 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4534 } 4535 4536 private: 4537 BasicBlock *getSuccessor(unsigned Idx) const { 4538 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!"); 4539 return getSuccessor(); 4540 } 4541 4542 void setSuccessor(unsigned Idx, BasicBlock *B) { 4543 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!"); 4544 setSuccessor(B); 4545 } 4546 }; 4547 4548 template <> 4549 struct OperandTraits<CatchReturnInst> 4550 : public FixedNumOperandTraits<CatchReturnInst, 2> {}; 4551 4552 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value) 4553 4554 //===----------------------------------------------------------------------===// 4555 // CleanupReturnInst Class 4556 //===----------------------------------------------------------------------===// 4557 4558 class CleanupReturnInst : public Instruction { 4559 using UnwindDestField = BoolBitfieldElementT<0>; 4560 4561 private: 4562 CleanupReturnInst(const CleanupReturnInst &RI); 4563 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, 
unsigned Values, 4564 Instruction *InsertBefore = nullptr); 4565 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, 4566 BasicBlock *InsertAtEnd); 4567 4568 void init(Value *CleanupPad, BasicBlock *UnwindBB); 4569 4570 protected: 4571 // Note: Instruction needs to be a friend here to call cloneImpl. 4572 friend class Instruction; 4573 4574 CleanupReturnInst *cloneImpl() const; 4575 4576 public: 4577 static CleanupReturnInst *Create(Value *CleanupPad, 4578 BasicBlock *UnwindBB = nullptr, 4579 Instruction *InsertBefore = nullptr) { 4580 assert(CleanupPad); 4581 unsigned Values = 1; 4582 if (UnwindBB) 4583 ++Values; 4584 return new (Values) 4585 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore); 4586 } 4587 4588 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB, 4589 BasicBlock *InsertAtEnd) { 4590 assert(CleanupPad); 4591 unsigned Values = 1; 4592 if (UnwindBB) 4593 ++Values; 4594 return new (Values) 4595 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd); 4596 } 4597 4598 /// Provide fast operand accessors 4599 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); 4600 4601 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } 4602 bool unwindsToCaller() const { return !hasUnwindDest(); } 4603 4604 /// Convenience accessor. 4605 CleanupPadInst *getCleanupPad() const { 4606 return cast<CleanupPadInst>(Op<0>()); 4607 } 4608 void setCleanupPad(CleanupPadInst *CleanupPad) { 4609 assert(CleanupPad); 4610 Op<0>() = CleanupPad; 4611 } 4612 4613 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; } 4614 4615 BasicBlock *getUnwindDest() const { 4616 return hasUnwindDest() ? 
cast<BasicBlock>(Op<1>()) : nullptr; 4617 } 4618 void setUnwindDest(BasicBlock *NewDest) { 4619 assert(NewDest); 4620 assert(hasUnwindDest()); 4621 Op<1>() = NewDest; 4622 } 4623 4624 // Methods for support type inquiry through isa, cast, and dyn_cast: 4625 static bool classof(const Instruction *I) { 4626 return (I->getOpcode() == Instruction::CleanupRet); 4627 } 4628 static bool classof(const Value *V) { 4629 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4630 } 4631 4632 private: 4633 BasicBlock *getSuccessor(unsigned Idx) const { 4634 assert(Idx == 0); 4635 return getUnwindDest(); 4636 } 4637 4638 void setSuccessor(unsigned Idx, BasicBlock *B) { 4639 assert(Idx == 0); 4640 setUnwindDest(B); 4641 } 4642 4643 // Shadow Instruction::setInstructionSubclassData with a private forwarding 4644 // method so that subclasses cannot accidentally use it. 4645 template <typename Bitfield> 4646 void setSubclassData(typename Bitfield::Type Value) { 4647 Instruction::setSubclassData<Bitfield>(Value); 4648 } 4649 }; 4650 4651 template <> 4652 struct OperandTraits<CleanupReturnInst> 4653 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {}; 4654 4655 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value) 4656 4657 //===----------------------------------------------------------------------===// 4658 // UnreachableInst Class 4659 //===----------------------------------------------------------------------===// 4660 4661 //===--------------------------------------------------------------------------- 4662 /// This function has undefined behavior. In particular, the 4663 /// presence of this instruction indicates some higher level knowledge that the 4664 /// end of the block cannot be reached. 4665 /// 4666 class UnreachableInst : public Instruction { 4667 protected: 4668 // Note: Instruction needs to be a friend here to call cloneImpl. 
4669 friend class Instruction; 4670 4671 UnreachableInst *cloneImpl() const; 4672 4673 public: 4674 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr); 4675 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd); 4676 4677 // allocate space for exactly zero operands 4678 void *operator new(size_t s) { 4679 return User::operator new(s, 0); 4680 } 4681 4682 unsigned getNumSuccessors() const { return 0; } 4683 4684 // Methods for support type inquiry through isa, cast, and dyn_cast: 4685 static bool classof(const Instruction *I) { 4686 return I->getOpcode() == Instruction::Unreachable; 4687 } 4688 static bool classof(const Value *V) { 4689 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4690 } 4691 4692 private: 4693 BasicBlock *getSuccessor(unsigned idx) const { 4694 llvm_unreachable("UnreachableInst has no successors!"); 4695 } 4696 4697 void setSuccessor(unsigned idx, BasicBlock *B) { 4698 llvm_unreachable("UnreachableInst has no successors!"); 4699 } 4700 }; 4701 4702 //===----------------------------------------------------------------------===// 4703 // TruncInst Class 4704 //===----------------------------------------------------------------------===// 4705 4706 /// This class represents a truncation of integer types. 4707 class TruncInst : public CastInst { 4708 protected: 4709 // Note: Instruction needs to be a friend here to call cloneImpl. 
4710 friend class Instruction; 4711 4712 /// Clone an identical TruncInst 4713 TruncInst *cloneImpl() const; 4714 4715 public: 4716 /// Constructor with insert-before-instruction semantics 4717 TruncInst( 4718 Value *S, ///< The value to be truncated 4719 Type *Ty, ///< The (smaller) type to truncate to 4720 const Twine &NameStr = "", ///< A name for the new instruction 4721 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 4722 ); 4723 4724 /// Constructor with insert-at-end-of-block semantics 4725 TruncInst( 4726 Value *S, ///< The value to be truncated 4727 Type *Ty, ///< The (smaller) type to truncate to 4728 const Twine &NameStr, ///< A name for the new instruction 4729 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 4730 ); 4731 4732 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4733 static bool classof(const Instruction *I) { 4734 return I->getOpcode() == Trunc; 4735 } 4736 static bool classof(const Value *V) { 4737 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4738 } 4739 }; 4740 4741 //===----------------------------------------------------------------------===// 4742 // ZExtInst Class 4743 //===----------------------------------------------------------------------===// 4744 4745 /// This class represents zero extension of integer types. 4746 class ZExtInst : public CastInst { 4747 protected: 4748 // Note: Instruction needs to be a friend here to call cloneImpl. 4749 friend class Instruction; 4750 4751 /// Clone an identical ZExtInst 4752 ZExtInst *cloneImpl() const; 4753 4754 public: 4755 /// Constructor with insert-before-instruction semantics 4756 ZExtInst( 4757 Value *S, ///< The value to be zero extended 4758 Type *Ty, ///< The type to zero extend to 4759 const Twine &NameStr = "", ///< A name for the new instruction 4760 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 4761 ); 4762 4763 /// Constructor with insert-at-end semantics. 
4764 ZExtInst( 4765 Value *S, ///< The value to be zero extended 4766 Type *Ty, ///< The type to zero extend to 4767 const Twine &NameStr, ///< A name for the new instruction 4768 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 4769 ); 4770 4771 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4772 static bool classof(const Instruction *I) { 4773 return I->getOpcode() == ZExt; 4774 } 4775 static bool classof(const Value *V) { 4776 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4777 } 4778 }; 4779 4780 //===----------------------------------------------------------------------===// 4781 // SExtInst Class 4782 //===----------------------------------------------------------------------===// 4783 4784 /// This class represents a sign extension of integer types. 4785 class SExtInst : public CastInst { 4786 protected: 4787 // Note: Instruction needs to be a friend here to call cloneImpl. 4788 friend class Instruction; 4789 4790 /// Clone an identical SExtInst 4791 SExtInst *cloneImpl() const; 4792 4793 public: 4794 /// Constructor with insert-before-instruction semantics 4795 SExtInst( 4796 Value *S, ///< The value to be sign extended 4797 Type *Ty, ///< The type to sign extend to 4798 const Twine &NameStr = "", ///< A name for the new instruction 4799 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 4800 ); 4801 4802 /// Constructor with insert-at-end-of-block semantics 4803 SExtInst( 4804 Value *S, ///< The value to be sign extended 4805 Type *Ty, ///< The type to sign extend to 4806 const Twine &NameStr, ///< A name for the new instruction 4807 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 4808 ); 4809 4810 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4811 static bool classof(const Instruction *I) { 4812 return I->getOpcode() == SExt; 4813 } 4814 static bool classof(const Value *V) { 4815 return isa<Instruction>(V) && 
classof(cast<Instruction>(V)); 4816 } 4817 }; 4818 4819 //===----------------------------------------------------------------------===// 4820 // FPTruncInst Class 4821 //===----------------------------------------------------------------------===// 4822 4823 /// This class represents a truncation of floating point types. 4824 class FPTruncInst : public CastInst { 4825 protected: 4826 // Note: Instruction needs to be a friend here to call cloneImpl. 4827 friend class Instruction; 4828 4829 /// Clone an identical FPTruncInst 4830 FPTruncInst *cloneImpl() const; 4831 4832 public: 4833 /// Constructor with insert-before-instruction semantics 4834 FPTruncInst( 4835 Value *S, ///< The value to be truncated 4836 Type *Ty, ///< The type to truncate to 4837 const Twine &NameStr = "", ///< A name for the new instruction 4838 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 4839 ); 4840 4841 /// Constructor with insert-before-instruction semantics 4842 FPTruncInst( 4843 Value *S, ///< The value to be truncated 4844 Type *Ty, ///< The type to truncate to 4845 const Twine &NameStr, ///< A name for the new instruction 4846 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 4847 ); 4848 4849 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4850 static bool classof(const Instruction *I) { 4851 return I->getOpcode() == FPTrunc; 4852 } 4853 static bool classof(const Value *V) { 4854 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4855 } 4856 }; 4857 4858 //===----------------------------------------------------------------------===// 4859 // FPExtInst Class 4860 //===----------------------------------------------------------------------===// 4861 4862 /// This class represents an extension of floating point types. 4863 class FPExtInst : public CastInst { 4864 protected: 4865 // Note: Instruction needs to be a friend here to call cloneImpl. 
4866 friend class Instruction; 4867 4868 /// Clone an identical FPExtInst 4869 FPExtInst *cloneImpl() const; 4870 4871 public: 4872 /// Constructor with insert-before-instruction semantics 4873 FPExtInst( 4874 Value *S, ///< The value to be extended 4875 Type *Ty, ///< The type to extend to 4876 const Twine &NameStr = "", ///< A name for the new instruction 4877 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 4878 ); 4879 4880 /// Constructor with insert-at-end-of-block semantics 4881 FPExtInst( 4882 Value *S, ///< The value to be extended 4883 Type *Ty, ///< The type to extend to 4884 const Twine &NameStr, ///< A name for the new instruction 4885 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 4886 ); 4887 4888 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4889 static bool classof(const Instruction *I) { 4890 return I->getOpcode() == FPExt; 4891 } 4892 static bool classof(const Value *V) { 4893 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4894 } 4895 }; 4896 4897 //===----------------------------------------------------------------------===// 4898 // UIToFPInst Class 4899 //===----------------------------------------------------------------------===// 4900 4901 /// This class represents a cast unsigned integer to floating point. 4902 class UIToFPInst : public CastInst { 4903 protected: 4904 // Note: Instruction needs to be a friend here to call cloneImpl. 
4905 friend class Instruction; 4906 4907 /// Clone an identical UIToFPInst 4908 UIToFPInst *cloneImpl() const; 4909 4910 public: 4911 /// Constructor with insert-before-instruction semantics 4912 UIToFPInst( 4913 Value *S, ///< The value to be converted 4914 Type *Ty, ///< The type to convert to 4915 const Twine &NameStr = "", ///< A name for the new instruction 4916 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 4917 ); 4918 4919 /// Constructor with insert-at-end-of-block semantics 4920 UIToFPInst( 4921 Value *S, ///< The value to be converted 4922 Type *Ty, ///< The type to convert to 4923 const Twine &NameStr, ///< A name for the new instruction 4924 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 4925 ); 4926 4927 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4928 static bool classof(const Instruction *I) { 4929 return I->getOpcode() == UIToFP; 4930 } 4931 static bool classof(const Value *V) { 4932 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4933 } 4934 }; 4935 4936 //===----------------------------------------------------------------------===// 4937 // SIToFPInst Class 4938 //===----------------------------------------------------------------------===// 4939 4940 /// This class represents a cast from signed integer to floating point. 4941 class SIToFPInst : public CastInst { 4942 protected: 4943 // Note: Instruction needs to be a friend here to call cloneImpl. 
4944 friend class Instruction; 4945 4946 /// Clone an identical SIToFPInst 4947 SIToFPInst *cloneImpl() const; 4948 4949 public: 4950 /// Constructor with insert-before-instruction semantics 4951 SIToFPInst( 4952 Value *S, ///< The value to be converted 4953 Type *Ty, ///< The type to convert to 4954 const Twine &NameStr = "", ///< A name for the new instruction 4955 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 4956 ); 4957 4958 /// Constructor with insert-at-end-of-block semantics 4959 SIToFPInst( 4960 Value *S, ///< The value to be converted 4961 Type *Ty, ///< The type to convert to 4962 const Twine &NameStr, ///< A name for the new instruction 4963 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 4964 ); 4965 4966 /// Methods for support type inquiry through isa, cast, and dyn_cast: 4967 static bool classof(const Instruction *I) { 4968 return I->getOpcode() == SIToFP; 4969 } 4970 static bool classof(const Value *V) { 4971 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 4972 } 4973 }; 4974 4975 //===----------------------------------------------------------------------===// 4976 // FPToUIInst Class 4977 //===----------------------------------------------------------------------===// 4978 4979 /// This class represents a cast from floating point to unsigned integer 4980 class FPToUIInst : public CastInst { 4981 protected: 4982 // Note: Instruction needs to be a friend here to call cloneImpl. 
4983 friend class Instruction; 4984 4985 /// Clone an identical FPToUIInst 4986 FPToUIInst *cloneImpl() const; 4987 4988 public: 4989 /// Constructor with insert-before-instruction semantics 4990 FPToUIInst( 4991 Value *S, ///< The value to be converted 4992 Type *Ty, ///< The type to convert to 4993 const Twine &NameStr = "", ///< A name for the new instruction 4994 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 4995 ); 4996 4997 /// Constructor with insert-at-end-of-block semantics 4998 FPToUIInst( 4999 Value *S, ///< The value to be converted 5000 Type *Ty, ///< The type to convert to 5001 const Twine &NameStr, ///< A name for the new instruction 5002 BasicBlock *InsertAtEnd ///< Where to insert the new instruction 5003 ); 5004 5005 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5006 static bool classof(const Instruction *I) { 5007 return I->getOpcode() == FPToUI; 5008 } 5009 static bool classof(const Value *V) { 5010 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5011 } 5012 }; 5013 5014 //===----------------------------------------------------------------------===// 5015 // FPToSIInst Class 5016 //===----------------------------------------------------------------------===// 5017 5018 /// This class represents a cast from floating point to signed integer. 5019 class FPToSIInst : public CastInst { 5020 protected: 5021 // Note: Instruction needs to be a friend here to call cloneImpl. 
5022 friend class Instruction; 5023 5024 /// Clone an identical FPToSIInst 5025 FPToSIInst *cloneImpl() const; 5026 5027 public: 5028 /// Constructor with insert-before-instruction semantics 5029 FPToSIInst( 5030 Value *S, ///< The value to be converted 5031 Type *Ty, ///< The type to convert to 5032 const Twine &NameStr = "", ///< A name for the new instruction 5033 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5034 ); 5035 5036 /// Constructor with insert-at-end-of-block semantics 5037 FPToSIInst( 5038 Value *S, ///< The value to be converted 5039 Type *Ty, ///< The type to convert to 5040 const Twine &NameStr, ///< A name for the new instruction 5041 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5042 ); 5043 5044 /// Methods for support type inquiry through isa, cast, and dyn_cast: 5045 static bool classof(const Instruction *I) { 5046 return I->getOpcode() == FPToSI; 5047 } 5048 static bool classof(const Value *V) { 5049 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5050 } 5051 }; 5052 5053 //===----------------------------------------------------------------------===// 5054 // IntToPtrInst Class 5055 //===----------------------------------------------------------------------===// 5056 5057 /// This class represents a cast from an integer to a pointer. 5058 class IntToPtrInst : public CastInst { 5059 public: 5060 // Note: Instruction needs to be a friend here to call cloneImpl. 
5061 friend class Instruction; 5062 5063 /// Constructor with insert-before-instruction semantics 5064 IntToPtrInst( 5065 Value *S, ///< The value to be converted 5066 Type *Ty, ///< The type to convert to 5067 const Twine &NameStr = "", ///< A name for the new instruction 5068 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction 5069 ); 5070 5071 /// Constructor with insert-at-end-of-block semantics 5072 IntToPtrInst( 5073 Value *S, ///< The value to be converted 5074 Type *Ty, ///< The type to convert to 5075 const Twine &NameStr, ///< A name for the new instruction 5076 BasicBlock *InsertAtEnd ///< The block to insert the instruction into 5077 ); 5078 5079 /// Clone an identical IntToPtrInst. 5080 IntToPtrInst *cloneImpl() const; 5081 5082 /// Returns the address space of this instruction's pointer type. 5083 unsigned getAddressSpace() const { 5084 return getType()->getPointerAddressSpace(); 5085 } 5086 5087 // Methods for support type inquiry through isa, cast, and dyn_cast: 5088 static bool classof(const Instruction *I) { 5089 return I->getOpcode() == IntToPtr; 5090 } 5091 static bool classof(const Value *V) { 5092 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5093 } 5094 }; 5095 5096 //===----------------------------------------------------------------------===// 5097 // PtrToIntInst Class 5098 //===----------------------------------------------------------------------===// 5099 5100 /// This class represents a cast from a pointer to an integer. 5101 class PtrToIntInst : public CastInst { 5102 protected: 5103 // Note: Instruction needs to be a friend here to call cloneImpl. 5104 friend class Instruction; 5105 5106 /// Clone an identical PtrToIntInst. 
  /// Clone an identical PtrToIntInst.
  PtrToIntInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  PtrToIntInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  PtrToIntInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr,               ///< A name for the new instruction
    BasicBlock *InsertAtEnd             ///< The block to insert the instruction into
  );

  /// Gets the pointer operand.
  Value *getPointerOperand() { return getOperand(0); }
  /// Gets the pointer operand.
  const Value *getPointerOperand() const { return getOperand(0); }
  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() { return 0U; }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == PtrToInt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 BitCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a no-op cast from one type to another.
class BitCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical BitCastInst.
  BitCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  BitCastInst(
    Value *S,                           ///< The value to be casted
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  BitCastInst(
    Value *S,                           ///< The value to be casted
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr,               ///< A name for the new instruction
    BasicBlock *InsertAtEnd             ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == BitCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                              AddrSpaceCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a conversion between pointers from one address space
/// to another.
class AddrSpaceCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical AddrSpaceCastInst.
  AddrSpaceCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  AddrSpaceCastInst(
    Value *S,                           ///< The value to be casted
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  AddrSpaceCastInst(
    Value *S,                           ///< The value to be casted
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr,               ///< A name for the new instruction
    BasicBlock *InsertAtEnd             ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == AddrSpaceCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Gets the pointer operand.
  Value *getPointerOperand() {
    return getOperand(0);
  }

  /// Gets the pointer operand.
  const Value *getPointerOperand() const {
    return getOperand(0);
  }

  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() {
    return 0U;
  }

  /// Returns the address space of the pointer operand.
  unsigned getSrcAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the address space of the result.
  unsigned getDestAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }
};

/// A helper function that returns the pointer operand of a load or store
/// instruction. Returns nullptr if not load or store.
5253 inline const Value *getLoadStorePointerOperand(const Value *V) { 5254 if (auto *Load = dyn_cast<LoadInst>(V)) 5255 return Load->getPointerOperand(); 5256 if (auto *Store = dyn_cast<StoreInst>(V)) 5257 return Store->getPointerOperand(); 5258 return nullptr; 5259 } 5260 inline Value *getLoadStorePointerOperand(Value *V) { 5261 return const_cast<Value *>( 5262 getLoadStorePointerOperand(static_cast<const Value *>(V))); 5263 } 5264 5265 /// A helper function that returns the pointer operand of a load, store 5266 /// or GEP instruction. Returns nullptr if not load, store, or GEP. 5267 inline const Value *getPointerOperand(const Value *V) { 5268 if (auto *Ptr = getLoadStorePointerOperand(V)) 5269 return Ptr; 5270 if (auto *Gep = dyn_cast<GetElementPtrInst>(V)) 5271 return Gep->getPointerOperand(); 5272 return nullptr; 5273 } 5274 inline Value *getPointerOperand(Value *V) { 5275 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V))); 5276 } 5277 5278 /// A helper function that returns the alignment of load or store instruction. 5279 inline Align getLoadStoreAlignment(Value *I) { 5280 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 5281 "Expected Load or Store instruction"); 5282 if (auto *LI = dyn_cast<LoadInst>(I)) 5283 return LI->getAlign(); 5284 return cast<StoreInst>(I)->getAlign(); 5285 } 5286 5287 /// A helper function that returns the address space of the pointer operand of 5288 /// load or store instruction. 5289 inline unsigned getLoadStoreAddressSpace(Value *I) { 5290 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 5291 "Expected Load or Store instruction"); 5292 if (auto *LI = dyn_cast<LoadInst>(I)) 5293 return LI->getPointerAddressSpace(); 5294 return cast<StoreInst>(I)->getPointerAddressSpace(); 5295 } 5296 5297 /// A helper function that returns the type of a load or store instruction. 
5298 inline Type *getLoadStoreType(Value *I) { 5299 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 5300 "Expected Load or Store instruction"); 5301 if (auto *LI = dyn_cast<LoadInst>(I)) 5302 return LI->getType(); 5303 return cast<StoreInst>(I)->getValueOperand()->getType(); 5304 } 5305 5306 //===----------------------------------------------------------------------===// 5307 // FreezeInst Class 5308 //===----------------------------------------------------------------------===// 5309 5310 /// This class represents a freeze function that returns random concrete 5311 /// value if an operand is either a poison value or an undef value 5312 class FreezeInst : public UnaryInstruction { 5313 protected: 5314 // Note: Instruction needs to be a friend here to call cloneImpl. 5315 friend class Instruction; 5316 5317 /// Clone an identical FreezeInst 5318 FreezeInst *cloneImpl() const; 5319 5320 public: 5321 explicit FreezeInst(Value *S, 5322 const Twine &NameStr = "", 5323 Instruction *InsertBefore = nullptr); 5324 FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd); 5325 5326 // Methods for support type inquiry through isa, cast, and dyn_cast: 5327 static inline bool classof(const Instruction *I) { 5328 return I->getOpcode() == Freeze; 5329 } 5330 static inline bool classof(const Value *V) { 5331 return isa<Instruction>(V) && classof(cast<Instruction>(V)); 5332 } 5333 }; 5334 5335 } // end namespace llvm 5336 5337 #endif // LLVM_IR_INSTRUCTIONS_H 5338