1 //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file exposes the class definitions of all of the subclasses of the
10 // Instruction class.  This is meant to be an easy way to get access to all
11 // instruction subclasses.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifndef LLVM_IR_INSTRUCTIONS_H
16 #define LLVM_IR_INSTRUCTIONS_H
17 
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/Bitfields.h"
20 #include "llvm/ADT/MapVector.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Twine.h"
24 #include "llvm/ADT/iterator.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/IR/CFG.h"
27 #include "llvm/IR/Constant.h"
28 #include "llvm/IR/DerivedTypes.h"
29 #include "llvm/IR/InstrTypes.h"
30 #include "llvm/IR/Instruction.h"
31 #include "llvm/IR/OperandTraits.h"
32 #include "llvm/IR/Use.h"
33 #include "llvm/IR/User.h"
34 #include "llvm/Support/AtomicOrdering.h"
35 #include "llvm/Support/ErrorHandling.h"
36 #include <cassert>
37 #include <cstddef>
38 #include <cstdint>
39 #include <iterator>
40 #include <optional>
41 
42 namespace llvm {
43 
44 class APFloat;
45 class APInt;
46 class BasicBlock;
47 class ConstantInt;
48 class DataLayout;
49 class StringRef;
50 class Type;
51 class Value;
52 
53 //===----------------------------------------------------------------------===//
54 //                                AllocaInst Class
55 //===----------------------------------------------------------------------===//
56 
57 /// an instruction to allocate memory on the stack
58 class AllocaInst : public UnaryInstruction {
59   Type *AllocatedType;
60 
61   using AlignmentField = AlignmentBitfieldElementT<0>;
62   using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
63   using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
64   static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
65                                         SwiftErrorField>(),
66                 "Bitfields must be contiguous");
67 
68 protected:
69   // Note: Instruction needs to be a friend here to call cloneImpl.
70   friend class Instruction;
71 
72   AllocaInst *cloneImpl() const;
73 
74 public:
75   explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
76                       const Twine &Name, Instruction *InsertBefore);
77   AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
78              const Twine &Name, BasicBlock *InsertAtEnd);
79 
80   AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
81              Instruction *InsertBefore);
82   AllocaInst(Type *Ty, unsigned AddrSpace,
83              const Twine &Name, BasicBlock *InsertAtEnd);
84 
85   AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
86              const Twine &Name = "", Instruction *InsertBefore = nullptr);
87   AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
88              const Twine &Name, BasicBlock *InsertAtEnd);
89 
90   /// Return true if there is an allocation size parameter to the allocation
91   /// instruction that is not 1.
92   bool isArrayAllocation() const;
93 
94   /// Get the number of elements allocated. For a simple allocation of a single
95   /// element, this will return a constant 1 value.
getArraySize()96   const Value *getArraySize() const { return getOperand(0); }
getArraySize()97   Value *getArraySize() { return getOperand(0); }
98 
99   /// Overload to return most specific pointer type.
getType()100   PointerType *getType() const {
101     return cast<PointerType>(Instruction::getType());
102   }
103 
104   /// Return the address space for the allocation.
getAddressSpace()105   unsigned getAddressSpace() const {
106     return getType()->getAddressSpace();
107   }
108 
109   /// Get allocation size in bytes. Returns std::nullopt if size can't be
110   /// determined, e.g. in case of a VLA.
111   std::optional<TypeSize> getAllocationSize(const DataLayout &DL) const;
112 
113   /// Get allocation size in bits. Returns std::nullopt if size can't be
114   /// determined, e.g. in case of a VLA.
115   std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
116 
117   /// Return the type that is being allocated by the instruction.
getAllocatedType()118   Type *getAllocatedType() const { return AllocatedType; }
119   /// for use only in special circumstances that need to generically
120   /// transform a whole instruction (eg: IR linking and vectorization).
setAllocatedType(Type * Ty)121   void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
122 
123   /// Return the alignment of the memory that is being allocated by the
124   /// instruction.
getAlign()125   Align getAlign() const {
126     return Align(1ULL << getSubclassData<AlignmentField>());
127   }
128 
setAlignment(Align Align)129   void setAlignment(Align Align) {
130     setSubclassData<AlignmentField>(Log2(Align));
131   }
132 
133   /// Return true if this alloca is in the entry block of the function and is a
134   /// constant size. If so, the code generator will fold it into the
135   /// prolog/epilog code, so it is basically free.
136   bool isStaticAlloca() const;
137 
138   /// Return true if this alloca is used as an inalloca argument to a call. Such
139   /// allocas are never considered static even if they are in the entry block.
isUsedWithInAlloca()140   bool isUsedWithInAlloca() const {
141     return getSubclassData<UsedWithInAllocaField>();
142   }
143 
144   /// Specify whether this alloca is used to represent the arguments to a call.
setUsedWithInAlloca(bool V)145   void setUsedWithInAlloca(bool V) {
146     setSubclassData<UsedWithInAllocaField>(V);
147   }
148 
149   /// Return true if this alloca is used as a swifterror argument to a call.
isSwiftError()150   bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
151   /// Specify whether this alloca is used to represent a swifterror.
setSwiftError(bool V)152   void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
153 
154   // Methods for support type inquiry through isa, cast, and dyn_cast:
classof(const Instruction * I)155   static bool classof(const Instruction *I) {
156     return (I->getOpcode() == Instruction::Alloca);
157   }
classof(const Value * V)158   static bool classof(const Value *V) {
159     return isa<Instruction>(V) && classof(cast<Instruction>(V));
160   }
161 
162 private:
163   // Shadow Instruction::setInstructionSubclassData with a private forwarding
164   // method so that subclasses cannot accidentally use it.
165   template <typename Bitfield>
setSubclassData(typename Bitfield::Type Value)166   void setSubclassData(typename Bitfield::Type Value) {
167     Instruction::setSubclassData<Bitfield>(Value);
168   }
169 };
170 
171 //===----------------------------------------------------------------------===//
172 //                                LoadInst Class
173 //===----------------------------------------------------------------------===//
174 
175 /// An instruction for reading from memory. This uses the SubclassData field in
176 /// Value to store whether or not the load is volatile.
177 class LoadInst : public UnaryInstruction {
178   using VolatileField = BoolBitfieldElementT<0>;
179   using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
180   using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
181   static_assert(
182       Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
183       "Bitfields must be contiguous");
184 
185   void AssertOK();
186 
187 protected:
188   // Note: Instruction needs to be a friend here to call cloneImpl.
189   friend class Instruction;
190 
191   LoadInst *cloneImpl() const;
192 
193 public:
194   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
195            Instruction *InsertBefore);
196   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
197   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
198            Instruction *InsertBefore);
199   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
200            BasicBlock *InsertAtEnd);
201   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
202            Align Align, Instruction *InsertBefore = nullptr);
203   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
204            Align Align, BasicBlock *InsertAtEnd);
205   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
206            Align Align, AtomicOrdering Order,
207            SyncScope::ID SSID = SyncScope::System,
208            Instruction *InsertBefore = nullptr);
209   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
210            Align Align, AtomicOrdering Order, SyncScope::ID SSID,
211            BasicBlock *InsertAtEnd);
212 
213   /// Return true if this is a load from a volatile memory location.
isVolatile()214   bool isVolatile() const { return getSubclassData<VolatileField>(); }
215 
216   /// Specify whether this is a volatile load or not.
setVolatile(bool V)217   void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
218 
219   /// Return the alignment of the access that is being performed.
getAlign()220   Align getAlign() const {
221     return Align(1ULL << (getSubclassData<AlignmentField>()));
222   }
223 
setAlignment(Align Align)224   void setAlignment(Align Align) {
225     setSubclassData<AlignmentField>(Log2(Align));
226   }
227 
228   /// Returns the ordering constraint of this load instruction.
getOrdering()229   AtomicOrdering getOrdering() const {
230     return getSubclassData<OrderingField>();
231   }
232   /// Sets the ordering constraint of this load instruction.  May not be Release
233   /// or AcquireRelease.
setOrdering(AtomicOrdering Ordering)234   void setOrdering(AtomicOrdering Ordering) {
235     setSubclassData<OrderingField>(Ordering);
236   }
237 
238   /// Returns the synchronization scope ID of this load instruction.
getSyncScopeID()239   SyncScope::ID getSyncScopeID() const {
240     return SSID;
241   }
242 
243   /// Sets the synchronization scope ID of this load instruction.
setSyncScopeID(SyncScope::ID SSID)244   void setSyncScopeID(SyncScope::ID SSID) {
245     this->SSID = SSID;
246   }
247 
248   /// Sets the ordering constraint and the synchronization scope ID of this load
249   /// instruction.
250   void setAtomic(AtomicOrdering Ordering,
251                  SyncScope::ID SSID = SyncScope::System) {
252     setOrdering(Ordering);
253     setSyncScopeID(SSID);
254   }
255 
isSimple()256   bool isSimple() const { return !isAtomic() && !isVolatile(); }
257 
isUnordered()258   bool isUnordered() const {
259     return (getOrdering() == AtomicOrdering::NotAtomic ||
260             getOrdering() == AtomicOrdering::Unordered) &&
261            !isVolatile();
262   }
263 
getPointerOperand()264   Value *getPointerOperand() { return getOperand(0); }
getPointerOperand()265   const Value *getPointerOperand() const { return getOperand(0); }
getPointerOperandIndex()266   static unsigned getPointerOperandIndex() { return 0U; }
getPointerOperandType()267   Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
268 
269   /// Returns the address space of the pointer operand.
getPointerAddressSpace()270   unsigned getPointerAddressSpace() const {
271     return getPointerOperandType()->getPointerAddressSpace();
272   }
273 
274   // Methods for support type inquiry through isa, cast, and dyn_cast:
classof(const Instruction * I)275   static bool classof(const Instruction *I) {
276     return I->getOpcode() == Instruction::Load;
277   }
classof(const Value * V)278   static bool classof(const Value *V) {
279     return isa<Instruction>(V) && classof(cast<Instruction>(V));
280   }
281 
282 private:
283   // Shadow Instruction::setInstructionSubclassData with a private forwarding
284   // method so that subclasses cannot accidentally use it.
285   template <typename Bitfield>
setSubclassData(typename Bitfield::Type Value)286   void setSubclassData(typename Bitfield::Type Value) {
287     Instruction::setSubclassData<Bitfield>(Value);
288   }
289 
290   /// The synchronization scope ID of this load instruction.  Not quite enough
291   /// room in SubClassData for everything, so synchronization scope ID gets its
292   /// own field.
293   SyncScope::ID SSID;
294 };
295 
296 //===----------------------------------------------------------------------===//
297 //                                StoreInst Class
298 //===----------------------------------------------------------------------===//
299 
300 /// An instruction for storing to memory.
301 class StoreInst : public Instruction {
302   using VolatileField = BoolBitfieldElementT<0>;
303   using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
304   using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
305   static_assert(
306       Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
307       "Bitfields must be contiguous");
308 
309   void AssertOK();
310 
311 protected:
312   // Note: Instruction needs to be a friend here to call cloneImpl.
313   friend class Instruction;
314 
315   StoreInst *cloneImpl() const;
316 
317 public:
318   StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
319   StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
320   StoreInst(Value *Val, Value *Ptr, BasicBlock::iterator InsertBefore);
321   StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
322   StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
323   StoreInst(Value *Val, Value *Ptr, bool isVolatile,
324             BasicBlock::iterator InsertBefore);
325   StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
326             Instruction *InsertBefore = nullptr);
327   StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
328             BasicBlock *InsertAtEnd);
329   StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
330             BasicBlock::iterator InsertBefore);
331   StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
332             AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
333             Instruction *InsertBefore = nullptr);
334   StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
335             AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
336   StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
337             AtomicOrdering Order, SyncScope::ID SSID,
338             BasicBlock::iterator InsertBefore);
339 
340   // allocate space for exactly two operands
new(size_t S)341   void *operator new(size_t S) { return User::operator new(S, 2); }
delete(void * Ptr)342   void operator delete(void *Ptr) { User::operator delete(Ptr); }
343 
344   /// Return true if this is a store to a volatile memory location.
isVolatile()345   bool isVolatile() const { return getSubclassData<VolatileField>(); }
346 
347   /// Specify whether this is a volatile store or not.
setVolatile(bool V)348   void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
349 
350   /// Transparently provide more efficient getOperand methods.
351   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
352 
getAlign()353   Align getAlign() const {
354     return Align(1ULL << (getSubclassData<AlignmentField>()));
355   }
356 
setAlignment(Align Align)357   void setAlignment(Align Align) {
358     setSubclassData<AlignmentField>(Log2(Align));
359   }
360 
361   /// Returns the ordering constraint of this store instruction.
getOrdering()362   AtomicOrdering getOrdering() const {
363     return getSubclassData<OrderingField>();
364   }
365 
366   /// Sets the ordering constraint of this store instruction.  May not be
367   /// Acquire or AcquireRelease.
setOrdering(AtomicOrdering Ordering)368   void setOrdering(AtomicOrdering Ordering) {
369     setSubclassData<OrderingField>(Ordering);
370   }
371 
372   /// Returns the synchronization scope ID of this store instruction.
getSyncScopeID()373   SyncScope::ID getSyncScopeID() const {
374     return SSID;
375   }
376 
377   /// Sets the synchronization scope ID of this store instruction.
setSyncScopeID(SyncScope::ID SSID)378   void setSyncScopeID(SyncScope::ID SSID) {
379     this->SSID = SSID;
380   }
381 
382   /// Sets the ordering constraint and the synchronization scope ID of this
383   /// store instruction.
384   void setAtomic(AtomicOrdering Ordering,
385                  SyncScope::ID SSID = SyncScope::System) {
386     setOrdering(Ordering);
387     setSyncScopeID(SSID);
388   }
389 
isSimple()390   bool isSimple() const { return !isAtomic() && !isVolatile(); }
391 
isUnordered()392   bool isUnordered() const {
393     return (getOrdering() == AtomicOrdering::NotAtomic ||
394             getOrdering() == AtomicOrdering::Unordered) &&
395            !isVolatile();
396   }
397 
getValueOperand()398   Value *getValueOperand() { return getOperand(0); }
getValueOperand()399   const Value *getValueOperand() const { return getOperand(0); }
400 
getPointerOperand()401   Value *getPointerOperand() { return getOperand(1); }
getPointerOperand()402   const Value *getPointerOperand() const { return getOperand(1); }
getPointerOperandIndex()403   static unsigned getPointerOperandIndex() { return 1U; }
getPointerOperandType()404   Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
405 
406   /// Returns the address space of the pointer operand.
getPointerAddressSpace()407   unsigned getPointerAddressSpace() const {
408     return getPointerOperandType()->getPointerAddressSpace();
409   }
410 
411   // Methods for support type inquiry through isa, cast, and dyn_cast:
classof(const Instruction * I)412   static bool classof(const Instruction *I) {
413     return I->getOpcode() == Instruction::Store;
414   }
classof(const Value * V)415   static bool classof(const Value *V) {
416     return isa<Instruction>(V) && classof(cast<Instruction>(V));
417   }
418 
419 private:
420   // Shadow Instruction::setInstructionSubclassData with a private forwarding
421   // method so that subclasses cannot accidentally use it.
422   template <typename Bitfield>
setSubclassData(typename Bitfield::Type Value)423   void setSubclassData(typename Bitfield::Type Value) {
424     Instruction::setSubclassData<Bitfield>(Value);
425   }
426 
427   /// The synchronization scope ID of this store instruction.  Not quite enough
428   /// room in SubClassData for everything, so synchronization scope ID gets its
429   /// own field.
430   SyncScope::ID SSID;
431 };
432 
433 template <>
434 struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
435 };
436 
437 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
438 
439 //===----------------------------------------------------------------------===//
440 //                                FenceInst Class
441 //===----------------------------------------------------------------------===//
442 
443 /// An instruction for ordering other memory operations.
444 class FenceInst : public Instruction {
445   using OrderingField = AtomicOrderingBitfieldElementT<0>;
446 
447   void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
448 
449 protected:
450   // Note: Instruction needs to be a friend here to call cloneImpl.
451   friend class Instruction;
452 
453   FenceInst *cloneImpl() const;
454 
455 public:
456   // Ordering may only be Acquire, Release, AcquireRelease, or
457   // SequentiallyConsistent.
458   FenceInst(LLVMContext &C, AtomicOrdering Ordering,
459             SyncScope::ID SSID = SyncScope::System,
460             Instruction *InsertBefore = nullptr);
461   FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
462             BasicBlock *InsertAtEnd);
463 
464   // allocate space for exactly zero operands
465   void *operator new(size_t S) { return User::operator new(S, 0); }
466   void operator delete(void *Ptr) { User::operator delete(Ptr); }
467 
468   /// Returns the ordering constraint of this fence instruction.
469   AtomicOrdering getOrdering() const {
470     return getSubclassData<OrderingField>();
471   }
472 
473   /// Sets the ordering constraint of this fence instruction.  May only be
474   /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
475   void setOrdering(AtomicOrdering Ordering) {
476     setSubclassData<OrderingField>(Ordering);
477   }
478 
479   /// Returns the synchronization scope ID of this fence instruction.
480   SyncScope::ID getSyncScopeID() const {
481     return SSID;
482   }
483 
484   /// Sets the synchronization scope ID of this fence instruction.
485   void setSyncScopeID(SyncScope::ID SSID) {
486     this->SSID = SSID;
487   }
488 
489   // Methods for support type inquiry through isa, cast, and dyn_cast:
490   static bool classof(const Instruction *I) {
491     return I->getOpcode() == Instruction::Fence;
492   }
493   static bool classof(const Value *V) {
494     return isa<Instruction>(V) && classof(cast<Instruction>(V));
495   }
496 
497 private:
498   // Shadow Instruction::setInstructionSubclassData with a private forwarding
499   // method so that subclasses cannot accidentally use it.
500   template <typename Bitfield>
501   void setSubclassData(typename Bitfield::Type Value) {
502     Instruction::setSubclassData<Bitfield>(Value);
503   }
504 
505   /// The synchronization scope ID of this fence instruction.  Not quite enough
506   /// room in SubClassData for everything, so synchronization scope ID gets its
507   /// own field.
508   SyncScope::ID SSID;
509 };
510 
511 //===----------------------------------------------------------------------===//
512 //                                AtomicCmpXchgInst Class
513 //===----------------------------------------------------------------------===//
514 
515 /// An instruction that atomically checks whether a
516 /// specified value is in a memory location, and, if it is, stores a new value
517 /// there. The value returned by this instruction is a pair containing the
518 /// original value as first element, and an i1 indicating success (true) or
519 /// failure (false) as second element.
520 ///
521 class AtomicCmpXchgInst : public Instruction {
522   void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
523             AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
524             SyncScope::ID SSID);
525 
526   template <unsigned Offset>
527   using AtomicOrderingBitfieldElement =
528       typename Bitfield::Element<AtomicOrdering, Offset, 3,
529                                  AtomicOrdering::LAST>;
530 
531 protected:
532   // Note: Instruction needs to be a friend here to call cloneImpl.
533   friend class Instruction;
534 
535   AtomicCmpXchgInst *cloneImpl() const;
536 
537 public:
538   AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
539                     AtomicOrdering SuccessOrdering,
540                     AtomicOrdering FailureOrdering, SyncScope::ID SSID,
541                     Instruction *InsertBefore = nullptr);
542   AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
543                     AtomicOrdering SuccessOrdering,
544                     AtomicOrdering FailureOrdering, SyncScope::ID SSID,
545                     BasicBlock *InsertAtEnd);
546 
547   // allocate space for exactly three operands
548   void *operator new(size_t S) { return User::operator new(S, 3); }
549   void operator delete(void *Ptr) { User::operator delete(Ptr); }
550 
551   using VolatileField = BoolBitfieldElementT<0>;
552   using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
553   using SuccessOrderingField =
554       AtomicOrderingBitfieldElementT<WeakField::NextBit>;
555   using FailureOrderingField =
556       AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
557   using AlignmentField =
558       AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
559   static_assert(
560       Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
561                               FailureOrderingField, AlignmentField>(),
562       "Bitfields must be contiguous");
563 
564   /// Return the alignment of the memory that is being allocated by the
565   /// instruction.
566   Align getAlign() const {
567     return Align(1ULL << getSubclassData<AlignmentField>());
568   }
569 
570   void setAlignment(Align Align) {
571     setSubclassData<AlignmentField>(Log2(Align));
572   }
573 
574   /// Return true if this is a cmpxchg from a volatile memory
575   /// location.
576   ///
577   bool isVolatile() const { return getSubclassData<VolatileField>(); }
578 
579   /// Specify whether this is a volatile cmpxchg.
580   ///
581   void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
582 
583   /// Return true if this cmpxchg may spuriously fail.
584   bool isWeak() const { return getSubclassData<WeakField>(); }
585 
586   void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
587 
588   /// Transparently provide more efficient getOperand methods.
589   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
590 
591   static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
592     return Ordering != AtomicOrdering::NotAtomic &&
593            Ordering != AtomicOrdering::Unordered;
594   }
595 
596   static bool isValidFailureOrdering(AtomicOrdering Ordering) {
597     return Ordering != AtomicOrdering::NotAtomic &&
598            Ordering != AtomicOrdering::Unordered &&
599            Ordering != AtomicOrdering::AcquireRelease &&
600            Ordering != AtomicOrdering::Release;
601   }
602 
603   /// Returns the success ordering constraint of this cmpxchg instruction.
604   AtomicOrdering getSuccessOrdering() const {
605     return getSubclassData<SuccessOrderingField>();
606   }
607 
608   /// Sets the success ordering constraint of this cmpxchg instruction.
609   void setSuccessOrdering(AtomicOrdering Ordering) {
610     assert(isValidSuccessOrdering(Ordering) &&
611            "invalid CmpXchg success ordering");
612     setSubclassData<SuccessOrderingField>(Ordering);
613   }
614 
615   /// Returns the failure ordering constraint of this cmpxchg instruction.
616   AtomicOrdering getFailureOrdering() const {
617     return getSubclassData<FailureOrderingField>();
618   }
619 
620   /// Sets the failure ordering constraint of this cmpxchg instruction.
621   void setFailureOrdering(AtomicOrdering Ordering) {
622     assert(isValidFailureOrdering(Ordering) &&
623            "invalid CmpXchg failure ordering");
624     setSubclassData<FailureOrderingField>(Ordering);
625   }
626 
627   /// Returns a single ordering which is at least as strong as both the
628   /// success and failure orderings for this cmpxchg.
629   AtomicOrdering getMergedOrdering() const {
630     if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
631       return AtomicOrdering::SequentiallyConsistent;
632     if (getFailureOrdering() == AtomicOrdering::Acquire) {
633       if (getSuccessOrdering() == AtomicOrdering::Monotonic)
634         return AtomicOrdering::Acquire;
635       if (getSuccessOrdering() == AtomicOrdering::Release)
636         return AtomicOrdering::AcquireRelease;
637     }
638     return getSuccessOrdering();
639   }
640 
641   /// Returns the synchronization scope ID of this cmpxchg instruction.
642   SyncScope::ID getSyncScopeID() const {
643     return SSID;
644   }
645 
646   /// Sets the synchronization scope ID of this cmpxchg instruction.
647   void setSyncScopeID(SyncScope::ID SSID) {
648     this->SSID = SSID;
649   }
650 
651   Value *getPointerOperand() { return getOperand(0); }
652   const Value *getPointerOperand() const { return getOperand(0); }
653   static unsigned getPointerOperandIndex() { return 0U; }
654 
655   Value *getCompareOperand() { return getOperand(1); }
656   const Value *getCompareOperand() const { return getOperand(1); }
657 
658   Value *getNewValOperand() { return getOperand(2); }
659   const Value *getNewValOperand() const { return getOperand(2); }
660 
661   /// Returns the address space of the pointer operand.
662   unsigned getPointerAddressSpace() const {
663     return getPointerOperand()->getType()->getPointerAddressSpace();
664   }
665 
666   /// Returns the strongest permitted ordering on failure, given the
667   /// desired ordering on success.
668   ///
669   /// If the comparison in a cmpxchg operation fails, there is no atomic store
670   /// so release semantics cannot be provided. So this function drops explicit
671   /// Release requests from the AtomicOrdering. A SequentiallyConsistent
672   /// operation would remain SequentiallyConsistent.
673   static AtomicOrdering
674   getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
675     switch (SuccessOrdering) {
676     default:
677       llvm_unreachable("invalid cmpxchg success ordering");
678     case AtomicOrdering::Release:
679     case AtomicOrdering::Monotonic:
680       return AtomicOrdering::Monotonic;
681     case AtomicOrdering::AcquireRelease:
682     case AtomicOrdering::Acquire:
683       return AtomicOrdering::Acquire;
684     case AtomicOrdering::SequentiallyConsistent:
685       return AtomicOrdering::SequentiallyConsistent;
686     }
687   }
688 
689   // Methods for support type inquiry through isa, cast, and dyn_cast:
690   static bool classof(const Instruction *I) {
691     return I->getOpcode() == Instruction::AtomicCmpXchg;
692   }
693   static bool classof(const Value *V) {
694     return isa<Instruction>(V) && classof(cast<Instruction>(V));
695   }
696 
697 private:
698   // Shadow Instruction::setInstructionSubclassData with a private forwarding
699   // method so that subclasses cannot accidentally use it.
700   template <typename Bitfield>
701   void setSubclassData(typename Bitfield::Type Value) {
702     Instruction::setSubclassData<Bitfield>(Value);
703   }
704 
705   /// The synchronization scope ID of this cmpxchg instruction.  Not quite
706   /// enough room in SubClassData for everything, so synchronization scope ID
707   /// gets its own field.
708   SyncScope::ID SSID;
709 };
710 
711 template <>
712 struct OperandTraits<AtomicCmpXchgInst> :
713     public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
714 };
715 
716 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
717 
718 //===----------------------------------------------------------------------===//
719 //                                AtomicRMWInst Class
720 //===----------------------------------------------------------------------===//
721 
722 /// an instruction that atomically reads a memory location,
723 /// combines it with another value, and then stores the result back.  Returns
724 /// the old value.
725 ///
726 class AtomicRMWInst : public Instruction {
727 protected:
728   // Note: Instruction needs to be a friend here to call cloneImpl.
729   friend class Instruction;
730 
731   AtomicRMWInst *cloneImpl() const;
732 
733 public:
734   /// This enumeration lists the possible modifications atomicrmw can make.  In
735   /// the descriptions, 'p' is the pointer to the instruction's memory location,
736   /// 'old' is the initial value of *p, and 'v' is the other value passed to the
737   /// instruction.  These instructions always return 'old'.
738   enum BinOp : unsigned {
739     /// *p = v
740     Xchg,
741     /// *p = old + v
742     Add,
743     /// *p = old - v
744     Sub,
745     /// *p = old & v
746     And,
747     /// *p = ~(old & v)
748     Nand,
749     /// *p = old | v
750     Or,
751     /// *p = old ^ v
752     Xor,
753     /// *p = old >signed v ? old : v
754     Max,
755     /// *p = old <signed v ? old : v
756     Min,
757     /// *p = old >unsigned v ? old : v
758     UMax,
759     /// *p = old <unsigned v ? old : v
760     UMin,
761 
762     /// *p = old + v
763     FAdd,
764 
765     /// *p = old - v
766     FSub,
767 
768     /// *p = maxnum(old, v)
769     /// \p maxnum matches the behavior of \p llvm.maxnum.*.
770     FMax,
771 
772     /// *p = minnum(old, v)
773     /// \p minnum matches the behavior of \p llvm.minnum.*.
774     FMin,
775 
776     /// Increment one up to a maximum value.
777     /// *p = (old u>= v) ? 0 : (old + 1)
778     UIncWrap,
779 
780     /// Decrement one until a minimum value or zero.
781     /// *p = ((old == 0) || (old u> v)) ? v : (old - 1)
782     UDecWrap,
783 
784     FIRST_BINOP = Xchg,
785     LAST_BINOP = UDecWrap,
786     BAD_BINOP
787   };
788 
789 private:
790   template <unsigned Offset>
791   using AtomicOrderingBitfieldElement =
792       typename Bitfield::Element<AtomicOrdering, Offset, 3,
793                                  AtomicOrdering::LAST>;
794 
795   template <unsigned Offset>
796   using BinOpBitfieldElement =
797       typename Bitfield::Element<BinOp, Offset, 5, BinOp::LAST_BINOP>;
798 
799 public:
800   AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
801                 AtomicOrdering Ordering, SyncScope::ID SSID,
802                 Instruction *InsertBefore = nullptr);
803   AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
804                 AtomicOrdering Ordering, SyncScope::ID SSID,
805                 BasicBlock *InsertAtEnd);
806 
807   // allocate space for exactly two operands
808   void *operator new(size_t S) { return User::operator new(S, 2); }
809   void operator delete(void *Ptr) { User::operator delete(Ptr); }
810 
811   using VolatileField = BoolBitfieldElementT<0>;
812   using AtomicOrderingField =
813       AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
814   using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
815   using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
816   static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
817                                         OperationField, AlignmentField>(),
818                 "Bitfields must be contiguous");
819 
820   BinOp getOperation() const { return getSubclassData<OperationField>(); }
821 
822   static StringRef getOperationName(BinOp Op);
823 
824   static bool isFPOperation(BinOp Op) {
825     switch (Op) {
826     case AtomicRMWInst::FAdd:
827     case AtomicRMWInst::FSub:
828     case AtomicRMWInst::FMax:
829     case AtomicRMWInst::FMin:
830       return true;
831     default:
832       return false;
833     }
834   }
835 
836   void setOperation(BinOp Operation) {
837     setSubclassData<OperationField>(Operation);
838   }
839 
840   /// Return the alignment of the memory that is being allocated by the
841   /// instruction.
842   Align getAlign() const {
843     return Align(1ULL << getSubclassData<AlignmentField>());
844   }
845 
846   void setAlignment(Align Align) {
847     setSubclassData<AlignmentField>(Log2(Align));
848   }
849 
850   /// Return true if this is a RMW on a volatile memory location.
851   ///
852   bool isVolatile() const { return getSubclassData<VolatileField>(); }
853 
854   /// Specify whether this is a volatile RMW or not.
855   ///
856   void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
857 
858   /// Transparently provide more efficient getOperand methods.
859   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
860 
861   /// Returns the ordering constraint of this rmw instruction.
862   AtomicOrdering getOrdering() const {
863     return getSubclassData<AtomicOrderingField>();
864   }
865 
866   /// Sets the ordering constraint of this rmw instruction.
867   void setOrdering(AtomicOrdering Ordering) {
868     assert(Ordering != AtomicOrdering::NotAtomic &&
869            "atomicrmw instructions can only be atomic.");
870     assert(Ordering != AtomicOrdering::Unordered &&
871            "atomicrmw instructions cannot be unordered.");
872     setSubclassData<AtomicOrderingField>(Ordering);
873   }
874 
875   /// Returns the synchronization scope ID of this rmw instruction.
876   SyncScope::ID getSyncScopeID() const {
877     return SSID;
878   }
879 
880   /// Sets the synchronization scope ID of this rmw instruction.
881   void setSyncScopeID(SyncScope::ID SSID) {
882     this->SSID = SSID;
883   }
884 
885   Value *getPointerOperand() { return getOperand(0); }
886   const Value *getPointerOperand() const { return getOperand(0); }
887   static unsigned getPointerOperandIndex() { return 0U; }
888 
889   Value *getValOperand() { return getOperand(1); }
890   const Value *getValOperand() const { return getOperand(1); }
891 
892   /// Returns the address space of the pointer operand.
893   unsigned getPointerAddressSpace() const {
894     return getPointerOperand()->getType()->getPointerAddressSpace();
895   }
896 
897   bool isFloatingPointOperation() const {
898     return isFPOperation(getOperation());
899   }
900 
901   // Methods for support type inquiry through isa, cast, and dyn_cast:
902   static bool classof(const Instruction *I) {
903     return I->getOpcode() == Instruction::AtomicRMW;
904   }
905   static bool classof(const Value *V) {
906     return isa<Instruction>(V) && classof(cast<Instruction>(V));
907   }
908 
909 private:
910   void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
911             AtomicOrdering Ordering, SyncScope::ID SSID);
912 
913   // Shadow Instruction::setInstructionSubclassData with a private forwarding
914   // method so that subclasses cannot accidentally use it.
915   template <typename Bitfield>
916   void setSubclassData(typename Bitfield::Type Value) {
917     Instruction::setSubclassData<Bitfield>(Value);
918   }
919 
920   /// The synchronization scope ID of this rmw instruction.  Not quite enough
921   /// room in SubClassData for everything, so synchronization scope ID gets its
922   /// own field.
923   SyncScope::ID SSID;
924 };
925 
926 template <>
927 struct OperandTraits<AtomicRMWInst>
928     : public FixedNumOperandTraits<AtomicRMWInst,2> {
929 };
930 
931 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
932 
933 //===----------------------------------------------------------------------===//
934 //                             GetElementPtrInst Class
935 //===----------------------------------------------------------------------===//
936 
937 // checkGEPType - Simple wrapper function to give a better assertion failure
938 // message on bad indexes for a gep instruction.
939 //
940 inline Type *checkGEPType(Type *Ty) {
941   assert(Ty && "Invalid GetElementPtrInst indices for type!");
942   return Ty;
943 }
944 
945 /// an instruction for type-safe pointer arithmetic to
946 /// access elements of arrays and structs
947 ///
948 class GetElementPtrInst : public Instruction {
949   Type *SourceElementType;
950   Type *ResultElementType;
951 
952   GetElementPtrInst(const GetElementPtrInst &GEPI);
953 
954   /// Constructors - Create a getelementptr instruction with a base pointer an
955   /// list of indices. The first ctor can optionally insert before an existing
956   /// instruction, the second appends the new instruction to the specified
957   /// BasicBlock.
958   inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
959                            ArrayRef<Value *> IdxList, unsigned Values,
960                            const Twine &NameStr, Instruction *InsertBefore);
961   inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
962                            ArrayRef<Value *> IdxList, unsigned Values,
963                            const Twine &NameStr, BasicBlock *InsertAtEnd);
964 
965   void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
966 
967 protected:
968   // Note: Instruction needs to be a friend here to call cloneImpl.
969   friend class Instruction;
970 
971   GetElementPtrInst *cloneImpl() const;
972 
973 public:
974   static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
975                                    ArrayRef<Value *> IdxList,
976                                    const Twine &NameStr = "",
977                                    Instruction *InsertBefore = nullptr) {
978     unsigned Values = 1 + unsigned(IdxList.size());
979     assert(PointeeType && "Must specify element type");
980     return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
981                                           NameStr, InsertBefore);
982   }
983 
984   static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
985                                    ArrayRef<Value *> IdxList,
986                                    const Twine &NameStr,
987                                    BasicBlock *InsertAtEnd) {
988     unsigned Values = 1 + unsigned(IdxList.size());
989     assert(PointeeType && "Must specify element type");
990     return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
991                                           NameStr, InsertAtEnd);
992   }
993 
994   /// Create an "inbounds" getelementptr. See the documentation for the
995   /// "inbounds" flag in LangRef.html for details.
996   static GetElementPtrInst *
997   CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
998                  const Twine &NameStr = "",
999                  Instruction *InsertBefore = nullptr) {
1000     GetElementPtrInst *GEP =
1001         Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
1002     GEP->setIsInBounds(true);
1003     return GEP;
1004   }
1005 
1006   static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
1007                                            ArrayRef<Value *> IdxList,
1008                                            const Twine &NameStr,
1009                                            BasicBlock *InsertAtEnd) {
1010     GetElementPtrInst *GEP =
1011         Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1012     GEP->setIsInBounds(true);
1013     return GEP;
1014   }
1015 
1016   /// Transparently provide more efficient getOperand methods.
1017   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1018 
1019   Type *getSourceElementType() const { return SourceElementType; }
1020 
1021   void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1022   void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1023 
1024   Type *getResultElementType() const {
1025     return ResultElementType;
1026   }
1027 
1028   /// Returns the address space of this instruction's pointer type.
1029   unsigned getAddressSpace() const {
1030     // Note that this is always the same as the pointer operand's address space
1031     // and that is cheaper to compute, so cheat here.
1032     return getPointerAddressSpace();
1033   }
1034 
1035   /// Returns the result type of a getelementptr with the given source
1036   /// element type and indexes.
1037   ///
1038   /// Null is returned if the indices are invalid for the specified
1039   /// source element type.
1040   static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1041   static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1042   static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1043 
1044   /// Return the type of the element at the given index of an indexable
1045   /// type.  This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1046   ///
1047   /// Returns null if the type can't be indexed, or the given index is not
1048   /// legal for the given type.
1049   static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1050   static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1051 
1052   inline op_iterator       idx_begin()       { return op_begin()+1; }
1053   inline const_op_iterator idx_begin() const { return op_begin()+1; }
1054   inline op_iterator       idx_end()         { return op_end(); }
1055   inline const_op_iterator idx_end()   const { return op_end(); }
1056 
1057   inline iterator_range<op_iterator> indices() {
1058     return make_range(idx_begin(), idx_end());
1059   }
1060 
1061   inline iterator_range<const_op_iterator> indices() const {
1062     return make_range(idx_begin(), idx_end());
1063   }
1064 
1065   Value *getPointerOperand() {
1066     return getOperand(0);
1067   }
1068   const Value *getPointerOperand() const {
1069     return getOperand(0);
1070   }
1071   static unsigned getPointerOperandIndex() {
1072     return 0U;    // get index for modifying correct operand.
1073   }
1074 
1075   /// Method to return the pointer operand as a
1076   /// PointerType.
1077   Type *getPointerOperandType() const {
1078     return getPointerOperand()->getType();
1079   }
1080 
1081   /// Returns the address space of the pointer operand.
1082   unsigned getPointerAddressSpace() const {
1083     return getPointerOperandType()->getPointerAddressSpace();
1084   }
1085 
1086   /// Returns the pointer type returned by the GEP
1087   /// instruction, which may be a vector of pointers.
1088   static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
1089     // Vector GEP
1090     Type *Ty = Ptr->getType();
1091     if (Ty->isVectorTy())
1092       return Ty;
1093 
1094     for (Value *Index : IdxList)
1095       if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1096         ElementCount EltCount = IndexVTy->getElementCount();
1097         return VectorType::get(Ty, EltCount);
1098       }
1099     // Scalar GEP
1100     return Ty;
1101   }
1102 
1103   unsigned getNumIndices() const {  // Note: always non-negative
1104     return getNumOperands() - 1;
1105   }
1106 
1107   bool hasIndices() const {
1108     return getNumOperands() > 1;
1109   }
1110 
1111   /// Return true if all of the indices of this GEP are
1112   /// zeros.  If so, the result pointer and the first operand have the same
1113   /// value, just potentially different types.
1114   bool hasAllZeroIndices() const;
1115 
1116   /// Return true if all of the indices of this GEP are
1117   /// constant integers.  If so, the result pointer and the first operand have
1118   /// a constant offset between them.
1119   bool hasAllConstantIndices() const;
1120 
1121   /// Set or clear the inbounds flag on this GEP instruction.
1122   /// See LangRef.html for the meaning of inbounds on a getelementptr.
1123   void setIsInBounds(bool b = true);
1124 
1125   /// Determine whether the GEP has the inbounds flag.
1126   bool isInBounds() const;
1127 
1128   /// Accumulate the constant address offset of this GEP if possible.
1129   ///
1130   /// This routine accepts an APInt into which it will accumulate the constant
1131   /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1132   /// all-constant, it returns false and the value of the offset APInt is
1133   /// undefined (it is *not* preserved!). The APInt passed into this routine
1134   /// must be at least as wide as the IntPtr type for the address space of
1135   /// the base GEP pointer.
1136   bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1137   bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1138                      MapVector<Value *, APInt> &VariableOffsets,
1139                      APInt &ConstantOffset) const;
1140   // Methods for support type inquiry through isa, cast, and dyn_cast:
1141   static bool classof(const Instruction *I) {
1142     return (I->getOpcode() == Instruction::GetElementPtr);
1143   }
1144   static bool classof(const Value *V) {
1145     return isa<Instruction>(V) && classof(cast<Instruction>(V));
1146   }
1147 };
1148 
1149 template <>
1150 struct OperandTraits<GetElementPtrInst> :
1151   public VariadicOperandTraits<GetElementPtrInst, 1> {
1152 };
1153 
1154 GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1155                                      ArrayRef<Value *> IdxList, unsigned Values,
1156                                      const Twine &NameStr,
1157                                      Instruction *InsertBefore)
1158     : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
1159                   OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1160                   Values, InsertBefore),
1161       SourceElementType(PointeeType),
1162       ResultElementType(getIndexedType(PointeeType, IdxList)) {
1163   init(Ptr, IdxList, NameStr);
1164 }
1165 
1166 GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1167                                      ArrayRef<Value *> IdxList, unsigned Values,
1168                                      const Twine &NameStr,
1169                                      BasicBlock *InsertAtEnd)
1170     : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
1171                   OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1172                   Values, InsertAtEnd),
1173       SourceElementType(PointeeType),
1174       ResultElementType(getIndexedType(PointeeType, IdxList)) {
1175   init(Ptr, IdxList, NameStr);
1176 }
1177 
1178 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1179 
1180 //===----------------------------------------------------------------------===//
1181 //                               ICmpInst Class
1182 //===----------------------------------------------------------------------===//
1183 
1184 /// This instruction compares its operands according to the predicate given
1185 /// to the constructor. It only operates on integers or pointers. The operands
1186 /// must be identical types.
1187 /// Represent an integer comparison operator.
1188 class ICmpInst: public CmpInst {
1189   void AssertOK() {
1190     assert(isIntPredicate() &&
1191            "Invalid ICmp predicate value");
1192     assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1193           "Both operands to ICmp instruction are not of the same type!");
1194     // Check that the operands are the right type
1195     assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1196             getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1197            "Invalid operand types for ICmp instruction");
1198   }
1199 
1200 protected:
1201   // Note: Instruction needs to be a friend here to call cloneImpl.
1202   friend class Instruction;
1203 
1204   /// Clone an identical ICmpInst
1205   ICmpInst *cloneImpl() const;
1206 
1207 public:
1208   /// Constructor with insert-before-instruction semantics.
1209   ICmpInst(
1210     Instruction *InsertBefore,  ///< Where to insert
1211     Predicate pred,  ///< The predicate to use for the comparison
1212     Value *LHS,      ///< The left-hand-side of the expression
1213     Value *RHS,      ///< The right-hand-side of the expression
1214     const Twine &NameStr = ""  ///< Name of the instruction
1215   ) : CmpInst(makeCmpResultType(LHS->getType()),
1216               Instruction::ICmp, pred, LHS, RHS, NameStr,
1217               InsertBefore) {
1218 #ifndef NDEBUG
1219   AssertOK();
1220 #endif
1221   }
1222 
1223   /// Constructor with insert-at-end semantics.
1224   ICmpInst(
1225     BasicBlock &InsertAtEnd, ///< Block to insert into.
1226     Predicate pred,  ///< The predicate to use for the comparison
1227     Value *LHS,      ///< The left-hand-side of the expression
1228     Value *RHS,      ///< The right-hand-side of the expression
1229     const Twine &NameStr = ""  ///< Name of the instruction
1230   ) : CmpInst(makeCmpResultType(LHS->getType()),
1231               Instruction::ICmp, pred, LHS, RHS, NameStr,
1232               &InsertAtEnd) {
1233 #ifndef NDEBUG
1234   AssertOK();
1235 #endif
1236   }
1237 
1238   /// Constructor with no-insertion semantics
1239   ICmpInst(
1240     Predicate pred, ///< The predicate to use for the comparison
1241     Value *LHS,     ///< The left-hand-side of the expression
1242     Value *RHS,     ///< The right-hand-side of the expression
1243     const Twine &NameStr = "" ///< Name of the instruction
1244   ) : CmpInst(makeCmpResultType(LHS->getType()),
1245               Instruction::ICmp, pred, LHS, RHS, NameStr) {
1246 #ifndef NDEBUG
1247   AssertOK();
1248 #endif
1249   }
1250 
1251   /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1252   /// @returns the predicate that would be the result if the operand were
1253   /// regarded as signed.
1254   /// Return the signed version of the predicate
1255   Predicate getSignedPredicate() const {
1256     return getSignedPredicate(getPredicate());
1257   }
1258 
1259   /// This is a static version that you can use without an instruction.
1260   /// Return the signed version of the predicate.
1261   static Predicate getSignedPredicate(Predicate pred);
1262 
1263   /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1264   /// @returns the predicate that would be the result if the operand were
1265   /// regarded as unsigned.
1266   /// Return the unsigned version of the predicate
1267   Predicate getUnsignedPredicate() const {
1268     return getUnsignedPredicate(getPredicate());
1269   }
1270 
1271   /// This is a static version that you can use without an instruction.
1272   /// Return the unsigned version of the predicate.
1273   static Predicate getUnsignedPredicate(Predicate pred);
1274 
1275   /// Return true if this predicate is either EQ or NE.  This also
1276   /// tests for commutativity.
1277   static bool isEquality(Predicate P) {
1278     return P == ICMP_EQ || P == ICMP_NE;
1279   }
1280 
1281   /// Return true if this predicate is either EQ or NE.  This also
1282   /// tests for commutativity.
1283   bool isEquality() const {
1284     return isEquality(getPredicate());
1285   }
1286 
1287   /// @returns true if the predicate of this ICmpInst is commutative
1288   /// Determine if this relation is commutative.
1289   bool isCommutative() const { return isEquality(); }
1290 
1291   /// Return true if the predicate is relational (not EQ or NE).
1292   ///
1293   bool isRelational() const {
1294     return !isEquality();
1295   }
1296 
1297   /// Return true if the predicate is relational (not EQ or NE).
1298   ///
1299   static bool isRelational(Predicate P) {
1300     return !isEquality(P);
1301   }
1302 
1303   /// Return true if the predicate is SGT or UGT.
1304   ///
1305   static bool isGT(Predicate P) {
1306     return P == ICMP_SGT || P == ICMP_UGT;
1307   }
1308 
1309   /// Return true if the predicate is SLT or ULT.
1310   ///
1311   static bool isLT(Predicate P) {
1312     return P == ICMP_SLT || P == ICMP_ULT;
1313   }
1314 
1315   /// Return true if the predicate is SGE or UGE.
1316   ///
1317   static bool isGE(Predicate P) {
1318     return P == ICMP_SGE || P == ICMP_UGE;
1319   }
1320 
1321   /// Return true if the predicate is SLE or ULE.
1322   ///
1323   static bool isLE(Predicate P) {
1324     return P == ICMP_SLE || P == ICMP_ULE;
1325   }
1326 
1327   /// Returns the sequence of all ICmp predicates.
1328   ///
1329   static auto predicates() { return ICmpPredicates(); }
1330 
1331   /// Exchange the two operands to this instruction in such a way that it does
1332   /// not modify the semantics of the instruction. The predicate value may be
1333   /// changed to retain the same result if the predicate is order dependent
1334   /// (e.g. ult).
1335   /// Swap operands and adjust predicate.
1336   void swapOperands() {
1337     setPredicate(getSwappedPredicate());
1338     Op<0>().swap(Op<1>());
1339   }
1340 
1341   /// Return result of `LHS Pred RHS` comparison.
1342   static bool compare(const APInt &LHS, const APInt &RHS,
1343                       ICmpInst::Predicate Pred);
1344 
1345   // Methods for support type inquiry through isa, cast, and dyn_cast:
1346   static bool classof(const Instruction *I) {
1347     return I->getOpcode() == Instruction::ICmp;
1348   }
1349   static bool classof(const Value *V) {
1350     return isa<Instruction>(V) && classof(cast<Instruction>(V));
1351   }
1352 };
1353 
1354 //===----------------------------------------------------------------------===//
1355 //                               FCmpInst Class
1356 //===----------------------------------------------------------------------===//
1357 
1358 /// This instruction compares its operands according to the predicate given
1359 /// to the constructor. It only operates on floating point values or packed
1360 /// vectors of floating point values. The operands must be identical types.
1361 /// Represents a floating point comparison operator.
1362 class FCmpInst: public CmpInst {
1363   void AssertOK() {
1364     assert(isFPPredicate() && "Invalid FCmp predicate value");
1365     assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1366            "Both operands to FCmp instruction are not of the same type!");
1367     // Check that the operands are the right type
1368     assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1369            "Invalid operand types for FCmp instruction");
1370   }
1371 
1372 protected:
1373   // Note: Instruction needs to be a friend here to call cloneImpl.
1374   friend class Instruction;
1375 
1376   /// Clone an identical FCmpInst
1377   FCmpInst *cloneImpl() const;
1378 
1379 public:
1380   /// Constructor with insert-before-instruction semantics.
1381   FCmpInst(
1382     Instruction *InsertBefore, ///< Where to insert
1383     Predicate pred,  ///< The predicate to use for the comparison
1384     Value *LHS,      ///< The left-hand-side of the expression
1385     Value *RHS,      ///< The right-hand-side of the expression
1386     const Twine &NameStr = ""  ///< Name of the instruction
1387   ) : CmpInst(makeCmpResultType(LHS->getType()),
1388               Instruction::FCmp, pred, LHS, RHS, NameStr,
1389               InsertBefore) {
1390     AssertOK();
1391   }
1392 
1393   /// Constructor with insert-at-end semantics.
1394   FCmpInst(
1395     BasicBlock &InsertAtEnd, ///< Block to insert into.
1396     Predicate pred,  ///< The predicate to use for the comparison
1397     Value *LHS,      ///< The left-hand-side of the expression
1398     Value *RHS,      ///< The right-hand-side of the expression
1399     const Twine &NameStr = ""  ///< Name of the instruction
1400   ) : CmpInst(makeCmpResultType(LHS->getType()),
1401               Instruction::FCmp, pred, LHS, RHS, NameStr,
1402               &InsertAtEnd) {
1403     AssertOK();
1404   }
1405 
1406   /// Constructor with no-insertion semantics
1407   FCmpInst(
1408     Predicate Pred, ///< The predicate to use for the comparison
1409     Value *LHS,     ///< The left-hand-side of the expression
1410     Value *RHS,     ///< The right-hand-side of the expression
1411     const Twine &NameStr = "", ///< Name of the instruction
1412     Instruction *FlagsSource = nullptr
1413   ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1414               RHS, NameStr, nullptr, FlagsSource) {
1415     AssertOK();
1416   }
1417 
1418   /// @returns true if the predicate of this instruction is EQ or NE.
1419   /// Determine if this is an equality predicate.
1420   static bool isEquality(Predicate Pred) {
1421     return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1422            Pred == FCMP_UNE;
1423   }
1424 
1425   /// @returns true if the predicate of this instruction is EQ or NE.
1426   /// Determine if this is an equality predicate.
1427   bool isEquality() const { return isEquality(getPredicate()); }
1428 
1429   /// @returns true if the predicate of this instruction is commutative.
1430   /// Determine if this is a commutative predicate.
1431   bool isCommutative() const {
1432     return isEquality() ||
1433            getPredicate() == FCMP_FALSE ||
1434            getPredicate() == FCMP_TRUE ||
1435            getPredicate() == FCMP_ORD ||
1436            getPredicate() == FCMP_UNO;
1437   }
1438 
1439   /// @returns true if the predicate is relational (not EQ or NE).
1440   /// Determine if this a relational predicate.
1441   bool isRelational() const { return !isEquality(); }
1442 
1443   /// Exchange the two operands to this instruction in such a way that it does
1444   /// not modify the semantics of the instruction. The predicate value may be
1445   /// changed to retain the same result if the predicate is order dependent
1446   /// (e.g. ult).
1447   /// Swap operands and adjust predicate.
1448   void swapOperands() {
1449     setPredicate(getSwappedPredicate());
1450     Op<0>().swap(Op<1>());
1451   }
1452 
1453   /// Returns the sequence of all FCmp predicates.
1454   ///
1455   static auto predicates() { return FCmpPredicates(); }
1456 
1457   /// Return result of `LHS Pred RHS` comparison.
1458   static bool compare(const APFloat &LHS, const APFloat &RHS,
1459                       FCmpInst::Predicate Pred);
1460 
1461   /// Methods for support type inquiry through isa, cast, and dyn_cast:
1462   static bool classof(const Instruction *I) {
1463     return I->getOpcode() == Instruction::FCmp;
1464   }
1465   static bool classof(const Value *V) {
1466     return isa<Instruction>(V) && classof(cast<Instruction>(V));
1467   }
1468 };
1469 
1470 //===----------------------------------------------------------------------===//
1471 /// This class represents a function call, abstracting a target
1472 /// machine's calling convention.  This class uses low bit of the SubClassData
1473 /// field to indicate whether or not this is a tail call.  The rest of the bits
1474 /// hold the calling convention of the call.
1475 ///
1476 class CallInst : public CallBase {
1477   CallInst(const CallInst &CI);
1478 
1479   /// Construct a CallInst given a range of arguments.
1480   /// Construct a CallInst from a range of arguments
1481   inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1482                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1483                   Instruction *InsertBefore);
1484 
1485   inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1486                   const Twine &NameStr, Instruction *InsertBefore)
1487       : CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {}
1488 
1489   /// Construct a CallInst given a range of arguments.
1490   /// Construct a CallInst from a range of arguments
1491   inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1492                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1493                   BasicBlock *InsertAtEnd);
1494 
1495   explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1496                     Instruction *InsertBefore);
1497 
1498   CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1499            BasicBlock *InsertAtEnd);
1500 
1501   void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1502             ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1503   void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1504 
1505   /// Compute the number of operands to allocate.
1506   static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1507     // We need one operand for the called function, plus the input operand
1508     // counts provided.
1509     return 1 + NumArgs + NumBundleInputs;
1510   }
1511 
1512 protected:
1513   // Note: Instruction needs to be a friend here to call cloneImpl.
1514   friend class Instruction;
1515 
1516   CallInst *cloneImpl() const;
1517 
1518 public:
1519   static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1520                           Instruction *InsertBefore = nullptr) {
1521     return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1522   }
1523 
1524   static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1525                           const Twine &NameStr,
1526                           Instruction *InsertBefore = nullptr) {
1527     return new (ComputeNumOperands(Args.size()))
1528         CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore);
1529   }
1530 
1531   static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1532                           ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1533                           const Twine &NameStr = "",
1534                           Instruction *InsertBefore = nullptr) {
1535     const int NumOperands =
1536         ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1537     const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1538 
1539     return new (NumOperands, DescriptorBytes)
1540         CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1541   }
1542 
1543   static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1544                           BasicBlock *InsertAtEnd) {
1545     return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1546   }
1547 
1548   static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1549                           const Twine &NameStr, BasicBlock *InsertAtEnd) {
1550     return new (ComputeNumOperands(Args.size()))
1551         CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertAtEnd);
1552   }
1553 
1554   static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1555                           ArrayRef<OperandBundleDef> Bundles,
1556                           const Twine &NameStr, BasicBlock *InsertAtEnd) {
1557     const int NumOperands =
1558         ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1559     const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1560 
1561     return new (NumOperands, DescriptorBytes)
1562         CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1563   }
1564 
1565   static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1566                           Instruction *InsertBefore = nullptr) {
1567     return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1568                   InsertBefore);
1569   }
1570 
1571   static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1572                           ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1573                           const Twine &NameStr = "",
1574                           Instruction *InsertBefore = nullptr) {
1575     return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1576                   NameStr, InsertBefore);
1577   }
1578 
1579   static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1580                           const Twine &NameStr,
1581                           Instruction *InsertBefore = nullptr) {
1582     return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1583                   InsertBefore);
1584   }
1585 
1586   static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1587                           BasicBlock *InsertAtEnd) {
1588     return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1589                   InsertAtEnd);
1590   }
1591 
1592   static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1593                           const Twine &NameStr, BasicBlock *InsertAtEnd) {
1594     return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1595                   InsertAtEnd);
1596   }
1597 
1598   static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1599                           ArrayRef<OperandBundleDef> Bundles,
1600                           const Twine &NameStr, BasicBlock *InsertAtEnd) {
1601     return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1602                   NameStr, InsertAtEnd);
1603   }
1604 
1605   /// Create a clone of \p CI with a different set of operand bundles and
1606   /// insert it before \p InsertPt.
1607   ///
1608   /// The returned call instruction is identical \p CI in every way except that
1609   /// the operand bundles for the new instruction are set to the operand bundles
1610   /// in \p Bundles.
1611   static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1612                           Instruction *InsertPt = nullptr);
1613 
1614   // Note that 'musttail' implies 'tail'.
1615   enum TailCallKind : unsigned {
1616     TCK_None = 0,
1617     TCK_Tail = 1,
1618     TCK_MustTail = 2,
1619     TCK_NoTail = 3,
1620     TCK_LAST = TCK_NoTail
1621   };
1622 
1623   using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1624   static_assert(
1625       Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1626       "Bitfields must be contiguous");
1627 
1628   TailCallKind getTailCallKind() const {
1629     return getSubclassData<TailCallKindField>();
1630   }
1631 
1632   bool isTailCall() const {
1633     TailCallKind Kind = getTailCallKind();
1634     return Kind == TCK_Tail || Kind == TCK_MustTail;
1635   }
1636 
1637   bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1638 
1639   bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1640 
1641   void setTailCallKind(TailCallKind TCK) {
1642     setSubclassData<TailCallKindField>(TCK);
1643   }
1644 
1645   void setTailCall(bool IsTc = true) {
1646     setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1647   }
1648 
1649   /// Return true if the call can return twice
1650   bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1651   void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1652 
1653   // Methods for support type inquiry through isa, cast, and dyn_cast:
1654   static bool classof(const Instruction *I) {
1655     return I->getOpcode() == Instruction::Call;
1656   }
1657   static bool classof(const Value *V) {
1658     return isa<Instruction>(V) && classof(cast<Instruction>(V));
1659   }
1660 
1661   /// Updates profile metadata by scaling it by \p S / \p T.
1662   void updateProfWeight(uint64_t S, uint64_t T);
1663 
1664 private:
1665   // Shadow Instruction::setInstructionSubclassData with a private forwarding
1666   // method so that subclasses cannot accidentally use it.
1667   template <typename Bitfield>
1668   void setSubclassData(typename Bitfield::Type Value) {
1669     Instruction::setSubclassData<Bitfield>(Value);
1670   }
1671 };
1672 
1673 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1674                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1675                    BasicBlock *InsertAtEnd)
1676     : CallBase(Ty->getReturnType(), Instruction::Call,
1677                OperandTraits<CallBase>::op_end(this) -
1678                    (Args.size() + CountBundleInputs(Bundles) + 1),
1679                unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1680                InsertAtEnd) {
1681   init(Ty, Func, Args, Bundles, NameStr);
1682 }
1683 
1684 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1685                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1686                    Instruction *InsertBefore)
1687     : CallBase(Ty->getReturnType(), Instruction::Call,
1688                OperandTraits<CallBase>::op_end(this) -
1689                    (Args.size() + CountBundleInputs(Bundles) + 1),
1690                unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1691                InsertBefore) {
1692   init(Ty, Func, Args, Bundles, NameStr);
1693 }
1694 
1695 //===----------------------------------------------------------------------===//
1696 //                               SelectInst Class
1697 //===----------------------------------------------------------------------===//
1698 
1699 /// This class represents the LLVM 'select' instruction.
1700 ///
1701 class SelectInst : public Instruction {
1702   SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1703              Instruction *InsertBefore)
1704     : Instruction(S1->getType(), Instruction::Select,
1705                   &Op<0>(), 3, InsertBefore) {
1706     init(C, S1, S2);
1707     setName(NameStr);
1708   }
1709 
1710   SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1711              BasicBlock *InsertAtEnd)
1712     : Instruction(S1->getType(), Instruction::Select,
1713                   &Op<0>(), 3, InsertAtEnd) {
1714     init(C, S1, S2);
1715     setName(NameStr);
1716   }
1717 
1718   void init(Value *C, Value *S1, Value *S2) {
1719     assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1720     Op<0>() = C;
1721     Op<1>() = S1;
1722     Op<2>() = S2;
1723   }
1724 
1725 protected:
1726   // Note: Instruction needs to be a friend here to call cloneImpl.
1727   friend class Instruction;
1728 
1729   SelectInst *cloneImpl() const;
1730 
1731 public:
1732   static SelectInst *Create(Value *C, Value *S1, Value *S2,
1733                             const Twine &NameStr = "",
1734                             Instruction *InsertBefore = nullptr,
1735                             Instruction *MDFrom = nullptr) {
1736     SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1737     if (MDFrom)
1738       Sel->copyMetadata(*MDFrom);
1739     return Sel;
1740   }
1741 
1742   static SelectInst *Create(Value *C, Value *S1, Value *S2,
1743                             const Twine &NameStr,
1744                             BasicBlock *InsertAtEnd) {
1745     return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1746   }
1747 
1748   const Value *getCondition() const { return Op<0>(); }
1749   const Value *getTrueValue() const { return Op<1>(); }
1750   const Value *getFalseValue() const { return Op<2>(); }
1751   Value *getCondition() { return Op<0>(); }
1752   Value *getTrueValue() { return Op<1>(); }
1753   Value *getFalseValue() { return Op<2>(); }
1754 
1755   void setCondition(Value *V) { Op<0>() = V; }
1756   void setTrueValue(Value *V) { Op<1>() = V; }
1757   void setFalseValue(Value *V) { Op<2>() = V; }
1758 
1759   /// Swap the true and false values of the select instruction.
1760   /// This doesn't swap prof metadata.
1761   void swapValues() { Op<1>().swap(Op<2>()); }
1762 
1763   /// Return a string if the specified operands are invalid
1764   /// for a select operation, otherwise return null.
1765   static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1766 
1767   /// Transparently provide more efficient getOperand methods.
1768   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1769 
1770   OtherOps getOpcode() const {
1771     return static_cast<OtherOps>(Instruction::getOpcode());
1772   }
1773 
1774   // Methods for support type inquiry through isa, cast, and dyn_cast:
1775   static bool classof(const Instruction *I) {
1776     return I->getOpcode() == Instruction::Select;
1777   }
1778   static bool classof(const Value *V) {
1779     return isa<Instruction>(V) && classof(cast<Instruction>(V));
1780   }
1781 };
1782 
1783 template <>
1784 struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1785 };
1786 
1787 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
1788 
1789 //===----------------------------------------------------------------------===//
1790 //                                VAArgInst Class
1791 //===----------------------------------------------------------------------===//
1792 
1793 /// This class represents the va_arg llvm instruction, which returns
1794 /// an argument of the specified type given a va_list and increments that list
1795 ///
1796 class VAArgInst : public UnaryInstruction {
1797 protected:
1798   // Note: Instruction needs to be a friend here to call cloneImpl.
1799   friend class Instruction;
1800 
1801   VAArgInst *cloneImpl() const;
1802 
1803 public:
1804   VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1805              Instruction *InsertBefore = nullptr)
1806     : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1807     setName(NameStr);
1808   }
1809 
1810   VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1811             BasicBlock *InsertAtEnd)
1812     : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1813     setName(NameStr);
1814   }
1815 
1816   Value *getPointerOperand() { return getOperand(0); }
1817   const Value *getPointerOperand() const { return getOperand(0); }
1818   static unsigned getPointerOperandIndex() { return 0U; }
1819 
1820   // Methods for support type inquiry through isa, cast, and dyn_cast:
1821   static bool classof(const Instruction *I) {
1822     return I->getOpcode() == VAArg;
1823   }
1824   static bool classof(const Value *V) {
1825     return isa<Instruction>(V) && classof(cast<Instruction>(V));
1826   }
1827 };
1828 
1829 //===----------------------------------------------------------------------===//
1830 //                                ExtractElementInst Class
1831 //===----------------------------------------------------------------------===//
1832 
1833 /// This instruction extracts a single (scalar)
1834 /// element from a VectorType value
1835 ///
1836 class ExtractElementInst : public Instruction {
1837   ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1838                      Instruction *InsertBefore = nullptr);
1839   ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1840                      BasicBlock *InsertAtEnd);
1841 
1842 protected:
1843   // Note: Instruction needs to be a friend here to call cloneImpl.
1844   friend class Instruction;
1845 
1846   ExtractElementInst *cloneImpl() const;
1847 
1848 public:
1849   static ExtractElementInst *Create(Value *Vec, Value *Idx,
1850                                    const Twine &NameStr = "",
1851                                    Instruction *InsertBefore = nullptr) {
1852     return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1853   }
1854 
1855   static ExtractElementInst *Create(Value *Vec, Value *Idx,
1856                                    const Twine &NameStr,
1857                                    BasicBlock *InsertAtEnd) {
1858     return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1859   }
1860 
1861   /// Return true if an extractelement instruction can be
1862   /// formed with the specified operands.
1863   static bool isValidOperands(const Value *Vec, const Value *Idx);
1864 
1865   Value *getVectorOperand() { return Op<0>(); }
1866   Value *getIndexOperand() { return Op<1>(); }
1867   const Value *getVectorOperand() const { return Op<0>(); }
1868   const Value *getIndexOperand() const { return Op<1>(); }
1869 
1870   VectorType *getVectorOperandType() const {
1871     return cast<VectorType>(getVectorOperand()->getType());
1872   }
1873 
1874   /// Transparently provide more efficient getOperand methods.
1875   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1876 
1877   // Methods for support type inquiry through isa, cast, and dyn_cast:
1878   static bool classof(const Instruction *I) {
1879     return I->getOpcode() == Instruction::ExtractElement;
1880   }
1881   static bool classof(const Value *V) {
1882     return isa<Instruction>(V) && classof(cast<Instruction>(V));
1883   }
1884 };
1885 
1886 template <>
1887 struct OperandTraits<ExtractElementInst> :
1888   public FixedNumOperandTraits<ExtractElementInst, 2> {
1889 };
1890 
1891 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
1892 
1893 //===----------------------------------------------------------------------===//
1894 //                                InsertElementInst Class
1895 //===----------------------------------------------------------------------===//
1896 
1897 /// This instruction inserts a single (scalar)
1898 /// element into a VectorType value
1899 ///
1900 class InsertElementInst : public Instruction {
1901   InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1902                     const Twine &NameStr = "",
1903                     Instruction *InsertBefore = nullptr);
1904   InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1905                     BasicBlock *InsertAtEnd);
1906 
1907 protected:
1908   // Note: Instruction needs to be a friend here to call cloneImpl.
1909   friend class Instruction;
1910 
1911   InsertElementInst *cloneImpl() const;
1912 
1913 public:
1914   static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1915                                    const Twine &NameStr = "",
1916                                    Instruction *InsertBefore = nullptr) {
1917     return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1918   }
1919 
1920   static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1921                                    const Twine &NameStr,
1922                                    BasicBlock *InsertAtEnd) {
1923     return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1924   }
1925 
1926   /// Return true if an insertelement instruction can be
1927   /// formed with the specified operands.
1928   static bool isValidOperands(const Value *Vec, const Value *NewElt,
1929                               const Value *Idx);
1930 
1931   /// Overload to return most specific vector type.
1932   ///
1933   VectorType *getType() const {
1934     return cast<VectorType>(Instruction::getType());
1935   }
1936 
1937   /// Transparently provide more efficient getOperand methods.
1938   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1939 
1940   // Methods for support type inquiry through isa, cast, and dyn_cast:
1941   static bool classof(const Instruction *I) {
1942     return I->getOpcode() == Instruction::InsertElement;
1943   }
1944   static bool classof(const Value *V) {
1945     return isa<Instruction>(V) && classof(cast<Instruction>(V));
1946   }
1947 };
1948 
1949 template <>
1950 struct OperandTraits<InsertElementInst> :
1951   public FixedNumOperandTraits<InsertElementInst, 3> {
1952 };
1953 
1954 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
1955 
1956 //===----------------------------------------------------------------------===//
1957 //                           ShuffleVectorInst Class
1958 //===----------------------------------------------------------------------===//
1959 
1960 constexpr int PoisonMaskElem = -1;
1961 
1962 /// This instruction constructs a fixed permutation of two
1963 /// input vectors.
1964 ///
1965 /// For each element of the result vector, the shuffle mask selects an element
1966 /// from one of the input vectors to copy to the result. Non-negative elements
1967 /// in the mask represent an index into the concatenated pair of input vectors.
1968 /// PoisonMaskElem (-1) specifies that the result element is poison.
1969 ///
1970 /// For scalable vectors, all the elements of the mask must be 0 or -1. This
1971 /// requirement may be relaxed in the future.
1972 class ShuffleVectorInst : public Instruction {
1973   SmallVector<int, 4> ShuffleMask;
1974   Constant *ShuffleMaskForBitcode;
1975 
1976 protected:
1977   // Note: Instruction needs to be a friend here to call cloneImpl.
1978   friend class Instruction;
1979 
1980   ShuffleVectorInst *cloneImpl() const;
1981 
1982 public:
1983   ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
1984                     Instruction *InsertBefore = nullptr);
1985   ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
1986                     BasicBlock *InsertAtEnd);
1987   ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
1988                     Instruction *InsertBefore = nullptr);
1989   ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
1990                     BasicBlock *InsertAtEnd);
1991   ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1992                     const Twine &NameStr = "",
1993                     Instruction *InsertBefor = nullptr);
1994   ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1995                     const Twine &NameStr, BasicBlock *InsertAtEnd);
1996   ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
1997                     const Twine &NameStr = "",
1998                     Instruction *InsertBefor = nullptr);
1999   ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2000                     const Twine &NameStr, BasicBlock *InsertAtEnd);
2001 
2002   void *operator new(size_t S) { return User::operator new(S, 2); }
2003   void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2004 
2005   /// Swap the operands and adjust the mask to preserve the semantics
2006   /// of the instruction.
2007   void commute();
2008 
2009   /// Return true if a shufflevector instruction can be
2010   /// formed with the specified operands.
2011   static bool isValidOperands(const Value *V1, const Value *V2,
2012                               const Value *Mask);
2013   static bool isValidOperands(const Value *V1, const Value *V2,
2014                               ArrayRef<int> Mask);
2015 
2016   /// Overload to return most specific vector type.
2017   ///
2018   VectorType *getType() const {
2019     return cast<VectorType>(Instruction::getType());
2020   }
2021 
2022   /// Transparently provide more efficient getOperand methods.
2023   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2024 
2025   /// Return the shuffle mask value of this instruction for the given element
2026   /// index. Return PoisonMaskElem if the element is undef.
2027   int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2028 
2029   /// Convert the input shuffle mask operand to a vector of integers. Undefined
2030   /// elements of the mask are returned as PoisonMaskElem.
2031   static void getShuffleMask(const Constant *Mask,
2032                              SmallVectorImpl<int> &Result);
2033 
2034   /// Return the mask for this instruction as a vector of integers. Undefined
2035   /// elements of the mask are returned as PoisonMaskElem.
2036   void getShuffleMask(SmallVectorImpl<int> &Result) const {
2037     Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2038   }
2039 
2040   /// Return the mask for this instruction, for use in bitcode.
2041   ///
2042   /// TODO: This is temporary until we decide a new bitcode encoding for
2043   /// shufflevector.
2044   Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2045 
2046   static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2047                                                 Type *ResultTy);
2048 
2049   void setShuffleMask(ArrayRef<int> Mask);
2050 
2051   ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2052 
2053   /// Return true if this shuffle returns a vector with a different number of
2054   /// elements than its source vectors.
2055   /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2056   ///           shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2057   bool changesLength() const {
2058     unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2059                                  ->getElementCount()
2060                                  .getKnownMinValue();
2061     unsigned NumMaskElts = ShuffleMask.size();
2062     return NumSourceElts != NumMaskElts;
2063   }
2064 
2065   /// Return true if this shuffle returns a vector with a greater number of
2066   /// elements than its source vectors.
2067   /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2068   bool increasesLength() const {
2069     unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2070                                  ->getElementCount()
2071                                  .getKnownMinValue();
2072     unsigned NumMaskElts = ShuffleMask.size();
2073     return NumSourceElts < NumMaskElts;
2074   }
2075 
2076   /// Return true if this shuffle mask chooses elements from exactly one source
2077   /// vector.
2078   /// Example: <7,5,undef,7>
2079   /// This assumes that vector operands (of length \p NumSrcElts) are the same
2080   /// length as the mask.
2081   static bool isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts);
2082   static bool isSingleSourceMask(const Constant *Mask, int NumSrcElts) {
2083     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2084     SmallVector<int, 16> MaskAsInts;
2085     getShuffleMask(Mask, MaskAsInts);
2086     return isSingleSourceMask(MaskAsInts, NumSrcElts);
2087   }
2088 
2089   /// Return true if this shuffle chooses elements from exactly one source
2090   /// vector without changing the length of that vector.
2091   /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2092   /// TODO: Optionally allow length-changing shuffles.
2093   bool isSingleSource() const {
2094     return !changesLength() &&
2095            isSingleSourceMask(ShuffleMask, ShuffleMask.size());
2096   }
2097 
2098   /// Return true if this shuffle mask chooses elements from exactly one source
2099   /// vector without lane crossings. A shuffle using this mask is not
2100   /// necessarily a no-op because it may change the number of elements from its
2101   /// input vectors or it may provide demanded bits knowledge via undef lanes.
2102   /// Example: <undef,undef,2,3>
2103   static bool isIdentityMask(ArrayRef<int> Mask, int NumSrcElts);
2104   static bool isIdentityMask(const Constant *Mask, int NumSrcElts) {
2105     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2106 
2107     // Not possible to express a shuffle mask for a scalable vector for this
2108     // case.
2109     if (isa<ScalableVectorType>(Mask->getType()))
2110       return false;
2111 
2112     SmallVector<int, 16> MaskAsInts;
2113     getShuffleMask(Mask, MaskAsInts);
2114     return isIdentityMask(MaskAsInts, NumSrcElts);
2115   }
2116 
2117   /// Return true if this shuffle chooses elements from exactly one source
2118   /// vector without lane crossings and does not change the number of elements
2119   /// from its input vectors.
2120   /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2121   bool isIdentity() const {
2122     // Not possible to express a shuffle mask for a scalable vector for this
2123     // case.
2124     if (isa<ScalableVectorType>(getType()))
2125       return false;
2126 
2127     return !changesLength() && isIdentityMask(ShuffleMask, ShuffleMask.size());
2128   }
2129 
2130   /// Return true if this shuffle lengthens exactly one source vector with
2131   /// undefs in the high elements.
2132   bool isIdentityWithPadding() const;
2133 
2134   /// Return true if this shuffle extracts the first N elements of exactly one
2135   /// source vector.
2136   bool isIdentityWithExtract() const;
2137 
2138   /// Return true if this shuffle concatenates its 2 source vectors. This
2139   /// returns false if either input is undefined. In that case, the shuffle is
2140   /// is better classified as an identity with padding operation.
2141   bool isConcat() const;
2142 
2143   /// Return true if this shuffle mask chooses elements from its source vectors
2144   /// without lane crossings. A shuffle using this mask would be
2145   /// equivalent to a vector select with a constant condition operand.
2146   /// Example: <4,1,6,undef>
2147   /// This returns false if the mask does not choose from both input vectors.
2148   /// In that case, the shuffle is better classified as an identity shuffle.
2149   /// This assumes that vector operands are the same length as the mask
2150   /// (a length-changing shuffle can never be equivalent to a vector select).
2151   static bool isSelectMask(ArrayRef<int> Mask, int NumSrcElts);
2152   static bool isSelectMask(const Constant *Mask, int NumSrcElts) {
2153     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2154     SmallVector<int, 16> MaskAsInts;
2155     getShuffleMask(Mask, MaskAsInts);
2156     return isSelectMask(MaskAsInts, NumSrcElts);
2157   }
2158 
2159   /// Return true if this shuffle chooses elements from its source vectors
2160   /// without lane crossings and all operands have the same number of elements.
2161   /// In other words, this shuffle is equivalent to a vector select with a
2162   /// constant condition operand.
2163   /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2164   /// This returns false if the mask does not choose from both input vectors.
2165   /// In that case, the shuffle is better classified as an identity shuffle.
2166   /// TODO: Optionally allow length-changing shuffles.
2167   bool isSelect() const {
2168     return !changesLength() && isSelectMask(ShuffleMask, ShuffleMask.size());
2169   }
2170 
2171   /// Return true if this shuffle mask swaps the order of elements from exactly
2172   /// one source vector.
2173   /// Example: <7,6,undef,4>
2174   /// This assumes that vector operands (of length \p NumSrcElts) are the same
2175   /// length as the mask.
2176   static bool isReverseMask(ArrayRef<int> Mask, int NumSrcElts);
2177   static bool isReverseMask(const Constant *Mask, int NumSrcElts) {
2178     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2179     SmallVector<int, 16> MaskAsInts;
2180     getShuffleMask(Mask, MaskAsInts);
2181     return isReverseMask(MaskAsInts, NumSrcElts);
2182   }
2183 
2184   /// Return true if this shuffle swaps the order of elements from exactly
2185   /// one source vector.
2186   /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2187   /// TODO: Optionally allow length-changing shuffles.
2188   bool isReverse() const {
2189     return !changesLength() && isReverseMask(ShuffleMask, ShuffleMask.size());
2190   }
2191 
2192   /// Return true if this shuffle mask chooses all elements with the same value
2193   /// as the first element of exactly one source vector.
2194   /// Example: <4,undef,undef,4>
2195   /// This assumes that vector operands (of length \p NumSrcElts) are the same
2196   /// length as the mask.
2197   static bool isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts);
2198   static bool isZeroEltSplatMask(const Constant *Mask, int NumSrcElts) {
2199     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2200     SmallVector<int, 16> MaskAsInts;
2201     getShuffleMask(Mask, MaskAsInts);
2202     return isZeroEltSplatMask(MaskAsInts, NumSrcElts);
2203   }
2204 
2205   /// Return true if all elements of this shuffle are the same value as the
2206   /// first element of exactly one source vector without changing the length
2207   /// of that vector.
2208   /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2209   /// TODO: Optionally allow length-changing shuffles.
2210   /// TODO: Optionally allow splats from other elements.
2211   bool isZeroEltSplat() const {
2212     return !changesLength() &&
2213            isZeroEltSplatMask(ShuffleMask, ShuffleMask.size());
2214   }
2215 
2216   /// Return true if this shuffle mask is a transpose mask.
2217   /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2218   /// even- or odd-numbered vector elements from two n-dimensional source
2219   /// vectors and write each result into consecutive elements of an
2220   /// n-dimensional destination vector. Two shuffles are necessary to complete
2221   /// the transpose, one for the even elements and another for the odd elements.
2222   /// This description closely follows how the TRN1 and TRN2 AArch64
2223   /// instructions operate.
2224   ///
2225   /// For example, a simple 2x2 matrix can be transposed with:
2226   ///
2227   ///   ; Original matrix
2228   ///   m0 = < a, b >
2229   ///   m1 = < c, d >
2230   ///
2231   ///   ; Transposed matrix
2232   ///   t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2233   ///   t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2234   ///
2235   /// For matrices having greater than n columns, the resulting nx2 transposed
2236   /// matrix is stored in two result vectors such that one vector contains
2237   /// interleaved elements from all the even-numbered rows and the other vector
2238   /// contains interleaved elements from all the odd-numbered rows. For example,
2239   /// a 2x4 matrix can be transposed with:
2240   ///
2241   ///   ; Original matrix
2242   ///   m0 = < a, b, c, d >
2243   ///   m1 = < e, f, g, h >
2244   ///
2245   ///   ; Transposed matrix
2246   ///   t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2247   ///   t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2248   static bool isTransposeMask(ArrayRef<int> Mask, int NumSrcElts);
2249   static bool isTransposeMask(const Constant *Mask, int NumSrcElts) {
2250     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2251     SmallVector<int, 16> MaskAsInts;
2252     getShuffleMask(Mask, MaskAsInts);
2253     return isTransposeMask(MaskAsInts, NumSrcElts);
2254   }
2255 
2256   /// Return true if this shuffle transposes the elements of its inputs without
2257   /// changing the length of the vectors. This operation may also be known as a
2258   /// merge or interleave. See the description for isTransposeMask() for the
2259   /// exact specification.
2260   /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2261   bool isTranspose() const {
2262     return !changesLength() && isTransposeMask(ShuffleMask, ShuffleMask.size());
2263   }
2264 
2265   /// Return true if this shuffle mask is a splice mask, concatenating the two
2266   /// inputs together and then extracts an original width vector starting from
2267   /// the splice index.
2268   /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2269   /// This assumes that vector operands (of length \p NumSrcElts) are the same
2270   /// length as the mask.
2271   static bool isSpliceMask(ArrayRef<int> Mask, int NumSrcElts, int &Index);
2272   static bool isSpliceMask(const Constant *Mask, int NumSrcElts, int &Index) {
2273     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2274     SmallVector<int, 16> MaskAsInts;
2275     getShuffleMask(Mask, MaskAsInts);
2276     return isSpliceMask(MaskAsInts, NumSrcElts, Index);
2277   }
2278 
2279   /// Return true if this shuffle splices two inputs without changing the length
2280   /// of the vectors. This operation concatenates the two inputs together and
2281   /// then extracts an original width vector starting from the splice index.
2282   /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2283   bool isSplice(int &Index) const {
2284     return !changesLength() &&
2285            isSpliceMask(ShuffleMask, ShuffleMask.size(), Index);
2286   }
2287 
2288   /// Return true if this shuffle mask is an extract subvector mask.
2289   /// A valid extract subvector mask returns a smaller vector from a single
2290   /// source operand. The base extraction index is returned as well.
2291   static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2292                                      int &Index);
2293   static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2294                                      int &Index) {
2295     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2296     // Not possible to express a shuffle mask for a scalable vector for this
2297     // case.
2298     if (isa<ScalableVectorType>(Mask->getType()))
2299       return false;
2300     SmallVector<int, 16> MaskAsInts;
2301     getShuffleMask(Mask, MaskAsInts);
2302     return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2303   }
2304 
2305   /// Return true if this shuffle mask is an extract subvector mask.
2306   bool isExtractSubvectorMask(int &Index) const {
2307     // Not possible to express a shuffle mask for a scalable vector for this
2308     // case.
2309     if (isa<ScalableVectorType>(getType()))
2310       return false;
2311 
2312     int NumSrcElts =
2313         cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2314     return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2315   }
2316 
2317   /// Return true if this shuffle mask is an insert subvector mask.
2318   /// A valid insert subvector mask inserts the lowest elements of a second
2319   /// source operand into an in-place first source operand.
2320   /// Both the sub vector width and the insertion index is returned.
2321   static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2322                                     int &NumSubElts, int &Index);
2323   static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2324                                     int &NumSubElts, int &Index) {
2325     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2326     // Not possible to express a shuffle mask for a scalable vector for this
2327     // case.
2328     if (isa<ScalableVectorType>(Mask->getType()))
2329       return false;
2330     SmallVector<int, 16> MaskAsInts;
2331     getShuffleMask(Mask, MaskAsInts);
2332     return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2333   }
2334 
2335   /// Return true if this shuffle mask is an insert subvector mask.
2336   bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2337     // Not possible to express a shuffle mask for a scalable vector for this
2338     // case.
2339     if (isa<ScalableVectorType>(getType()))
2340       return false;
2341 
2342     int NumSrcElts =
2343         cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2344     return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2345   }
2346 
2347   /// Return true if this shuffle mask replicates each of the \p VF elements
2348   /// in a vector \p ReplicationFactor times.
2349   /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2350   ///   <0,0,0,1,1,1,2,2,2,3,3,3>
2351   static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2352                                 int &VF);
2353   static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2354                                 int &VF) {
2355     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2356     // Not possible to express a shuffle mask for a scalable vector for this
2357     // case.
2358     if (isa<ScalableVectorType>(Mask->getType()))
2359       return false;
2360     SmallVector<int, 16> MaskAsInts;
2361     getShuffleMask(Mask, MaskAsInts);
2362     return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2363   }
2364 
2365   /// Return true if this shuffle mask is a replication mask.
2366   bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2367 
2368   /// Return true if this shuffle mask represents "clustered" mask of size VF,
2369   /// i.e. each index between [0..VF) is used exactly once in each submask of
2370   /// size VF.
2371   /// For example, the mask for \p VF=4 is:
2372   /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4
2373   /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time.
2374   /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because
2375   ///                          element 3 is used twice in the second submask
2376   ///                          (3,3,1,0) and index 2 is not used at all.
2377   static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF);
2378 
2379   /// Return true if this shuffle mask is a one-use-single-source("clustered")
2380   /// mask.
2381   bool isOneUseSingleSourceMask(int VF) const;
2382 
2383   /// Change values in a shuffle permute mask assuming the two vector operands
2384   /// of length InVecNumElts have swapped position.
2385   static void commuteShuffleMask(MutableArrayRef<int> Mask,
2386                                  unsigned InVecNumElts) {
2387     for (int &Idx : Mask) {
2388       if (Idx == -1)
2389         continue;
2390       Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2391       assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2392              "shufflevector mask index out of range");
2393     }
2394   }
2395 
2396   /// Return if this shuffle interleaves its two input vectors together.
2397   bool isInterleave(unsigned Factor);
2398 
2399   /// Return true if the mask interleaves one or more input vectors together.
2400   ///
2401   /// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...>
2402   /// E.g. For a Factor of 2 (LaneLen=4):
2403   ///   <0, 4, 1, 5, 2, 6, 3, 7>
2404   /// E.g. For a Factor of 3 (LaneLen=4):
2405   ///   <4, 0, 9, 5, 1, 10, 6, 2, 11, 7, 3, 12>
2406   /// E.g. For a Factor of 4 (LaneLen=2):
2407   ///   <0, 2, 6, 4, 1, 3, 7, 5>
2408   ///
2409   /// NumInputElts is the total number of elements in the input vectors.
2410   ///
2411   /// StartIndexes are the first indexes of each vector being interleaved,
2412   /// substituting any indexes that were undef
2413   /// E.g. <4, -1, 2, 5, 1, 3> (Factor=3): StartIndexes=<4, 0, 2>
2414   ///
2415   /// Note that this does not check if the input vectors are consecutive:
2416   /// It will return true for masks such as
2417   /// <0, 4, 6, 1, 5, 7> (Factor=3, LaneLen=2)
2418   static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
2419                                unsigned NumInputElts,
2420                                SmallVectorImpl<unsigned> &StartIndexes);
2421   static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
2422                                unsigned NumInputElts) {
2423     SmallVector<unsigned, 8> StartIndexes;
2424     return isInterleaveMask(Mask, Factor, NumInputElts, StartIndexes);
2425   }
2426 
2427   /// Checks if the shuffle is a bit rotation of the first operand across
2428   /// multiple subelements, e.g:
2429   ///
2430   /// shuffle <8 x i8> %a, <8 x i8> poison, <8 x i32> <1, 0, 3, 2, 5, 4, 7, 6>
2431   ///
2432   /// could be expressed as
2433   ///
2434   /// rotl <4 x i16> %a, 8
2435   ///
2436   /// If it can be expressed as a rotation, returns the number of subelements to
2437   /// group by in NumSubElts and the number of bits to rotate left in RotateAmt.
2438   static bool isBitRotateMask(ArrayRef<int> Mask, unsigned EltSizeInBits,
2439                               unsigned MinSubElts, unsigned MaxSubElts,
2440                               unsigned &NumSubElts, unsigned &RotateAmt);
2441 
2442   // Methods for support type inquiry through isa, cast, and dyn_cast:
2443   static bool classof(const Instruction *I) {
2444     return I->getOpcode() == Instruction::ShuffleVector;
2445   }
2446   static bool classof(const Value *V) {
2447     return isa<Instruction>(V) && classof(cast<Instruction>(V));
2448   }
2449 };
2450 
2451 template <>
2452 struct OperandTraits<ShuffleVectorInst>
2453     : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2454 
2455 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
2456 
2457 //===----------------------------------------------------------------------===//
2458 //                                ExtractValueInst Class
2459 //===----------------------------------------------------------------------===//
2460 
2461 /// This instruction extracts a struct member or array
2462 /// element value from an aggregate value.
2463 ///
2464 class ExtractValueInst : public UnaryInstruction {
2465   SmallVector<unsigned, 4> Indices;
2466 
2467   ExtractValueInst(const ExtractValueInst &EVI);
2468 
2469   /// Constructors - Create a extractvalue instruction with a base aggregate
2470   /// value and a list of indices.  The first ctor can optionally insert before
2471   /// an existing instruction, the second appends the new instruction to the
2472   /// specified BasicBlock.
2473   inline ExtractValueInst(Value *Agg,
2474                           ArrayRef<unsigned> Idxs,
2475                           const Twine &NameStr,
2476                           Instruction *InsertBefore);
2477   inline ExtractValueInst(Value *Agg,
2478                           ArrayRef<unsigned> Idxs,
2479                           const Twine &NameStr, BasicBlock *InsertAtEnd);
2480 
2481   void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2482 
2483 protected:
2484   // Note: Instruction needs to be a friend here to call cloneImpl.
2485   friend class Instruction;
2486 
2487   ExtractValueInst *cloneImpl() const;
2488 
2489 public:
2490   static ExtractValueInst *Create(Value *Agg,
2491                                   ArrayRef<unsigned> Idxs,
2492                                   const Twine &NameStr = "",
2493                                   Instruction *InsertBefore = nullptr) {
2494     return new
2495       ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2496   }
2497 
2498   static ExtractValueInst *Create(Value *Agg,
2499                                   ArrayRef<unsigned> Idxs,
2500                                   const Twine &NameStr,
2501                                   BasicBlock *InsertAtEnd) {
2502     return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2503   }
2504 
2505   /// Returns the type of the element that would be extracted
2506   /// with an extractvalue instruction with the specified parameters.
2507   ///
2508   /// Null is returned if the indices are invalid for the specified type.
2509   static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2510 
2511   using idx_iterator = const unsigned*;
2512 
2513   inline idx_iterator idx_begin() const { return Indices.begin(); }
2514   inline idx_iterator idx_end()   const { return Indices.end(); }
2515   inline iterator_range<idx_iterator> indices() const {
2516     return make_range(idx_begin(), idx_end());
2517   }
2518 
2519   Value *getAggregateOperand() {
2520     return getOperand(0);
2521   }
2522   const Value *getAggregateOperand() const {
2523     return getOperand(0);
2524   }
2525   static unsigned getAggregateOperandIndex() {
2526     return 0U;                      // get index for modifying correct operand
2527   }
2528 
2529   ArrayRef<unsigned> getIndices() const {
2530     return Indices;
2531   }
2532 
2533   unsigned getNumIndices() const {
2534     return (unsigned)Indices.size();
2535   }
2536 
2537   bool hasIndices() const {
2538     return true;
2539   }
2540 
2541   // Methods for support type inquiry through isa, cast, and dyn_cast:
2542   static bool classof(const Instruction *I) {
2543     return I->getOpcode() == Instruction::ExtractValue;
2544   }
2545   static bool classof(const Value *V) {
2546     return isa<Instruction>(V) && classof(cast<Instruction>(V));
2547   }
2548 };
2549 
2550 ExtractValueInst::ExtractValueInst(Value *Agg,
2551                                    ArrayRef<unsigned> Idxs,
2552                                    const Twine &NameStr,
2553                                    Instruction *InsertBefore)
2554   : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2555                      ExtractValue, Agg, InsertBefore) {
2556   init(Idxs, NameStr);
2557 }
2558 
2559 ExtractValueInst::ExtractValueInst(Value *Agg,
2560                                    ArrayRef<unsigned> Idxs,
2561                                    const Twine &NameStr,
2562                                    BasicBlock *InsertAtEnd)
2563   : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2564                      ExtractValue, Agg, InsertAtEnd) {
2565   init(Idxs, NameStr);
2566 }
2567 
2568 //===----------------------------------------------------------------------===//
2569 //                                InsertValueInst Class
2570 //===----------------------------------------------------------------------===//
2571 
2572 /// This instruction inserts a struct field of array element
2573 /// value into an aggregate value.
2574 ///
2575 class InsertValueInst : public Instruction {
2576   SmallVector<unsigned, 4> Indices;
2577 
2578   InsertValueInst(const InsertValueInst &IVI);
2579 
2580   /// Constructors - Create a insertvalue instruction with a base aggregate
2581   /// value, a value to insert, and a list of indices.  The first ctor can
2582   /// optionally insert before an existing instruction, the second appends
2583   /// the new instruction to the specified BasicBlock.
2584   inline InsertValueInst(Value *Agg, Value *Val,
2585                          ArrayRef<unsigned> Idxs,
2586                          const Twine &NameStr,
2587                          Instruction *InsertBefore);
2588   inline InsertValueInst(Value *Agg, Value *Val,
2589                          ArrayRef<unsigned> Idxs,
2590                          const Twine &NameStr, BasicBlock *InsertAtEnd);
2591 
2592   /// Constructors - These two constructors are convenience methods because one
2593   /// and two index insertvalue instructions are so common.
2594   InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2595                   const Twine &NameStr = "",
2596                   Instruction *InsertBefore = nullptr);
2597   InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2598                   BasicBlock *InsertAtEnd);
2599 
2600   void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2601             const Twine &NameStr);
2602 
2603 protected:
2604   // Note: Instruction needs to be a friend here to call cloneImpl.
2605   friend class Instruction;
2606 
2607   InsertValueInst *cloneImpl() const;
2608 
2609 public:
2610   // allocate space for exactly two operands
2611   void *operator new(size_t S) { return User::operator new(S, 2); }
2612   void operator delete(void *Ptr) { User::operator delete(Ptr); }
2613 
2614   static InsertValueInst *Create(Value *Agg, Value *Val,
2615                                  ArrayRef<unsigned> Idxs,
2616                                  const Twine &NameStr = "",
2617                                  Instruction *InsertBefore = nullptr) {
2618     return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2619   }
2620 
2621   static InsertValueInst *Create(Value *Agg, Value *Val,
2622                                  ArrayRef<unsigned> Idxs,
2623                                  const Twine &NameStr,
2624                                  BasicBlock *InsertAtEnd) {
2625     return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2626   }
2627 
2628   /// Transparently provide more efficient getOperand methods.
2629   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2630 
2631   using idx_iterator = const unsigned*;
2632 
2633   inline idx_iterator idx_begin() const { return Indices.begin(); }
2634   inline idx_iterator idx_end()   const { return Indices.end(); }
2635   inline iterator_range<idx_iterator> indices() const {
2636     return make_range(idx_begin(), idx_end());
2637   }
2638 
2639   Value *getAggregateOperand() {
2640     return getOperand(0);
2641   }
2642   const Value *getAggregateOperand() const {
2643     return getOperand(0);
2644   }
2645   static unsigned getAggregateOperandIndex() {
2646     return 0U;                      // get index for modifying correct operand
2647   }
2648 
2649   Value *getInsertedValueOperand() {
2650     return getOperand(1);
2651   }
2652   const Value *getInsertedValueOperand() const {
2653     return getOperand(1);
2654   }
2655   static unsigned getInsertedValueOperandIndex() {
2656     return 1U;                      // get index for modifying correct operand
2657   }
2658 
2659   ArrayRef<unsigned> getIndices() const {
2660     return Indices;
2661   }
2662 
2663   unsigned getNumIndices() const {
2664     return (unsigned)Indices.size();
2665   }
2666 
2667   bool hasIndices() const {
2668     return true;
2669   }
2670 
2671   // Methods for support type inquiry through isa, cast, and dyn_cast:
2672   static bool classof(const Instruction *I) {
2673     return I->getOpcode() == Instruction::InsertValue;
2674   }
2675   static bool classof(const Value *V) {
2676     return isa<Instruction>(V) && classof(cast<Instruction>(V));
2677   }
2678 };
2679 
2680 template <>
2681 struct OperandTraits<InsertValueInst> :
2682   public FixedNumOperandTraits<InsertValueInst, 2> {
2683 };
2684 
2685 InsertValueInst::InsertValueInst(Value *Agg,
2686                                  Value *Val,
2687                                  ArrayRef<unsigned> Idxs,
2688                                  const Twine &NameStr,
2689                                  Instruction *InsertBefore)
2690   : Instruction(Agg->getType(), InsertValue,
2691                 OperandTraits<InsertValueInst>::op_begin(this),
2692                 2, InsertBefore) {
2693   init(Agg, Val, Idxs, NameStr);
2694 }
2695 
2696 InsertValueInst::InsertValueInst(Value *Agg,
2697                                  Value *Val,
2698                                  ArrayRef<unsigned> Idxs,
2699                                  const Twine &NameStr,
2700                                  BasicBlock *InsertAtEnd)
2701   : Instruction(Agg->getType(), InsertValue,
2702                 OperandTraits<InsertValueInst>::op_begin(this),
2703                 2, InsertAtEnd) {
2704   init(Agg, Val, Idxs, NameStr);
2705 }
2706 
2707 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
2708 
2709 //===----------------------------------------------------------------------===//
2710 //                               PHINode Class
2711 //===----------------------------------------------------------------------===//
2712 
2713 // PHINode - The PHINode class is used to represent the magical mystical PHI
2714 // node, that can not exist in nature, but can be synthesized in a computer
2715 // scientist's overactive imagination.
2716 //
2717 class PHINode : public Instruction {
2718   /// The number of operands actually allocated.  NumOperands is
2719   /// the number actually in use.
2720   unsigned ReservedSpace;
2721 
2722   PHINode(const PHINode &PN);
2723 
2724   explicit PHINode(Type *Ty, unsigned NumReservedValues,
2725                    const Twine &NameStr = "",
2726                    Instruction *InsertBefore = nullptr)
2727     : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2728       ReservedSpace(NumReservedValues) {
2729     assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2730     setName(NameStr);
2731     allocHungoffUses(ReservedSpace);
2732   }
2733 
2734   PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2735           BasicBlock *InsertAtEnd)
2736     : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2737       ReservedSpace(NumReservedValues) {
2738     assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2739     setName(NameStr);
2740     allocHungoffUses(ReservedSpace);
2741   }
2742 
2743 protected:
2744   // Note: Instruction needs to be a friend here to call cloneImpl.
2745   friend class Instruction;
2746 
2747   PHINode *cloneImpl() const;
2748 
2749   // allocHungoffUses - this is more complicated than the generic
2750   // User::allocHungoffUses, because we have to allocate Uses for the incoming
2751   // values and pointers to the incoming blocks, all in one allocation.
2752   void allocHungoffUses(unsigned N) {
2753     User::allocHungoffUses(N, /* IsPhi */ true);
2754   }
2755 
2756 public:
2757   /// Constructors - NumReservedValues is a hint for the number of incoming
2758   /// edges that this phi node will have (use 0 if you really have no idea).
2759   static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2760                          const Twine &NameStr = "",
2761                          Instruction *InsertBefore = nullptr) {
2762     return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2763   }
2764 
2765   static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2766                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
2767     return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2768   }
2769 
2770   /// Provide fast operand accessors
2771   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2772 
2773   // Block iterator interface. This provides access to the list of incoming
2774   // basic blocks, which parallels the list of incoming values.
2775   // Please note that we are not providing non-const iterators for blocks to
2776   // force all updates go through an interface function.
2777 
2778   using block_iterator = BasicBlock **;
2779   using const_block_iterator = BasicBlock * const *;
2780 
2781   const_block_iterator block_begin() const {
2782     return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2783   }
2784 
2785   const_block_iterator block_end() const {
2786     return block_begin() + getNumOperands();
2787   }
2788 
2789   iterator_range<const_block_iterator> blocks() const {
2790     return make_range(block_begin(), block_end());
2791   }
2792 
2793   op_range incoming_values() { return operands(); }
2794 
2795   const_op_range incoming_values() const { return operands(); }
2796 
2797   /// Return the number of incoming edges
2798   ///
2799   unsigned getNumIncomingValues() const { return getNumOperands(); }
2800 
2801   /// Return incoming value number x
2802   ///
2803   Value *getIncomingValue(unsigned i) const {
2804     return getOperand(i);
2805   }
2806   void setIncomingValue(unsigned i, Value *V) {
2807     assert(V && "PHI node got a null value!");
2808     assert(getType() == V->getType() &&
2809            "All operands to PHI node must be the same type as the PHI node!");
2810     setOperand(i, V);
2811   }
2812 
2813   static unsigned getOperandNumForIncomingValue(unsigned i) {
2814     return i;
2815   }
2816 
2817   static unsigned getIncomingValueNumForOperand(unsigned i) {
2818     return i;
2819   }
2820 
2821   /// Return incoming basic block number @p i.
2822   ///
2823   BasicBlock *getIncomingBlock(unsigned i) const {
2824     return block_begin()[i];
2825   }
2826 
2827   /// Return incoming basic block corresponding
2828   /// to an operand of the PHI.
2829   ///
2830   BasicBlock *getIncomingBlock(const Use &U) const {
2831     assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2832     return getIncomingBlock(unsigned(&U - op_begin()));
2833   }
2834 
2835   /// Return incoming basic block corresponding
2836   /// to value use iterator.
2837   ///
2838   BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2839     return getIncomingBlock(I.getUse());
2840   }
2841 
2842   void setIncomingBlock(unsigned i, BasicBlock *BB) {
2843     const_cast<block_iterator>(block_begin())[i] = BB;
2844   }
2845 
2846   /// Copies the basic blocks from \p BBRange to the incoming basic block list
2847   /// of this PHINode, starting at \p ToIdx.
2848   void copyIncomingBlocks(iterator_range<const_block_iterator> BBRange,
2849                           uint32_t ToIdx = 0) {
2850     copy(BBRange, const_cast<block_iterator>(block_begin()) + ToIdx);
2851   }
2852 
2853   /// Replace every incoming basic block \p Old to basic block \p New.
2854   void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2855     assert(New && Old && "PHI node got a null basic block!");
2856     for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2857       if (getIncomingBlock(Op) == Old)
2858         setIncomingBlock(Op, New);
2859   }
2860 
2861   /// Add an incoming value to the end of the PHI list
2862   ///
2863   void addIncoming(Value *V, BasicBlock *BB) {
2864     if (getNumOperands() == ReservedSpace)
2865       growOperands();  // Get more space!
2866     // Initialize some new operands.
2867     setNumHungOffUseOperands(getNumOperands() + 1);
2868     setIncomingValue(getNumOperands() - 1, V);
2869     setIncomingBlock(getNumOperands() - 1, BB);
2870   }
2871 
2872   /// Remove an incoming value.  This is useful if a
2873   /// predecessor basic block is deleted.  The value removed is returned.
2874   ///
2875   /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2876   /// is true), the PHI node is destroyed and any uses of it are replaced with
2877   /// dummy values.  The only time there should be zero incoming values to a PHI
2878   /// node is when the block is dead, so this strategy is sound.
2879   ///
2880   Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2881 
2882   Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2883     int Idx = getBasicBlockIndex(BB);
2884     assert(Idx >= 0 && "Invalid basic block argument to remove!");
2885     return removeIncomingValue(Idx, DeletePHIIfEmpty);
2886   }
2887 
2888   /// Remove all incoming values for which the predicate returns true.
2889   /// The predicate accepts the incoming value index.
2890   void removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
2891                              bool DeletePHIIfEmpty = true);
2892 
2893   /// Return the first index of the specified basic
2894   /// block in the value list for this PHI.  Returns -1 if no instance.
2895   ///
2896   int getBasicBlockIndex(const BasicBlock *BB) const {
2897     for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2898       if (block_begin()[i] == BB)
2899         return i;
2900     return -1;
2901   }
2902 
2903   Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2904     int Idx = getBasicBlockIndex(BB);
2905     assert(Idx >= 0 && "Invalid basic block argument!");
2906     return getIncomingValue(Idx);
2907   }
2908 
2909   /// Set every incoming value(s) for block \p BB to \p V.
2910   void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2911     assert(BB && "PHI node got a null basic block!");
2912     bool Found = false;
2913     for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2914       if (getIncomingBlock(Op) == BB) {
2915         Found = true;
2916         setIncomingValue(Op, V);
2917       }
2918     (void)Found;
2919     assert(Found && "Invalid basic block argument to set!");
2920   }
2921 
2922   /// If the specified PHI node always merges together the
2923   /// same value, return the value, otherwise return null.
2924   Value *hasConstantValue() const;
2925 
2926   /// Whether the specified PHI node always merges
2927   /// together the same value, assuming undefs are equal to a unique
2928   /// non-undef value.
2929   bool hasConstantOrUndefValue() const;
2930 
2931   /// If the PHI node is complete which means all of its parent's predecessors
2932   /// have incoming value in this PHI, return true, otherwise return false.
2933   bool isComplete() const {
2934     return llvm::all_of(predecessors(getParent()),
2935                         [this](const BasicBlock *Pred) {
2936                           return getBasicBlockIndex(Pred) >= 0;
2937                         });
2938   }
2939 
2940   /// Methods for support type inquiry through isa, cast, and dyn_cast:
2941   static bool classof(const Instruction *I) {
2942     return I->getOpcode() == Instruction::PHI;
2943   }
2944   static bool classof(const Value *V) {
2945     return isa<Instruction>(V) && classof(cast<Instruction>(V));
2946   }
2947 
2948 private:
2949   void growOperands();
2950 };
2951 
2952 template <>
2953 struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2954 };
2955 
2956 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
2957 
2958 //===----------------------------------------------------------------------===//
2959 //                           LandingPadInst Class
2960 //===----------------------------------------------------------------------===//
2961 
2962 //===---------------------------------------------------------------------------
2963 /// The landingpad instruction holds all of the information
2964 /// necessary to generate correct exception handling. The landingpad instruction
2965 /// cannot be moved from the top of a landing pad block, which itself is
2966 /// accessible only from the 'unwind' edge of an invoke. This uses the
2967 /// SubclassData field in Value to store whether or not the landingpad is a
2968 /// cleanup.
2969 ///
2970 class LandingPadInst : public Instruction {
2971   using CleanupField = BoolBitfieldElementT<0>;
2972 
2973   /// The number of operands actually allocated.  NumOperands is
2974   /// the number actually in use.
2975   unsigned ReservedSpace;
2976 
2977   LandingPadInst(const LandingPadInst &LP);
2978 
2979 public:
2980   enum ClauseType { Catch, Filter };
2981 
2982 private:
2983   explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2984                           const Twine &NameStr, Instruction *InsertBefore);
2985   explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2986                           const Twine &NameStr, BasicBlock *InsertAtEnd);
2987 
2988   // Allocate space for exactly zero operands.
2989   void *operator new(size_t S) { return User::operator new(S); }
2990 
2991   void growOperands(unsigned Size);
2992   void init(unsigned NumReservedValues, const Twine &NameStr);
2993 
2994 protected:
2995   // Note: Instruction needs to be a friend here to call cloneImpl.
2996   friend class Instruction;
2997 
2998   LandingPadInst *cloneImpl() const;
2999 
3000 public:
3001   void operator delete(void *Ptr) { User::operator delete(Ptr); }
3002 
3003   /// Constructors - NumReservedClauses is a hint for the number of incoming
3004   /// clauses that this landingpad will have (use 0 if you really have no idea).
3005   static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
3006                                 const Twine &NameStr = "",
3007                                 Instruction *InsertBefore = nullptr);
3008   static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
3009                                 const Twine &NameStr, BasicBlock *InsertAtEnd);
3010 
3011   /// Provide fast operand accessors
3012   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3013 
3014   /// Return 'true' if this landingpad instruction is a
3015   /// cleanup. I.e., it should be run when unwinding even if its landing pad
3016   /// doesn't catch the exception.
3017   bool isCleanup() const { return getSubclassData<CleanupField>(); }
3018 
3019   /// Indicate that this landingpad instruction is a cleanup.
3020   void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
3021 
3022   /// Add a catch or filter clause to the landing pad.
3023   void addClause(Constant *ClauseVal);
3024 
3025   /// Get the value of the clause at index Idx. Use isCatch/isFilter to
3026   /// determine what type of clause this is.
3027   Constant *getClause(unsigned Idx) const {
3028     return cast<Constant>(getOperandList()[Idx]);
3029   }
3030 
3031   /// Return 'true' if the clause and index Idx is a catch clause.
3032   bool isCatch(unsigned Idx) const {
3033     return !isa<ArrayType>(getOperandList()[Idx]->getType());
3034   }
3035 
3036   /// Return 'true' if the clause and index Idx is a filter clause.
3037   bool isFilter(unsigned Idx) const {
3038     return isa<ArrayType>(getOperandList()[Idx]->getType());
3039   }
3040 
3041   /// Get the number of clauses for this landing pad.
3042   unsigned getNumClauses() const { return getNumOperands(); }
3043 
3044   /// Grow the size of the operand list to accommodate the new
3045   /// number of clauses.
3046   void reserveClauses(unsigned Size) { growOperands(Size); }
3047 
3048   // Methods for support type inquiry through isa, cast, and dyn_cast:
3049   static bool classof(const Instruction *I) {
3050     return I->getOpcode() == Instruction::LandingPad;
3051   }
3052   static bool classof(const Value *V) {
3053     return isa<Instruction>(V) && classof(cast<Instruction>(V));
3054   }
3055 };
3056 
3057 template <>
3058 struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
3059 };
3060 
3061 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
3062 
3063 //===----------------------------------------------------------------------===//
3064 //                               ReturnInst Class
3065 //===----------------------------------------------------------------------===//
3066 
3067 //===---------------------------------------------------------------------------
3068 /// Return a value (possibly void), from a function.  Execution
3069 /// does not continue in this function any longer.
3070 ///
3071 class ReturnInst : public Instruction {
3072   ReturnInst(const ReturnInst &RI);
3073 
3074 private:
3075   // ReturnInst constructors:
3076   // ReturnInst()                  - 'ret void' instruction
3077   // ReturnInst(    null)          - 'ret void' instruction
3078   // ReturnInst(Value* X)          - 'ret X'    instruction
3079   // ReturnInst(    null, Inst *I) - 'ret void' instruction, insert before I
3080   // ReturnInst(Value* X, Inst *I) - 'ret X'    instruction, insert before I
3081   // ReturnInst(    null, BB *B)   - 'ret void' instruction, insert @ end of B
3082   // ReturnInst(Value* X, BB *B)   - 'ret X'    instruction, insert @ end of B
3083   //
3084   // NOTE: If the Value* passed is of type void then the constructor behaves as
3085   // if it was passed NULL.
3086   explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3087                       Instruction *InsertBefore = nullptr);
3088   ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3089   explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3090 
3091 protected:
3092   // Note: Instruction needs to be a friend here to call cloneImpl.
3093   friend class Instruction;
3094 
3095   ReturnInst *cloneImpl() const;
3096 
3097 public:
3098   static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3099                             Instruction *InsertBefore = nullptr) {
3100     return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3101   }
3102 
3103   static ReturnInst* Create(LLVMContext &C, Value *retVal,
3104                             BasicBlock *InsertAtEnd) {
3105     return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3106   }
3107 
3108   static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3109     return new(0) ReturnInst(C, InsertAtEnd);
3110   }
3111 
3112   /// Provide fast operand accessors
3113   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3114 
3115   /// Convenience accessor. Returns null if there is no return value.
3116   Value *getReturnValue() const {
3117     return getNumOperands() != 0 ? getOperand(0) : nullptr;
3118   }
3119 
3120   unsigned getNumSuccessors() const { return 0; }
3121 
3122   // Methods for support type inquiry through isa, cast, and dyn_cast:
3123   static bool classof(const Instruction *I) {
3124     return (I->getOpcode() == Instruction::Ret);
3125   }
3126   static bool classof(const Value *V) {
3127     return isa<Instruction>(V) && classof(cast<Instruction>(V));
3128   }
3129 
3130 private:
3131   BasicBlock *getSuccessor(unsigned idx) const {
3132     llvm_unreachable("ReturnInst has no successors!");
3133   }
3134 
3135   void setSuccessor(unsigned idx, BasicBlock *B) {
3136     llvm_unreachable("ReturnInst has no successors!");
3137   }
3138 };
3139 
3140 template <>
3141 struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3142 };
3143 
3144 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
3145 
3146 //===----------------------------------------------------------------------===//
3147 //                               BranchInst Class
3148 //===----------------------------------------------------------------------===//
3149 
3150 //===---------------------------------------------------------------------------
3151 /// Conditional or Unconditional Branch instruction.
3152 ///
3153 class BranchInst : public Instruction {
3154   /// Ops list - Branches are strange.  The operands are ordered:
3155   ///  [Cond, FalseDest,] TrueDest.  This makes some accessors faster because
3156   /// they don't have to check for cond/uncond branchness. These are mostly
3157   /// accessed relative from op_end().
3158   BranchInst(const BranchInst &BI);
3159   // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3160   // BranchInst(BB *B)                           - 'br B'
3161   // BranchInst(BB* T, BB *F, Value *C)          - 'br C, T, F'
3162   // BranchInst(BB* B, Inst *I)                  - 'br B'        insert before I
3163   // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3164   // BranchInst(BB* B, BB *I)                    - 'br B'        insert at end
3165   // BranchInst(BB* T, BB *F, Value *C, BB *I)   - 'br C, T, F', insert at end
3166   explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3167   BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3168              Instruction *InsertBefore = nullptr);
3169   BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3170   BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3171              BasicBlock *InsertAtEnd);
3172 
3173   void AssertOK();
3174 
3175 protected:
3176   // Note: Instruction needs to be a friend here to call cloneImpl.
3177   friend class Instruction;
3178 
3179   BranchInst *cloneImpl() const;
3180 
3181 public:
3182   /// Iterator type that casts an operand to a basic block.
3183   ///
3184   /// This only makes sense because the successors are stored as adjacent
3185   /// operands for branch instructions.
3186   struct succ_op_iterator
3187       : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3188                               std::random_access_iterator_tag, BasicBlock *,
3189                               ptrdiff_t, BasicBlock *, BasicBlock *> {
3190     explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3191 
3192     BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3193     BasicBlock *operator->() const { return operator*(); }
3194   };
3195 
3196   /// The const version of `succ_op_iterator`.
3197   struct const_succ_op_iterator
3198       : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3199                               std::random_access_iterator_tag,
3200                               const BasicBlock *, ptrdiff_t, const BasicBlock *,
3201                               const BasicBlock *> {
3202     explicit const_succ_op_iterator(const_value_op_iterator I)
3203         : iterator_adaptor_base(I) {}
3204 
3205     const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3206     const BasicBlock *operator->() const { return operator*(); }
3207   };
3208 
3209   static BranchInst *Create(BasicBlock *IfTrue,
3210                             Instruction *InsertBefore = nullptr) {
3211     return new(1) BranchInst(IfTrue, InsertBefore);
3212   }
3213 
3214   static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3215                             Value *Cond, Instruction *InsertBefore = nullptr) {
3216     return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3217   }
3218 
3219   static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3220     return new(1) BranchInst(IfTrue, InsertAtEnd);
3221   }
3222 
3223   static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3224                             Value *Cond, BasicBlock *InsertAtEnd) {
3225     return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3226   }
3227 
3228   /// Transparently provide more efficient getOperand methods.
3229   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3230 
3231   bool isUnconditional() const { return getNumOperands() == 1; }
3232   bool isConditional()   const { return getNumOperands() == 3; }
3233 
3234   Value *getCondition() const {
3235     assert(isConditional() && "Cannot get condition of an uncond branch!");
3236     return Op<-3>();
3237   }
3238 
3239   void setCondition(Value *V) {
3240     assert(isConditional() && "Cannot set condition of unconditional branch!");
3241     Op<-3>() = V;
3242   }
3243 
3244   unsigned getNumSuccessors() const { return 1+isConditional(); }
3245 
3246   BasicBlock *getSuccessor(unsigned i) const {
3247     assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3248     return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3249   }
3250 
3251   void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3252     assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3253     *(&Op<-1>() - idx) = NewSucc;
3254   }
3255 
3256   /// Swap the successors of this branch instruction.
3257   ///
3258   /// Swaps the successors of the branch instruction. This also swaps any
3259   /// branch weight metadata associated with the instruction so that it
3260   /// continues to map correctly to each operand.
3261   void swapSuccessors();
3262 
3263   iterator_range<succ_op_iterator> successors() {
3264     return make_range(
3265         succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3266         succ_op_iterator(value_op_end()));
3267   }
3268 
3269   iterator_range<const_succ_op_iterator> successors() const {
3270     return make_range(const_succ_op_iterator(
3271                           std::next(value_op_begin(), isConditional() ? 1 : 0)),
3272                       const_succ_op_iterator(value_op_end()));
3273   }
3274 
3275   // Methods for support type inquiry through isa, cast, and dyn_cast:
3276   static bool classof(const Instruction *I) {
3277     return (I->getOpcode() == Instruction::Br);
3278   }
3279   static bool classof(const Value *V) {
3280     return isa<Instruction>(V) && classof(cast<Instruction>(V));
3281   }
3282 };
3283 
3284 template <>
3285 struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3286 };
3287 
3288 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
3289 
3290 //===----------------------------------------------------------------------===//
3291 //                               SwitchInst Class
3292 //===----------------------------------------------------------------------===//
3293 
3294 //===---------------------------------------------------------------------------
3295 /// Multiway switch
3296 ///
3297 class SwitchInst : public Instruction {
3298   unsigned ReservedSpace;
3299 
3300   // Operand[0]    = Value to switch on
3301   // Operand[1]    = Default basic block destination
3302   // Operand[2n  ] = Value to match
3303   // Operand[2n+1] = BasicBlock to go to on match
3304   SwitchInst(const SwitchInst &SI);
3305 
3306   /// Create a new switch instruction, specifying a value to switch on and a
3307   /// default destination. The number of additional cases can be specified here
3308   /// to make memory allocation more efficient. This constructor can also
3309   /// auto-insert before another instruction.
3310   SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3311              Instruction *InsertBefore);
3312 
3313   /// Create a new switch instruction, specifying a value to switch on and a
3314   /// default destination. The number of additional cases can be specified here
3315   /// to make memory allocation more efficient. This constructor also
3316   /// auto-inserts at the end of the specified BasicBlock.
3317   SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3318              BasicBlock *InsertAtEnd);
3319 
3320   // allocate space for exactly zero operands
3321   void *operator new(size_t S) { return User::operator new(S); }
3322 
3323   void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3324   void growOperands();
3325 
3326 protected:
3327   // Note: Instruction needs to be a friend here to call cloneImpl.
3328   friend class Instruction;
3329 
3330   SwitchInst *cloneImpl() const;
3331 
3332 public:
3333   void operator delete(void *Ptr) { User::operator delete(Ptr); }
3334 
3335   // -2
3336   static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3337 
3338   template <typename CaseHandleT> class CaseIteratorImpl;
3339 
3340   /// A handle to a particular switch case. It exposes a convenient interface
3341   /// to both the case value and the successor block.
3342   ///
3343   /// We define this as a template and instantiate it to form both a const and
3344   /// non-const handle.
3345   template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3346   class CaseHandleImpl {
3347     // Directly befriend both const and non-const iterators.
3348     friend class SwitchInst::CaseIteratorImpl<
3349         CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3350 
3351   protected:
3352     // Expose the switch type we're parameterized with to the iterator.
3353     using SwitchInstType = SwitchInstT;
3354 
3355     SwitchInstT *SI;
3356     ptrdiff_t Index;
3357 
3358     CaseHandleImpl() = default;
3359     CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3360 
3361   public:
3362     /// Resolves case value for current case.
3363     ConstantIntT *getCaseValue() const {
3364       assert((unsigned)Index < SI->getNumCases() &&
3365              "Index out the number of cases.");
3366       return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3367     }
3368 
3369     /// Resolves successor for current case.
3370     BasicBlockT *getCaseSuccessor() const {
3371       assert(((unsigned)Index < SI->getNumCases() ||
3372               (unsigned)Index == DefaultPseudoIndex) &&
3373              "Index out the number of cases.");
3374       return SI->getSuccessor(getSuccessorIndex());
3375     }
3376 
3377     /// Returns number of current case.
3378     unsigned getCaseIndex() const { return Index; }
3379 
3380     /// Returns successor index for current case successor.
3381     unsigned getSuccessorIndex() const {
3382       assert(((unsigned)Index == DefaultPseudoIndex ||
3383               (unsigned)Index < SI->getNumCases()) &&
3384              "Index out the number of cases.");
3385       return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3386     }
3387 
3388     bool operator==(const CaseHandleImpl &RHS) const {
3389       assert(SI == RHS.SI && "Incompatible operators.");
3390       return Index == RHS.Index;
3391     }
3392   };
3393 
3394   using ConstCaseHandle =
3395       CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3396 
3397   class CaseHandle
3398       : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3399     friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3400 
3401   public:
3402     CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3403 
3404     /// Sets the new value for current case.
3405     void setValue(ConstantInt *V) const {
3406       assert((unsigned)Index < SI->getNumCases() &&
3407              "Index out the number of cases.");
3408       SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3409     }
3410 
3411     /// Sets the new successor for current case.
3412     void setSuccessor(BasicBlock *S) const {
3413       SI->setSuccessor(getSuccessorIndex(), S);
3414     }
3415   };
3416 
3417   template <typename CaseHandleT>
3418   class CaseIteratorImpl
3419       : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3420                                     std::random_access_iterator_tag,
3421                                     const CaseHandleT> {
3422     using SwitchInstT = typename CaseHandleT::SwitchInstType;
3423 
3424     CaseHandleT Case;
3425 
3426   public:
3427     /// Default constructed iterator is in an invalid state until assigned to
3428     /// a case for a particular switch.
3429     CaseIteratorImpl() = default;
3430 
3431     /// Initializes case iterator for given SwitchInst and for given
3432     /// case number.
3433     CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3434 
3435     /// Initializes case iterator for given SwitchInst and for given
3436     /// successor index.
3437     static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3438                                                unsigned SuccessorIndex) {
3439       assert(SuccessorIndex < SI->getNumSuccessors() &&
3440              "Successor index # out of range!");
3441       return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3442                                  : CaseIteratorImpl(SI, DefaultPseudoIndex);
3443     }
3444 
3445     /// Support converting to the const variant. This will be a no-op for const
3446     /// variant.
3447     operator CaseIteratorImpl<ConstCaseHandle>() const {
3448       return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3449     }
3450 
3451     CaseIteratorImpl &operator+=(ptrdiff_t N) {
3452       // Check index correctness after addition.
3453       // Note: Index == getNumCases() means end().
3454       assert(Case.Index + N >= 0 &&
3455              (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3456              "Case.Index out the number of cases.");
3457       Case.Index += N;
3458       return *this;
3459     }
3460     CaseIteratorImpl &operator-=(ptrdiff_t N) {
3461       // Check index correctness after subtraction.
3462       // Note: Case.Index == getNumCases() means end().
3463       assert(Case.Index - N >= 0 &&
3464              (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3465              "Case.Index out the number of cases.");
3466       Case.Index -= N;
3467       return *this;
3468     }
3469     ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3470       assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3471       return Case.Index - RHS.Case.Index;
3472     }
3473     bool operator==(const CaseIteratorImpl &RHS) const {
3474       return Case == RHS.Case;
3475     }
3476     bool operator<(const CaseIteratorImpl &RHS) const {
3477       assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3478       return Case.Index < RHS.Case.Index;
3479     }
3480     const CaseHandleT &operator*() const { return Case; }
3481   };
3482 
3483   using CaseIt = CaseIteratorImpl<CaseHandle>;
3484   using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3485 
3486   static SwitchInst *Create(Value *Value, BasicBlock *Default,
3487                             unsigned NumCases,
3488                             Instruction *InsertBefore = nullptr) {
3489     return new SwitchInst(Value, Default, NumCases, InsertBefore);
3490   }
3491 
3492   static SwitchInst *Create(Value *Value, BasicBlock *Default,
3493                             unsigned NumCases, BasicBlock *InsertAtEnd) {
3494     return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3495   }
3496 
3497   /// Provide fast operand accessors
3498   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3499 
3500   // Accessor Methods for Switch stmt
3501   Value *getCondition() const { return getOperand(0); }
3502   void setCondition(Value *V) { setOperand(0, V); }
3503 
3504   BasicBlock *getDefaultDest() const {
3505     return cast<BasicBlock>(getOperand(1));
3506   }
3507 
3508   void setDefaultDest(BasicBlock *DefaultCase) {
3509     setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3510   }
3511 
3512   /// Return the number of 'cases' in this switch instruction, excluding the
3513   /// default case.
3514   unsigned getNumCases() const {
3515     return getNumOperands()/2 - 1;
3516   }
3517 
3518   /// Returns a read/write iterator that points to the first case in the
3519   /// SwitchInst.
3520   CaseIt case_begin() {
3521     return CaseIt(this, 0);
3522   }
3523 
3524   /// Returns a read-only iterator that points to the first case in the
3525   /// SwitchInst.
3526   ConstCaseIt case_begin() const {
3527     return ConstCaseIt(this, 0);
3528   }
3529 
3530   /// Returns a read/write iterator that points one past the last in the
3531   /// SwitchInst.
3532   CaseIt case_end() {
3533     return CaseIt(this, getNumCases());
3534   }
3535 
3536   /// Returns a read-only iterator that points one past the last in the
3537   /// SwitchInst.
3538   ConstCaseIt case_end() const {
3539     return ConstCaseIt(this, getNumCases());
3540   }
3541 
3542   /// Iteration adapter for range-for loops.
3543   iterator_range<CaseIt> cases() {
3544     return make_range(case_begin(), case_end());
3545   }
3546 
3547   /// Constant iteration adapter for range-for loops.
3548   iterator_range<ConstCaseIt> cases() const {
3549     return make_range(case_begin(), case_end());
3550   }
3551 
3552   /// Returns an iterator that points to the default case.
3553   /// Note: this iterator allows to resolve successor only. Attempt
3554   /// to resolve case value causes an assertion.
3555   /// Also note, that increment and decrement also causes an assertion and
3556   /// makes iterator invalid.
3557   CaseIt case_default() {
3558     return CaseIt(this, DefaultPseudoIndex);
3559   }
3560   ConstCaseIt case_default() const {
3561     return ConstCaseIt(this, DefaultPseudoIndex);
3562   }
3563 
3564   /// Search all of the case values for the specified constant. If it is
3565   /// explicitly handled, return the case iterator of it, otherwise return
3566   /// default case iterator to indicate that it is handled by the default
3567   /// handler.
3568   CaseIt findCaseValue(const ConstantInt *C) {
3569     return CaseIt(
3570         this,
3571         const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3572   }
3573   ConstCaseIt findCaseValue(const ConstantInt *C) const {
3574     ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3575       return Case.getCaseValue() == C;
3576     });
3577     if (I != case_end())
3578       return I;
3579 
3580     return case_default();
3581   }
3582 
3583   /// Finds the unique case value for a given successor. Returns null if the
3584   /// successor is not found, not unique, or is the default case.
3585   ConstantInt *findCaseDest(BasicBlock *BB) {
3586     if (BB == getDefaultDest())
3587       return nullptr;
3588 
3589     ConstantInt *CI = nullptr;
3590     for (auto Case : cases()) {
3591       if (Case.getCaseSuccessor() != BB)
3592         continue;
3593 
3594       if (CI)
3595         return nullptr; // Multiple cases lead to BB.
3596 
3597       CI = Case.getCaseValue();
3598     }
3599 
3600     return CI;
3601   }
3602 
3603   /// Add an entry to the switch instruction.
3604   /// Note:
3605   /// This action invalidates case_end(). Old case_end() iterator will
3606   /// point to the added case.
3607   void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3608 
3609   /// This method removes the specified case and its successor from the switch
3610   /// instruction. Note that this operation may reorder the remaining cases at
3611   /// index idx and above.
3612   /// Note:
3613   /// This action invalidates iterators for all cases following the one removed,
3614   /// including the case_end() iterator. It returns an iterator for the next
3615   /// case.
3616   CaseIt removeCase(CaseIt I);
3617 
3618   unsigned getNumSuccessors() const { return getNumOperands()/2; }
3619   BasicBlock *getSuccessor(unsigned idx) const {
3620     assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3621     return cast<BasicBlock>(getOperand(idx*2+1));
3622   }
3623   void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3624     assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3625     setOperand(idx * 2 + 1, NewSucc);
3626   }
3627 
3628   // Methods for support type inquiry through isa, cast, and dyn_cast:
3629   static bool classof(const Instruction *I) {
3630     return I->getOpcode() == Instruction::Switch;
3631   }
3632   static bool classof(const Value *V) {
3633     return isa<Instruction>(V) && classof(cast<Instruction>(V));
3634   }
3635 };
3636 
3637 /// A wrapper class to simplify modification of SwitchInst cases along with
3638 /// their prof branch_weights metadata.
3639 class SwitchInstProfUpdateWrapper {
3640   SwitchInst &SI;
3641   std::optional<SmallVector<uint32_t, 8>> Weights;
3642   bool Changed = false;
3643 
3644 protected:
3645   MDNode *buildProfBranchWeightsMD();
3646 
3647   void init();
3648 
3649 public:
3650   using CaseWeightOpt = std::optional<uint32_t>;
3651   SwitchInst *operator->() { return &SI; }
3652   SwitchInst &operator*() { return SI; }
3653   operator SwitchInst *() { return &SI; }
3654 
3655   SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3656 
3657   ~SwitchInstProfUpdateWrapper() {
3658     if (Changed)
3659       SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3660   }
3661 
3662   /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3663   /// correspondent branch weight.
3664   SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3665 
3666   /// Delegate the call to the underlying SwitchInst::addCase() and set the
3667   /// specified branch weight for the added case.
3668   void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3669 
3670   /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3671   /// this object to not touch the underlying SwitchInst in destructor.
3672   Instruction::InstListType::iterator eraseFromParent();
3673 
3674   void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3675   CaseWeightOpt getSuccessorWeight(unsigned idx);
3676 
3677   static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3678 };
3679 
3680 template <>
3681 struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3682 };
3683 
3684 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
3685 
3686 //===----------------------------------------------------------------------===//
3687 //                             IndirectBrInst Class
3688 //===----------------------------------------------------------------------===//
3689 
3690 //===---------------------------------------------------------------------------
3691 /// Indirect Branch Instruction.
3692 ///
3693 class IndirectBrInst : public Instruction {
3694   unsigned ReservedSpace;
3695 
3696   // Operand[0]   = Address to jump to
3697   // Operand[n+1] = n-th destination
3698   IndirectBrInst(const IndirectBrInst &IBI);
3699 
3700   /// Create a new indirectbr instruction, specifying an
3701   /// Address to jump to.  The number of expected destinations can be specified
3702   /// here to make memory allocation more efficient.  This constructor can also
3703   /// autoinsert before another instruction.
3704   IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3705 
3706   /// Create a new indirectbr instruction, specifying an
3707   /// Address to jump to.  The number of expected destinations can be specified
3708   /// here to make memory allocation more efficient.  This constructor also
3709   /// autoinserts at the end of the specified BasicBlock.
3710   IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3711 
3712   // allocate space for exactly zero operands
3713   void *operator new(size_t S) { return User::operator new(S); }
3714 
3715   void init(Value *Address, unsigned NumDests);
3716   void growOperands();
3717 
3718 protected:
3719   // Note: Instruction needs to be a friend here to call cloneImpl.
3720   friend class Instruction;
3721 
3722   IndirectBrInst *cloneImpl() const;
3723 
3724 public:
3725   void operator delete(void *Ptr) { User::operator delete(Ptr); }
3726 
3727   /// Iterator type that casts an operand to a basic block.
3728   ///
3729   /// This only makes sense because the successors are stored as adjacent
3730   /// operands for indirectbr instructions.
3731   struct succ_op_iterator
3732       : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3733                               std::random_access_iterator_tag, BasicBlock *,
3734                               ptrdiff_t, BasicBlock *, BasicBlock *> {
3735     explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3736 
3737     BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3738     BasicBlock *operator->() const { return operator*(); }
3739   };
3740 
3741   /// The const version of `succ_op_iterator`.
3742   struct const_succ_op_iterator
3743       : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3744                               std::random_access_iterator_tag,
3745                               const BasicBlock *, ptrdiff_t, const BasicBlock *,
3746                               const BasicBlock *> {
3747     explicit const_succ_op_iterator(const_value_op_iterator I)
3748         : iterator_adaptor_base(I) {}
3749 
3750     const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3751     const BasicBlock *operator->() const { return operator*(); }
3752   };
3753 
3754   static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3755                                 Instruction *InsertBefore = nullptr) {
3756     return new IndirectBrInst(Address, NumDests, InsertBefore);
3757   }
3758 
3759   static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3760                                 BasicBlock *InsertAtEnd) {
3761     return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3762   }
3763 
3764   /// Provide fast operand accessors.
3765   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3766 
3767   // Accessor Methods for IndirectBrInst instruction.
3768   Value *getAddress() { return getOperand(0); }
3769   const Value *getAddress() const { return getOperand(0); }
3770   void setAddress(Value *V) { setOperand(0, V); }
3771 
3772   /// return the number of possible destinations in this
3773   /// indirectbr instruction.
3774   unsigned getNumDestinations() const { return getNumOperands()-1; }
3775 
3776   /// Return the specified destination.
3777   BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3778   const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3779 
3780   /// Add a destination.
3781   ///
3782   void addDestination(BasicBlock *Dest);
3783 
3784   /// This method removes the specified successor from the
3785   /// indirectbr instruction.
3786   void removeDestination(unsigned i);
3787 
3788   unsigned getNumSuccessors() const { return getNumOperands()-1; }
3789   BasicBlock *getSuccessor(unsigned i) const {
3790     return cast<BasicBlock>(getOperand(i+1));
3791   }
3792   void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3793     setOperand(i + 1, NewSucc);
3794   }
3795 
3796   iterator_range<succ_op_iterator> successors() {
3797     return make_range(succ_op_iterator(std::next(value_op_begin())),
3798                       succ_op_iterator(value_op_end()));
3799   }
3800 
3801   iterator_range<const_succ_op_iterator> successors() const {
3802     return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3803                       const_succ_op_iterator(value_op_end()));
3804   }
3805 
3806   // Methods for support type inquiry through isa, cast, and dyn_cast:
3807   static bool classof(const Instruction *I) {
3808     return I->getOpcode() == Instruction::IndirectBr;
3809   }
3810   static bool classof(const Value *V) {
3811     return isa<Instruction>(V) && classof(cast<Instruction>(V));
3812   }
3813 };
3814 
3815 template <>
3816 struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3817 };
3818 
3819 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
3820 
3821 //===----------------------------------------------------------------------===//
3822 //                               InvokeInst Class
3823 //===----------------------------------------------------------------------===//
3824 
3825 /// Invoke instruction.  The SubclassData field is used to hold the
3826 /// calling convention of the call.
3827 ///
3828 class InvokeInst : public CallBase {
3829   /// The number of operands for this call beyond the called function,
3830   /// arguments, and operand bundles.
3831   static constexpr int NumExtraOperands = 2;
3832 
3833   /// The index from the end of the operand array to the normal destination.
3834   static constexpr int NormalDestOpEndIdx = -3;
3835 
3836   /// The index from the end of the operand array to the unwind destination.
3837   static constexpr int UnwindDestOpEndIdx = -2;
3838 
3839   InvokeInst(const InvokeInst &BI);
3840 
3841   /// Construct an InvokeInst given a range of arguments.
3842   ///
3843   /// Construct an InvokeInst from a range of arguments
3844   inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3845                     BasicBlock *IfException, ArrayRef<Value *> Args,
3846                     ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3847                     const Twine &NameStr, Instruction *InsertBefore);
3848 
3849   inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3850                     BasicBlock *IfException, ArrayRef<Value *> Args,
3851                     ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3852                     const Twine &NameStr, BasicBlock *InsertAtEnd);
3853 
3854   void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3855             BasicBlock *IfException, ArrayRef<Value *> Args,
3856             ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3857 
3858   /// Compute the number of operands to allocate.
3859   static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3860     // We need one operand for the called function, plus our extra operands and
3861     // the input operand counts provided.
3862     return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3863   }
3864 
3865 protected:
3866   // Note: Instruction needs to be a friend here to call cloneImpl.
3867   friend class Instruction;
3868 
3869   InvokeInst *cloneImpl() const;
3870 
3871 public:
3872   static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3873                             BasicBlock *IfException, ArrayRef<Value *> Args,
3874                             const Twine &NameStr,
3875                             Instruction *InsertBefore = nullptr) {
3876     int NumOperands = ComputeNumOperands(Args.size());
3877     return new (NumOperands)
3878         InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
3879                    NumOperands, NameStr, InsertBefore);
3880   }
3881 
3882   static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3883                             BasicBlock *IfException, ArrayRef<Value *> Args,
3884                             ArrayRef<OperandBundleDef> Bundles = std::nullopt,
3885                             const Twine &NameStr = "",
3886                             Instruction *InsertBefore = nullptr) {
3887     int NumOperands =
3888         ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3889     unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3890 
3891     return new (NumOperands, DescriptorBytes)
3892         InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3893                    NameStr, InsertBefore);
3894   }
3895 
3896   static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3897                             BasicBlock *IfException, ArrayRef<Value *> Args,
3898                             const Twine &NameStr, BasicBlock *InsertAtEnd) {
3899     int NumOperands = ComputeNumOperands(Args.size());
3900     return new (NumOperands)
3901         InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
3902                    NumOperands, NameStr, InsertAtEnd);
3903   }
3904 
3905   static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3906                             BasicBlock *IfException, ArrayRef<Value *> Args,
3907                             ArrayRef<OperandBundleDef> Bundles,
3908                             const Twine &NameStr, BasicBlock *InsertAtEnd) {
3909     int NumOperands =
3910         ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3911     unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3912 
3913     return new (NumOperands, DescriptorBytes)
3914         InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3915                    NameStr, InsertAtEnd);
3916   }
3917 
3918   static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3919                             BasicBlock *IfException, ArrayRef<Value *> Args,
3920                             const Twine &NameStr,
3921                             Instruction *InsertBefore = nullptr) {
3922     return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3923                   IfException, Args, std::nullopt, NameStr, InsertBefore);
3924   }
3925 
3926   static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3927                             BasicBlock *IfException, ArrayRef<Value *> Args,
3928                             ArrayRef<OperandBundleDef> Bundles = std::nullopt,
3929                             const Twine &NameStr = "",
3930                             Instruction *InsertBefore = nullptr) {
3931     return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3932                   IfException, Args, Bundles, NameStr, InsertBefore);
3933   }
3934 
3935   static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3936                             BasicBlock *IfException, ArrayRef<Value *> Args,
3937                             const Twine &NameStr, BasicBlock *InsertAtEnd) {
3938     return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3939                   IfException, Args, NameStr, InsertAtEnd);
3940   }
3941 
3942   static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3943                             BasicBlock *IfException, ArrayRef<Value *> Args,
3944                             ArrayRef<OperandBundleDef> Bundles,
3945                             const Twine &NameStr, BasicBlock *InsertAtEnd) {
3946     return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3947                   IfException, Args, Bundles, NameStr, InsertAtEnd);
3948   }
3949 
3950   /// Create a clone of \p II with a different set of operand bundles and
3951   /// insert it before \p InsertPt.
3952   ///
3953   /// The returned invoke instruction is identical to \p II in every way except
3954   /// that the operand bundles for the new instruction are set to the operand
3955   /// bundles in \p Bundles.
3956   static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3957                             Instruction *InsertPt = nullptr);
3958 
3959   // get*Dest - Return the destination basic blocks...
3960   BasicBlock *getNormalDest() const {
3961     return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3962   }
3963   BasicBlock *getUnwindDest() const {
3964     return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3965   }
3966   void setNormalDest(BasicBlock *B) {
3967     Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3968   }
3969   void setUnwindDest(BasicBlock *B) {
3970     Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3971   }
3972 
3973   /// Get the landingpad instruction from the landing pad
3974   /// block (the unwind destination).
3975   LandingPadInst *getLandingPadInst() const;
3976 
3977   BasicBlock *getSuccessor(unsigned i) const {
3978     assert(i < 2 && "Successor # out of range for invoke!");
3979     return i == 0 ? getNormalDest() : getUnwindDest();
3980   }
3981 
3982   void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3983     assert(i < 2 && "Successor # out of range for invoke!");
3984     if (i == 0)
3985       setNormalDest(NewSucc);
3986     else
3987       setUnwindDest(NewSucc);
3988   }
3989 
3990   unsigned getNumSuccessors() const { return 2; }
3991 
3992   // Methods for support type inquiry through isa, cast, and dyn_cast:
3993   static bool classof(const Instruction *I) {
3994     return (I->getOpcode() == Instruction::Invoke);
3995   }
3996   static bool classof(const Value *V) {
3997     return isa<Instruction>(V) && classof(cast<Instruction>(V));
3998   }
3999 
4000 private:
4001   // Shadow Instruction::setInstructionSubclassData with a private forwarding
4002   // method so that subclasses cannot accidentally use it.
4003   template <typename Bitfield>
4004   void setSubclassData(typename Bitfield::Type Value) {
4005     Instruction::setSubclassData<Bitfield>(Value);
4006   }
4007 };
4008 
4009 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4010                        BasicBlock *IfException, ArrayRef<Value *> Args,
4011                        ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4012                        const Twine &NameStr, Instruction *InsertBefore)
4013     : CallBase(Ty->getReturnType(), Instruction::Invoke,
4014                OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4015                InsertBefore) {
4016   init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
4017 }
4018 
4019 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4020                        BasicBlock *IfException, ArrayRef<Value *> Args,
4021                        ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4022                        const Twine &NameStr, BasicBlock *InsertAtEnd)
4023     : CallBase(Ty->getReturnType(), Instruction::Invoke,
4024                OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4025                InsertAtEnd) {
4026   init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
4027 }
4028 
4029 //===----------------------------------------------------------------------===//
4030 //                              CallBrInst Class
4031 //===----------------------------------------------------------------------===//
4032 
4033 /// CallBr instruction, tracking function calls that may not return control but
4034 /// instead transfer it to a third location. The SubclassData field is used to
4035 /// hold the calling convention of the call.
4036 ///
4037 class CallBrInst : public CallBase {
4038 
4039   unsigned NumIndirectDests;
4040 
4041   CallBrInst(const CallBrInst &BI);
4042 
4043   /// Construct a CallBrInst given a range of arguments.
4044   ///
4045   /// Construct a CallBrInst from a range of arguments
4046   inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4047                     ArrayRef<BasicBlock *> IndirectDests,
4048                     ArrayRef<Value *> Args,
4049                     ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4050                     const Twine &NameStr, Instruction *InsertBefore);
4051 
4052   inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4053                     ArrayRef<BasicBlock *> IndirectDests,
4054                     ArrayRef<Value *> Args,
4055                     ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4056                     const Twine &NameStr, BasicBlock *InsertAtEnd);
4057 
4058   void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
4059             ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
4060             ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
4061 
4062   /// Compute the number of operands to allocate.
4063   static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
4064                                 int NumBundleInputs = 0) {
4065     // We need one operand for the called function, plus our extra operands and
4066     // the input operand counts provided.
4067     return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
4068   }
4069 
4070 protected:
4071   // Note: Instruction needs to be a friend here to call cloneImpl.
4072   friend class Instruction;
4073 
4074   CallBrInst *cloneImpl() const;
4075 
4076 public:
4077   static CallBrInst *Create(FunctionType *Ty, Value *Func,
4078                             BasicBlock *DefaultDest,
4079                             ArrayRef<BasicBlock *> IndirectDests,
4080                             ArrayRef<Value *> Args, const Twine &NameStr,
4081                             Instruction *InsertBefore = nullptr) {
4082     int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4083     return new (NumOperands)
4084         CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt,
4085                    NumOperands, NameStr, InsertBefore);
4086   }
4087 
4088   static CallBrInst *
4089   Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4090          ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
4091          ArrayRef<OperandBundleDef> Bundles = std::nullopt,
4092          const Twine &NameStr = "", Instruction *InsertBefore = nullptr) {
4093     int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4094                                          CountBundleInputs(Bundles));
4095     unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4096 
4097     return new (NumOperands, DescriptorBytes)
4098         CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4099                    NumOperands, NameStr, InsertBefore);
4100   }
4101 
4102   static CallBrInst *Create(FunctionType *Ty, Value *Func,
4103                             BasicBlock *DefaultDest,
4104                             ArrayRef<BasicBlock *> IndirectDests,
4105                             ArrayRef<Value *> Args, const Twine &NameStr,
4106                             BasicBlock *InsertAtEnd) {
4107     int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4108     return new (NumOperands)
4109         CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt,
4110                    NumOperands, NameStr, InsertAtEnd);
4111   }
4112 
4113   static CallBrInst *Create(FunctionType *Ty, Value *Func,
4114                             BasicBlock *DefaultDest,
4115                             ArrayRef<BasicBlock *> IndirectDests,
4116                             ArrayRef<Value *> Args,
4117                             ArrayRef<OperandBundleDef> Bundles,
4118                             const Twine &NameStr, BasicBlock *InsertAtEnd) {
4119     int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4120                                          CountBundleInputs(Bundles));
4121     unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4122 
4123     return new (NumOperands, DescriptorBytes)
4124         CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4125                    NumOperands, NameStr, InsertAtEnd);
4126   }
4127 
4128   static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4129                             ArrayRef<BasicBlock *> IndirectDests,
4130                             ArrayRef<Value *> Args, const Twine &NameStr,
4131                             Instruction *InsertBefore = nullptr) {
4132     return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4133                   IndirectDests, Args, NameStr, InsertBefore);
4134   }
4135 
4136   static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4137                             ArrayRef<BasicBlock *> IndirectDests,
4138                             ArrayRef<Value *> Args,
4139                             ArrayRef<OperandBundleDef> Bundles = std::nullopt,
4140                             const Twine &NameStr = "",
4141                             Instruction *InsertBefore = nullptr) {
4142     return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4143                   IndirectDests, Args, Bundles, NameStr, InsertBefore);
4144   }
4145 
4146   static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4147                             ArrayRef<BasicBlock *> IndirectDests,
4148                             ArrayRef<Value *> Args, const Twine &NameStr,
4149                             BasicBlock *InsertAtEnd) {
4150     return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4151                   IndirectDests, Args, NameStr, InsertAtEnd);
4152   }
4153 
4154   static CallBrInst *Create(FunctionCallee Func,
4155                             BasicBlock *DefaultDest,
4156                             ArrayRef<BasicBlock *> IndirectDests,
4157                             ArrayRef<Value *> Args,
4158                             ArrayRef<OperandBundleDef> Bundles,
4159                             const Twine &NameStr, BasicBlock *InsertAtEnd) {
4160     return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4161                   IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4162   }
4163 
4164   /// Create a clone of \p CBI with a different set of operand bundles and
4165   /// insert it before \p InsertPt.
4166   ///
4167   /// The returned callbr instruction is identical to \p CBI in every way
4168   /// except that the operand bundles for the new instruction are set to the
4169   /// operand bundles in \p Bundles.
4170   static CallBrInst *Create(CallBrInst *CBI,
4171                             ArrayRef<OperandBundleDef> Bundles,
4172                             Instruction *InsertPt = nullptr);
4173 
4174   /// Return the number of callbr indirect dest labels.
4175   ///
4176   unsigned getNumIndirectDests() const { return NumIndirectDests; }
4177 
4178   /// getIndirectDestLabel - Return the i-th indirect dest label.
4179   ///
4180   Value *getIndirectDestLabel(unsigned i) const {
4181     assert(i < getNumIndirectDests() && "Out of bounds!");
4182     return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
4183   }
4184 
4185   Value *getIndirectDestLabelUse(unsigned i) const {
4186     assert(i < getNumIndirectDests() && "Out of bounds!");
4187     return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
4188   }
4189 
4190   // Return the destination basic blocks...
4191   BasicBlock *getDefaultDest() const {
4192     return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4193   }
4194   BasicBlock *getIndirectDest(unsigned i) const {
4195     return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4196   }
4197   SmallVector<BasicBlock *, 16> getIndirectDests() const {
4198     SmallVector<BasicBlock *, 16> IndirectDests;
4199     for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4200       IndirectDests.push_back(getIndirectDest(i));
4201     return IndirectDests;
4202   }
4203   void setDefaultDest(BasicBlock *B) {
4204     *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4205   }
4206   void setIndirectDest(unsigned i, BasicBlock *B) {
4207     *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4208   }
4209 
4210   BasicBlock *getSuccessor(unsigned i) const {
4211     assert(i < getNumSuccessors() + 1 &&
4212            "Successor # out of range for callbr!");
4213     return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4214   }
4215 
4216   void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4217     assert(i < getNumIndirectDests() + 1 &&
4218            "Successor # out of range for callbr!");
4219     return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4220   }
4221 
4222   unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4223 
4224   // Methods for support type inquiry through isa, cast, and dyn_cast:
4225   static bool classof(const Instruction *I) {
4226     return (I->getOpcode() == Instruction::CallBr);
4227   }
4228   static bool classof(const Value *V) {
4229     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4230   }
4231 
4232 private:
4233   // Shadow Instruction::setInstructionSubclassData with a private forwarding
4234   // method so that subclasses cannot accidentally use it.
4235   template <typename Bitfield>
4236   void setSubclassData(typename Bitfield::Type Value) {
4237     Instruction::setSubclassData<Bitfield>(Value);
4238   }
4239 };
4240 
4241 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4242                        ArrayRef<BasicBlock *> IndirectDests,
4243                        ArrayRef<Value *> Args,
4244                        ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4245                        const Twine &NameStr, Instruction *InsertBefore)
4246     : CallBase(Ty->getReturnType(), Instruction::CallBr,
4247                OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4248                InsertBefore) {
4249   init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4250 }
4251 
4252 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4253                        ArrayRef<BasicBlock *> IndirectDests,
4254                        ArrayRef<Value *> Args,
4255                        ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4256                        const Twine &NameStr, BasicBlock *InsertAtEnd)
4257     : CallBase(Ty->getReturnType(), Instruction::CallBr,
4258                OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4259                InsertAtEnd) {
4260   init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4261 }
4262 
4263 //===----------------------------------------------------------------------===//
4264 //                              ResumeInst Class
4265 //===----------------------------------------------------------------------===//
4266 
4267 //===---------------------------------------------------------------------------
4268 /// Resume the propagation of an exception.
4269 ///
4270 class ResumeInst : public Instruction {
4271   ResumeInst(const ResumeInst &RI);
4272 
4273   explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4274   ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4275 
4276 protected:
4277   // Note: Instruction needs to be a friend here to call cloneImpl.
4278   friend class Instruction;
4279 
4280   ResumeInst *cloneImpl() const;
4281 
4282 public:
4283   static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4284     return new(1) ResumeInst(Exn, InsertBefore);
4285   }
4286 
4287   static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4288     return new(1) ResumeInst(Exn, InsertAtEnd);
4289   }
4290 
4291   /// Provide fast operand accessors
4292   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4293 
4294   /// Convenience accessor.
4295   Value *getValue() const { return Op<0>(); }
4296 
4297   unsigned getNumSuccessors() const { return 0; }
4298 
4299   // Methods for support type inquiry through isa, cast, and dyn_cast:
4300   static bool classof(const Instruction *I) {
4301     return I->getOpcode() == Instruction::Resume;
4302   }
4303   static bool classof(const Value *V) {
4304     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4305   }
4306 
4307 private:
4308   BasicBlock *getSuccessor(unsigned idx) const {
4309     llvm_unreachable("ResumeInst has no successors!");
4310   }
4311 
4312   void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4313     llvm_unreachable("ResumeInst has no successors!");
4314   }
4315 };
4316 
4317 template <>
4318 struct OperandTraits<ResumeInst> :
4319     public FixedNumOperandTraits<ResumeInst, 1> {
4320 };
4321 
4322 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
4323 
4324 //===----------------------------------------------------------------------===//
4325 //                         CatchSwitchInst Class
4326 //===----------------------------------------------------------------------===//
4327 class CatchSwitchInst : public Instruction {
4328   using UnwindDestField = BoolBitfieldElementT<0>;
4329 
4330   /// The number of operands actually allocated.  NumOperands is
4331   /// the number actually in use.
4332   unsigned ReservedSpace;
4333 
4334   // Operand[0] = Outer scope
4335   // Operand[1] = Unwind block destination
4336   // Operand[n] = BasicBlock to go to on match
4337   CatchSwitchInst(const CatchSwitchInst &CSI);
4338 
4339   /// Create a new switch instruction, specifying a
4340   /// default destination.  The number of additional handlers can be specified
4341   /// here to make memory allocation more efficient.
4342   /// This constructor can also autoinsert before another instruction.
4343   CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4344                   unsigned NumHandlers, const Twine &NameStr,
4345                   Instruction *InsertBefore);
4346 
4347   /// Create a new switch instruction, specifying a
4348   /// default destination.  The number of additional handlers can be specified
4349   /// here to make memory allocation more efficient.
4350   /// This constructor also autoinserts at the end of the specified BasicBlock.
4351   CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4352                   unsigned NumHandlers, const Twine &NameStr,
4353                   BasicBlock *InsertAtEnd);
4354 
4355   // allocate space for exactly zero operands
4356   void *operator new(size_t S) { return User::operator new(S); }
4357 
4358   void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4359   void growOperands(unsigned Size);
4360 
4361 protected:
4362   // Note: Instruction needs to be a friend here to call cloneImpl.
4363   friend class Instruction;
4364 
4365   CatchSwitchInst *cloneImpl() const;
4366 
4367 public:
4368   void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4369 
4370   static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4371                                  unsigned NumHandlers,
4372                                  const Twine &NameStr = "",
4373                                  Instruction *InsertBefore = nullptr) {
4374     return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4375                                InsertBefore);
4376   }
4377 
4378   static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4379                                  unsigned NumHandlers, const Twine &NameStr,
4380                                  BasicBlock *InsertAtEnd) {
4381     return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4382                                InsertAtEnd);
4383   }
4384 
4385   /// Provide fast operand accessors
4386   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4387 
4388   // Accessor Methods for CatchSwitch stmt
4389   Value *getParentPad() const { return getOperand(0); }
4390   void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4391 
4392   // Accessor Methods for CatchSwitch stmt
4393   bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4394   bool unwindsToCaller() const { return !hasUnwindDest(); }
4395   BasicBlock *getUnwindDest() const {
4396     if (hasUnwindDest())
4397       return cast<BasicBlock>(getOperand(1));
4398     return nullptr;
4399   }
4400   void setUnwindDest(BasicBlock *UnwindDest) {
4401     assert(UnwindDest);
4402     assert(hasUnwindDest());
4403     setOperand(1, UnwindDest);
4404   }
4405 
4406   /// return the number of 'handlers' in this catchswitch
4407   /// instruction, except the default handler
4408   unsigned getNumHandlers() const {
4409     if (hasUnwindDest())
4410       return getNumOperands() - 2;
4411     return getNumOperands() - 1;
4412   }
4413 
4414 private:
4415   static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4416   static const BasicBlock *handler_helper(const Value *V) {
4417     return cast<BasicBlock>(V);
4418   }
4419 
4420 public:
4421   using DerefFnTy = BasicBlock *(*)(Value *);
4422   using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4423   using handler_range = iterator_range<handler_iterator>;
4424   using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4425   using const_handler_iterator =
4426       mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4427   using const_handler_range = iterator_range<const_handler_iterator>;
4428 
4429   /// Returns an iterator that points to the first handler in CatchSwitchInst.
4430   handler_iterator handler_begin() {
4431     op_iterator It = op_begin() + 1;
4432     if (hasUnwindDest())
4433       ++It;
4434     return handler_iterator(It, DerefFnTy(handler_helper));
4435   }
4436 
4437   /// Returns an iterator that points to the first handler in the
4438   /// CatchSwitchInst.
4439   const_handler_iterator handler_begin() const {
4440     const_op_iterator It = op_begin() + 1;
4441     if (hasUnwindDest())
4442       ++It;
4443     return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4444   }
4445 
4446   /// Returns a read-only iterator that points one past the last
4447   /// handler in the CatchSwitchInst.
4448   handler_iterator handler_end() {
4449     return handler_iterator(op_end(), DerefFnTy(handler_helper));
4450   }
4451 
4452   /// Returns an iterator that points one past the last handler in the
4453   /// CatchSwitchInst.
4454   const_handler_iterator handler_end() const {
4455     return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4456   }
4457 
4458   /// iteration adapter for range-for loops.
4459   handler_range handlers() {
4460     return make_range(handler_begin(), handler_end());
4461   }
4462 
4463   /// iteration adapter for range-for loops.
4464   const_handler_range handlers() const {
4465     return make_range(handler_begin(), handler_end());
4466   }
4467 
4468   /// Add an entry to the switch instruction...
4469   /// Note:
4470   /// This action invalidates handler_end(). Old handler_end() iterator will
4471   /// point to the added handler.
4472   void addHandler(BasicBlock *Dest);
4473 
4474   void removeHandler(handler_iterator HI);
4475 
4476   unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4477   BasicBlock *getSuccessor(unsigned Idx) const {
4478     assert(Idx < getNumSuccessors() &&
4479            "Successor # out of range for catchswitch!");
4480     return cast<BasicBlock>(getOperand(Idx + 1));
4481   }
4482   void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4483     assert(Idx < getNumSuccessors() &&
4484            "Successor # out of range for catchswitch!");
4485     setOperand(Idx + 1, NewSucc);
4486   }
4487 
4488   // Methods for support type inquiry through isa, cast, and dyn_cast:
4489   static bool classof(const Instruction *I) {
4490     return I->getOpcode() == Instruction::CatchSwitch;
4491   }
4492   static bool classof(const Value *V) {
4493     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4494   }
4495 };
4496 
4497 template <>
4498 struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4499 
4500 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
4501 
4502 //===----------------------------------------------------------------------===//
4503 //                               CleanupPadInst Class
4504 //===----------------------------------------------------------------------===//
4505 class CleanupPadInst : public FuncletPadInst {
4506 private:
4507   explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4508                           unsigned Values, const Twine &NameStr,
4509                           Instruction *InsertBefore)
4510       : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4511                        NameStr, InsertBefore) {}
4512   explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4513                           unsigned Values, const Twine &NameStr,
4514                           BasicBlock *InsertAtEnd)
4515       : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4516                        NameStr, InsertAtEnd) {}
4517 
4518 public:
4519   static CleanupPadInst *Create(Value *ParentPad,
4520                                 ArrayRef<Value *> Args = std::nullopt,
4521                                 const Twine &NameStr = "",
4522                                 Instruction *InsertBefore = nullptr) {
4523     unsigned Values = 1 + Args.size();
4524     return new (Values)
4525         CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4526   }
4527 
4528   static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4529                                 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4530     unsigned Values = 1 + Args.size();
4531     return new (Values)
4532         CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4533   }
4534 
4535   /// Methods for support type inquiry through isa, cast, and dyn_cast:
4536   static bool classof(const Instruction *I) {
4537     return I->getOpcode() == Instruction::CleanupPad;
4538   }
4539   static bool classof(const Value *V) {
4540     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4541   }
4542 };
4543 
4544 //===----------------------------------------------------------------------===//
4545 //                               CatchPadInst Class
4546 //===----------------------------------------------------------------------===//
4547 class CatchPadInst : public FuncletPadInst {
4548 private:
4549   explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4550                         unsigned Values, const Twine &NameStr,
4551                         Instruction *InsertBefore)
4552       : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4553                        NameStr, InsertBefore) {}
4554   explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4555                         unsigned Values, const Twine &NameStr,
4556                         BasicBlock *InsertAtEnd)
4557       : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4558                        NameStr, InsertAtEnd) {}
4559 
4560 public:
4561   static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4562                               const Twine &NameStr = "",
4563                               Instruction *InsertBefore = nullptr) {
4564     unsigned Values = 1 + Args.size();
4565     return new (Values)
4566         CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4567   }
4568 
4569   static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4570                               const Twine &NameStr, BasicBlock *InsertAtEnd) {
4571     unsigned Values = 1 + Args.size();
4572     return new (Values)
4573         CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4574   }
4575 
4576   /// Convenience accessors
4577   CatchSwitchInst *getCatchSwitch() const {
4578     return cast<CatchSwitchInst>(Op<-1>());
4579   }
4580   void setCatchSwitch(Value *CatchSwitch) {
4581     assert(CatchSwitch);
4582     Op<-1>() = CatchSwitch;
4583   }
4584 
4585   /// Methods for support type inquiry through isa, cast, and dyn_cast:
4586   static bool classof(const Instruction *I) {
4587     return I->getOpcode() == Instruction::CatchPad;
4588   }
4589   static bool classof(const Value *V) {
4590     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4591   }
4592 };
4593 
4594 //===----------------------------------------------------------------------===//
4595 //                               CatchReturnInst Class
4596 //===----------------------------------------------------------------------===//
4597 
4598 class CatchReturnInst : public Instruction {
4599   CatchReturnInst(const CatchReturnInst &RI);
4600   CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4601   CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4602 
4603   void init(Value *CatchPad, BasicBlock *BB);
4604 
4605 protected:
4606   // Note: Instruction needs to be a friend here to call cloneImpl.
4607   friend class Instruction;
4608 
4609   CatchReturnInst *cloneImpl() const;
4610 
4611 public:
4612   static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4613                                  Instruction *InsertBefore = nullptr) {
4614     assert(CatchPad);
4615     assert(BB);
4616     return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4617   }
4618 
4619   static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4620                                  BasicBlock *InsertAtEnd) {
4621     assert(CatchPad);
4622     assert(BB);
4623     return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4624   }
4625 
4626   /// Provide fast operand accessors
4627   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4628 
4629   /// Convenience accessors.
4630   CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4631   void setCatchPad(CatchPadInst *CatchPad) {
4632     assert(CatchPad);
4633     Op<0>() = CatchPad;
4634   }
4635 
4636   BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4637   void setSuccessor(BasicBlock *NewSucc) {
4638     assert(NewSucc);
4639     Op<1>() = NewSucc;
4640   }
4641   unsigned getNumSuccessors() const { return 1; }
4642 
4643   /// Get the parentPad of this catchret's catchpad's catchswitch.
4644   /// The successor block is implicitly a member of this funclet.
4645   Value *getCatchSwitchParentPad() const {
4646     return getCatchPad()->getCatchSwitch()->getParentPad();
4647   }
4648 
4649   // Methods for support type inquiry through isa, cast, and dyn_cast:
4650   static bool classof(const Instruction *I) {
4651     return (I->getOpcode() == Instruction::CatchRet);
4652   }
4653   static bool classof(const Value *V) {
4654     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4655   }
4656 
4657 private:
4658   BasicBlock *getSuccessor(unsigned Idx) const {
4659     assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4660     return getSuccessor();
4661   }
4662 
4663   void setSuccessor(unsigned Idx, BasicBlock *B) {
4664     assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4665     setSuccessor(B);
4666   }
4667 };
4668 
4669 template <>
4670 struct OperandTraits<CatchReturnInst>
4671     : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4672 
4673 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
4674 
4675 //===----------------------------------------------------------------------===//
4676 //                               CleanupReturnInst Class
4677 //===----------------------------------------------------------------------===//
4678 
4679 class CleanupReturnInst : public Instruction {
4680   using UnwindDestField = BoolBitfieldElementT<0>;
4681 
4682 private:
4683   CleanupReturnInst(const CleanupReturnInst &RI);
4684   CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4685                     Instruction *InsertBefore = nullptr);
4686   CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4687                     BasicBlock *InsertAtEnd);
4688 
4689   void init(Value *CleanupPad, BasicBlock *UnwindBB);
4690 
4691 protected:
4692   // Note: Instruction needs to be a friend here to call cloneImpl.
4693   friend class Instruction;
4694 
4695   CleanupReturnInst *cloneImpl() const;
4696 
4697 public:
4698   static CleanupReturnInst *Create(Value *CleanupPad,
4699                                    BasicBlock *UnwindBB = nullptr,
4700                                    Instruction *InsertBefore = nullptr) {
4701     assert(CleanupPad);
4702     unsigned Values = 1;
4703     if (UnwindBB)
4704       ++Values;
4705     return new (Values)
4706         CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4707   }
4708 
4709   static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4710                                    BasicBlock *InsertAtEnd) {
4711     assert(CleanupPad);
4712     unsigned Values = 1;
4713     if (UnwindBB)
4714       ++Values;
4715     return new (Values)
4716         CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4717   }
4718 
4719   /// Provide fast operand accessors
4720   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4721 
4722   bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4723   bool unwindsToCaller() const { return !hasUnwindDest(); }
4724 
4725   /// Convenience accessor.
4726   CleanupPadInst *getCleanupPad() const {
4727     return cast<CleanupPadInst>(Op<0>());
4728   }
4729   void setCleanupPad(CleanupPadInst *CleanupPad) {
4730     assert(CleanupPad);
4731     Op<0>() = CleanupPad;
4732   }
4733 
4734   unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4735 
4736   BasicBlock *getUnwindDest() const {
4737     return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4738   }
4739   void setUnwindDest(BasicBlock *NewDest) {
4740     assert(NewDest);
4741     assert(hasUnwindDest());
4742     Op<1>() = NewDest;
4743   }
4744 
4745   // Methods for support type inquiry through isa, cast, and dyn_cast:
4746   static bool classof(const Instruction *I) {
4747     return (I->getOpcode() == Instruction::CleanupRet);
4748   }
4749   static bool classof(const Value *V) {
4750     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4751   }
4752 
4753 private:
4754   BasicBlock *getSuccessor(unsigned Idx) const {
4755     assert(Idx == 0);
4756     return getUnwindDest();
4757   }
4758 
4759   void setSuccessor(unsigned Idx, BasicBlock *B) {
4760     assert(Idx == 0);
4761     setUnwindDest(B);
4762   }
4763 
4764   // Shadow Instruction::setInstructionSubclassData with a private forwarding
4765   // method so that subclasses cannot accidentally use it.
4766   template <typename Bitfield>
4767   void setSubclassData(typename Bitfield::Type Value) {
4768     Instruction::setSubclassData<Bitfield>(Value);
4769   }
4770 };
4771 
4772 template <>
4773 struct OperandTraits<CleanupReturnInst>
4774     : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
4775 
4776 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
4777 
4778 //===----------------------------------------------------------------------===//
4779 //                           UnreachableInst Class
4780 //===----------------------------------------------------------------------===//
4781 
4782 //===---------------------------------------------------------------------------
4783 /// This function has undefined behavior.  In particular, the
4784 /// presence of this instruction indicates some higher level knowledge that the
4785 /// end of the block cannot be reached.
4786 ///
4787 class UnreachableInst : public Instruction {
4788 protected:
4789   // Note: Instruction needs to be a friend here to call cloneImpl.
4790   friend class Instruction;
4791 
4792   UnreachableInst *cloneImpl() const;
4793 
4794 public:
4795   explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
4796   explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
4797 
4798   // allocate space for exactly zero operands
4799   void *operator new(size_t S) { return User::operator new(S, 0); }
4800   void operator delete(void *Ptr) { User::operator delete(Ptr); }
4801 
4802   unsigned getNumSuccessors() const { return 0; }
4803 
4804   // Methods for support type inquiry through isa, cast, and dyn_cast:
4805   static bool classof(const Instruction *I) {
4806     return I->getOpcode() == Instruction::Unreachable;
4807   }
4808   static bool classof(const Value *V) {
4809     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4810   }
4811 
4812 private:
4813   BasicBlock *getSuccessor(unsigned idx) const {
4814     llvm_unreachable("UnreachableInst has no successors!");
4815   }
4816 
4817   void setSuccessor(unsigned idx, BasicBlock *B) {
4818     llvm_unreachable("UnreachableInst has no successors!");
4819   }
4820 };
4821 
4822 //===----------------------------------------------------------------------===//
4823 //                                 TruncInst Class
4824 //===----------------------------------------------------------------------===//
4825 
4826 /// This class represents a truncation of integer types.
4827 class TruncInst : public CastInst {
4828 protected:
4829   // Note: Instruction needs to be a friend here to call cloneImpl.
4830   friend class Instruction;
4831 
4832   /// Clone an identical TruncInst
4833   TruncInst *cloneImpl() const;
4834 
4835 public:
4836   /// Constructor with insert-before-instruction semantics
4837   TruncInst(
4838     Value *S,                           ///< The value to be truncated
4839     Type *Ty,                           ///< The (smaller) type to truncate to
4840     const Twine &NameStr = "",          ///< A name for the new instruction
4841     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4842   );
4843 
4844   /// Constructor with insert-at-end-of-block semantics
4845   TruncInst(
4846     Value *S,                     ///< The value to be truncated
4847     Type *Ty,                     ///< The (smaller) type to truncate to
4848     const Twine &NameStr,         ///< A name for the new instruction
4849     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
4850   );
4851 
4852   /// Methods for support type inquiry through isa, cast, and dyn_cast:
4853   static bool classof(const Instruction *I) {
4854     return I->getOpcode() == Trunc;
4855   }
4856   static bool classof(const Value *V) {
4857     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4858   }
4859 };
4860 
4861 //===----------------------------------------------------------------------===//
4862 //                                 ZExtInst Class
4863 //===----------------------------------------------------------------------===//
4864 
4865 /// This class represents zero extension of integer types.
4866 class ZExtInst : public CastInst {
4867 protected:
4868   // Note: Instruction needs to be a friend here to call cloneImpl.
4869   friend class Instruction;
4870 
4871   /// Clone an identical ZExtInst
4872   ZExtInst *cloneImpl() const;
4873 
4874 public:
4875   /// Constructor with insert-before-instruction semantics
4876   ZExtInst(
4877     Value *S,                           ///< The value to be zero extended
4878     Type *Ty,                           ///< The type to zero extend to
4879     const Twine &NameStr = "",          ///< A name for the new instruction
4880     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4881   );
4882 
4883   /// Constructor with insert-at-end semantics.
4884   ZExtInst(
4885     Value *S,                     ///< The value to be zero extended
4886     Type *Ty,                     ///< The type to zero extend to
4887     const Twine &NameStr,         ///< A name for the new instruction
4888     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
4889   );
4890 
4891   /// Methods for support type inquiry through isa, cast, and dyn_cast:
4892   static bool classof(const Instruction *I) {
4893     return I->getOpcode() == ZExt;
4894   }
4895   static bool classof(const Value *V) {
4896     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4897   }
4898 };
4899 
4900 //===----------------------------------------------------------------------===//
4901 //                                 SExtInst Class
4902 //===----------------------------------------------------------------------===//
4903 
4904 /// This class represents a sign extension of integer types.
4905 class SExtInst : public CastInst {
4906 protected:
4907   // Note: Instruction needs to be a friend here to call cloneImpl.
4908   friend class Instruction;
4909 
4910   /// Clone an identical SExtInst
4911   SExtInst *cloneImpl() const;
4912 
4913 public:
4914   /// Constructor with insert-before-instruction semantics
4915   SExtInst(
4916     Value *S,                           ///< The value to be sign extended
4917     Type *Ty,                           ///< The type to sign extend to
4918     const Twine &NameStr = "",          ///< A name for the new instruction
4919     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4920   );
4921 
4922   /// Constructor with insert-at-end-of-block semantics
4923   SExtInst(
4924     Value *S,                     ///< The value to be sign extended
4925     Type *Ty,                     ///< The type to sign extend to
4926     const Twine &NameStr,         ///< A name for the new instruction
4927     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
4928   );
4929 
4930   /// Methods for support type inquiry through isa, cast, and dyn_cast:
4931   static bool classof(const Instruction *I) {
4932     return I->getOpcode() == SExt;
4933   }
4934   static bool classof(const Value *V) {
4935     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4936   }
4937 };
4938 
4939 //===----------------------------------------------------------------------===//
4940 //                                 FPTruncInst Class
4941 //===----------------------------------------------------------------------===//
4942 
4943 /// This class represents a truncation of floating point types.
4944 class FPTruncInst : public CastInst {
4945 protected:
4946   // Note: Instruction needs to be a friend here to call cloneImpl.
4947   friend class Instruction;
4948 
4949   /// Clone an identical FPTruncInst
4950   FPTruncInst *cloneImpl() const;
4951 
4952 public:
4953   /// Constructor with insert-before-instruction semantics
4954   FPTruncInst(
4955     Value *S,                           ///< The value to be truncated
4956     Type *Ty,                           ///< The type to truncate to
4957     const Twine &NameStr = "",          ///< A name for the new instruction
4958     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4959   );
4960 
4961   /// Constructor with insert-before-instruction semantics
4962   FPTruncInst(
4963     Value *S,                     ///< The value to be truncated
4964     Type *Ty,                     ///< The type to truncate to
4965     const Twine &NameStr,         ///< A name for the new instruction
4966     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
4967   );
4968 
4969   /// Methods for support type inquiry through isa, cast, and dyn_cast:
4970   static bool classof(const Instruction *I) {
4971     return I->getOpcode() == FPTrunc;
4972   }
4973   static bool classof(const Value *V) {
4974     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4975   }
4976 };
4977 
4978 //===----------------------------------------------------------------------===//
4979 //                                 FPExtInst Class
4980 //===----------------------------------------------------------------------===//
4981 
4982 /// This class represents an extension of floating point types.
4983 class FPExtInst : public CastInst {
4984 protected:
4985   // Note: Instruction needs to be a friend here to call cloneImpl.
4986   friend class Instruction;
4987 
4988   /// Clone an identical FPExtInst
4989   FPExtInst *cloneImpl() const;
4990 
4991 public:
4992   /// Constructor with insert-before-instruction semantics
4993   FPExtInst(
4994     Value *S,                           ///< The value to be extended
4995     Type *Ty,                           ///< The type to extend to
4996     const Twine &NameStr = "",          ///< A name for the new instruction
4997     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4998   );
4999 
5000   /// Constructor with insert-at-end-of-block semantics
5001   FPExtInst(
5002     Value *S,                     ///< The value to be extended
5003     Type *Ty,                     ///< The type to extend to
5004     const Twine &NameStr,         ///< A name for the new instruction
5005     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
5006   );
5007 
5008   /// Methods for support type inquiry through isa, cast, and dyn_cast:
5009   static bool classof(const Instruction *I) {
5010     return I->getOpcode() == FPExt;
5011   }
5012   static bool classof(const Value *V) {
5013     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5014   }
5015 };
5016 
5017 //===----------------------------------------------------------------------===//
5018 //                                 UIToFPInst Class
5019 //===----------------------------------------------------------------------===//
5020 
5021 /// This class represents a cast unsigned integer to floating point.
5022 class UIToFPInst : public CastInst {
5023 protected:
5024   // Note: Instruction needs to be a friend here to call cloneImpl.
5025   friend class Instruction;
5026 
5027   /// Clone an identical UIToFPInst
5028   UIToFPInst *cloneImpl() const;
5029 
5030 public:
5031   /// Constructor with insert-before-instruction semantics
5032   UIToFPInst(
5033     Value *S,                           ///< The value to be converted
5034     Type *Ty,                           ///< The type to convert to
5035     const Twine &NameStr = "",          ///< A name for the new instruction
5036     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5037   );
5038 
5039   /// Constructor with insert-at-end-of-block semantics
5040   UIToFPInst(
5041     Value *S,                     ///< The value to be converted
5042     Type *Ty,                     ///< The type to convert to
5043     const Twine &NameStr,         ///< A name for the new instruction
5044     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
5045   );
5046 
5047   /// Methods for support type inquiry through isa, cast, and dyn_cast:
5048   static bool classof(const Instruction *I) {
5049     return I->getOpcode() == UIToFP;
5050   }
5051   static bool classof(const Value *V) {
5052     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5053   }
5054 };
5055 
5056 //===----------------------------------------------------------------------===//
5057 //                                 SIToFPInst Class
5058 //===----------------------------------------------------------------------===//
5059 
5060 /// This class represents a cast from signed integer to floating point.
5061 class SIToFPInst : public CastInst {
5062 protected:
5063   // Note: Instruction needs to be a friend here to call cloneImpl.
5064   friend class Instruction;
5065 
5066   /// Clone an identical SIToFPInst
5067   SIToFPInst *cloneImpl() const;
5068 
5069 public:
5070   /// Constructor with insert-before-instruction semantics
5071   SIToFPInst(
5072     Value *S,                           ///< The value to be converted
5073     Type *Ty,                           ///< The type to convert to
5074     const Twine &NameStr = "",          ///< A name for the new instruction
5075     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5076   );
5077 
5078   /// Constructor with insert-at-end-of-block semantics
5079   SIToFPInst(
5080     Value *S,                     ///< The value to be converted
5081     Type *Ty,                     ///< The type to convert to
5082     const Twine &NameStr,         ///< A name for the new instruction
5083     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
5084   );
5085 
5086   /// Methods for support type inquiry through isa, cast, and dyn_cast:
5087   static bool classof(const Instruction *I) {
5088     return I->getOpcode() == SIToFP;
5089   }
5090   static bool classof(const Value *V) {
5091     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5092   }
5093 };
5094 
5095 //===----------------------------------------------------------------------===//
5096 //                                 FPToUIInst Class
5097 //===----------------------------------------------------------------------===//
5098 
5099 /// This class represents a cast from floating point to unsigned integer
5100 class FPToUIInst  : public CastInst {
5101 protected:
5102   // Note: Instruction needs to be a friend here to call cloneImpl.
5103   friend class Instruction;
5104 
5105   /// Clone an identical FPToUIInst
5106   FPToUIInst *cloneImpl() const;
5107 
5108 public:
5109   /// Constructor with insert-before-instruction semantics
5110   FPToUIInst(
5111     Value *S,                           ///< The value to be converted
5112     Type *Ty,                           ///< The type to convert to
5113     const Twine &NameStr = "",          ///< A name for the new instruction
5114     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5115   );
5116 
5117   /// Constructor with insert-at-end-of-block semantics
5118   FPToUIInst(
5119     Value *S,                     ///< The value to be converted
5120     Type *Ty,                     ///< The type to convert to
5121     const Twine &NameStr,         ///< A name for the new instruction
5122     BasicBlock *InsertAtEnd       ///< Where to insert the new instruction
5123   );
5124 
5125   /// Methods for support type inquiry through isa, cast, and dyn_cast:
5126   static bool classof(const Instruction *I) {
5127     return I->getOpcode() == FPToUI;
5128   }
5129   static bool classof(const Value *V) {
5130     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5131   }
5132 };
5133 
5134 //===----------------------------------------------------------------------===//
5135 //                                 FPToSIInst Class
5136 //===----------------------------------------------------------------------===//
5137 
5138 /// This class represents a cast from floating point to signed integer.
5139 class FPToSIInst  : public CastInst {
5140 protected:
5141   // Note: Instruction needs to be a friend here to call cloneImpl.
5142   friend class Instruction;
5143 
5144   /// Clone an identical FPToSIInst
5145   FPToSIInst *cloneImpl() const;
5146 
5147 public:
5148   /// Constructor with insert-before-instruction semantics
5149   FPToSIInst(
5150     Value *S,                           ///< The value to be converted
5151     Type *Ty,                           ///< The type to convert to
5152     const Twine &NameStr = "",          ///< A name for the new instruction
5153     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5154   );
5155 
5156   /// Constructor with insert-at-end-of-block semantics
5157   FPToSIInst(
5158     Value *S,                     ///< The value to be converted
5159     Type *Ty,                     ///< The type to convert to
5160     const Twine &NameStr,         ///< A name for the new instruction
5161     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
5162   );
5163 
5164   /// Methods for support type inquiry through isa, cast, and dyn_cast:
5165   static bool classof(const Instruction *I) {
5166     return I->getOpcode() == FPToSI;
5167   }
5168   static bool classof(const Value *V) {
5169     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5170   }
5171 };
5172 
5173 //===----------------------------------------------------------------------===//
5174 //                                 IntToPtrInst Class
5175 //===----------------------------------------------------------------------===//
5176 
5177 /// This class represents a cast from an integer to a pointer.
5178 class IntToPtrInst : public CastInst {
5179 public:
5180   // Note: Instruction needs to be a friend here to call cloneImpl.
5181   friend class Instruction;
5182 
5183   /// Constructor with insert-before-instruction semantics
5184   IntToPtrInst(
5185     Value *S,                           ///< The value to be converted
5186     Type *Ty,                           ///< The type to convert to
5187     const Twine &NameStr = "",          ///< A name for the new instruction
5188     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5189   );
5190 
5191   /// Constructor with insert-at-end-of-block semantics
5192   IntToPtrInst(
5193     Value *S,                     ///< The value to be converted
5194     Type *Ty,                     ///< The type to convert to
5195     const Twine &NameStr,         ///< A name for the new instruction
5196     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
5197   );
5198 
5199   /// Clone an identical IntToPtrInst.
5200   IntToPtrInst *cloneImpl() const;
5201 
5202   /// Returns the address space of this instruction's pointer type.
5203   unsigned getAddressSpace() const {
5204     return getType()->getPointerAddressSpace();
5205   }
5206 
5207   // Methods for support type inquiry through isa, cast, and dyn_cast:
5208   static bool classof(const Instruction *I) {
5209     return I->getOpcode() == IntToPtr;
5210   }
5211   static bool classof(const Value *V) {
5212     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5213   }
5214 };
5215 
5216 //===----------------------------------------------------------------------===//
5217 //                                 PtrToIntInst Class
5218 //===----------------------------------------------------------------------===//
5219 
5220 /// This class represents a cast from a pointer to an integer.
5221 class PtrToIntInst : public CastInst {
5222 protected:
5223   // Note: Instruction needs to be a friend here to call cloneImpl.
5224   friend class Instruction;
5225 
5226   /// Clone an identical PtrToIntInst.
5227   PtrToIntInst *cloneImpl() const;
5228 
5229 public:
5230   /// Constructor with insert-before-instruction semantics
5231   PtrToIntInst(
5232     Value *S,                           ///< The value to be converted
5233     Type *Ty,                           ///< The type to convert to
5234     const Twine &NameStr = "",          ///< A name for the new instruction
5235     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5236   );
5237 
5238   /// Constructor with insert-at-end-of-block semantics
5239   PtrToIntInst(
5240     Value *S,                     ///< The value to be converted
5241     Type *Ty,                     ///< The type to convert to
5242     const Twine &NameStr,         ///< A name for the new instruction
5243     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
5244   );
5245 
5246   /// Gets the pointer operand.
5247   Value *getPointerOperand() { return getOperand(0); }
5248   /// Gets the pointer operand.
5249   const Value *getPointerOperand() const { return getOperand(0); }
5250   /// Gets the operand index of the pointer operand.
5251   static unsigned getPointerOperandIndex() { return 0U; }
5252 
5253   /// Returns the address space of the pointer operand.
5254   unsigned getPointerAddressSpace() const {
5255     return getPointerOperand()->getType()->getPointerAddressSpace();
5256   }
5257 
5258   // Methods for support type inquiry through isa, cast, and dyn_cast:
5259   static bool classof(const Instruction *I) {
5260     return I->getOpcode() == PtrToInt;
5261   }
5262   static bool classof(const Value *V) {
5263     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5264   }
5265 };
5266 
5267 //===----------------------------------------------------------------------===//
5268 //                             BitCastInst Class
5269 //===----------------------------------------------------------------------===//
5270 
5271 /// This class represents a no-op cast from one type to another.
5272 class BitCastInst : public CastInst {
5273 protected:
5274   // Note: Instruction needs to be a friend here to call cloneImpl.
5275   friend class Instruction;
5276 
5277   /// Clone an identical BitCastInst.
5278   BitCastInst *cloneImpl() const;
5279 
5280 public:
5281   /// Constructor with insert-before-instruction semantics
5282   BitCastInst(
5283     Value *S,                           ///< The value to be casted
5284     Type *Ty,                           ///< The type to casted to
5285     const Twine &NameStr = "",          ///< A name for the new instruction
5286     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5287   );
5288 
5289   /// Constructor with insert-at-end-of-block semantics
5290   BitCastInst(
5291     Value *S,                     ///< The value to be casted
5292     Type *Ty,                     ///< The type to casted to
5293     const Twine &NameStr,         ///< A name for the new instruction
5294     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
5295   );
5296 
5297   // Methods for support type inquiry through isa, cast, and dyn_cast:
5298   static bool classof(const Instruction *I) {
5299     return I->getOpcode() == BitCast;
5300   }
5301   static bool classof(const Value *V) {
5302     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5303   }
5304 };
5305 
5306 //===----------------------------------------------------------------------===//
5307 //                          AddrSpaceCastInst Class
5308 //===----------------------------------------------------------------------===//
5309 
5310 /// This class represents a conversion between pointers from one address space
5311 /// to another.
5312 class AddrSpaceCastInst : public CastInst {
5313 protected:
5314   // Note: Instruction needs to be a friend here to call cloneImpl.
5315   friend class Instruction;
5316 
5317   /// Clone an identical AddrSpaceCastInst.
5318   AddrSpaceCastInst *cloneImpl() const;
5319 
5320 public:
5321   /// Constructor with insert-before-instruction semantics
5322   AddrSpaceCastInst(
5323     Value *S,                           ///< The value to be casted
5324     Type *Ty,                           ///< The type to casted to
5325     const Twine &NameStr = "",          ///< A name for the new instruction
5326     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5327   );
5328 
5329   /// Constructor with insert-at-end-of-block semantics
5330   AddrSpaceCastInst(
5331     Value *S,                     ///< The value to be casted
5332     Type *Ty,                     ///< The type to casted to
5333     const Twine &NameStr,         ///< A name for the new instruction
5334     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
5335   );
5336 
5337   // Methods for support type inquiry through isa, cast, and dyn_cast:
5338   static bool classof(const Instruction *I) {
5339     return I->getOpcode() == AddrSpaceCast;
5340   }
5341   static bool classof(const Value *V) {
5342     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5343   }
5344 
5345   /// Gets the pointer operand.
5346   Value *getPointerOperand() {
5347     return getOperand(0);
5348   }
5349 
5350   /// Gets the pointer operand.
5351   const Value *getPointerOperand() const {
5352     return getOperand(0);
5353   }
5354 
5355   /// Gets the operand index of the pointer operand.
5356   static unsigned getPointerOperandIndex() {
5357     return 0U;
5358   }
5359 
5360   /// Returns the address space of the pointer operand.
5361   unsigned getSrcAddressSpace() const {
5362     return getPointerOperand()->getType()->getPointerAddressSpace();
5363   }
5364 
5365   /// Returns the address space of the result.
5366   unsigned getDestAddressSpace() const {
5367     return getType()->getPointerAddressSpace();
5368   }
5369 };
5370 
5371 //===----------------------------------------------------------------------===//
5372 //                          Helper functions
5373 //===----------------------------------------------------------------------===//
5374 
5375 /// A helper function that returns the pointer operand of a load or store
5376 /// instruction. Returns nullptr if not load or store.
5377 inline const Value *getLoadStorePointerOperand(const Value *V) {
5378   if (auto *Load = dyn_cast<LoadInst>(V))
5379     return Load->getPointerOperand();
5380   if (auto *Store = dyn_cast<StoreInst>(V))
5381     return Store->getPointerOperand();
5382   return nullptr;
5383 }
5384 inline Value *getLoadStorePointerOperand(Value *V) {
5385   return const_cast<Value *>(
5386       getLoadStorePointerOperand(static_cast<const Value *>(V)));
5387 }
5388 
5389 /// A helper function that returns the pointer operand of a load, store
5390 /// or GEP instruction. Returns nullptr if not load, store, or GEP.
5391 inline const Value *getPointerOperand(const Value *V) {
5392   if (auto *Ptr = getLoadStorePointerOperand(V))
5393     return Ptr;
5394   if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
5395     return Gep->getPointerOperand();
5396   return nullptr;
5397 }
5398 inline Value *getPointerOperand(Value *V) {
5399   return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
5400 }
5401 
5402 /// A helper function that returns the alignment of load or store instruction.
5403 inline Align getLoadStoreAlignment(Value *I) {
5404   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5405          "Expected Load or Store instruction");
5406   if (auto *LI = dyn_cast<LoadInst>(I))
5407     return LI->getAlign();
5408   return cast<StoreInst>(I)->getAlign();
5409 }
5410 
5411 /// A helper function that returns the address space of the pointer operand of
5412 /// load or store instruction.
5413 inline unsigned getLoadStoreAddressSpace(Value *I) {
5414   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5415          "Expected Load or Store instruction");
5416   if (auto *LI = dyn_cast<LoadInst>(I))
5417     return LI->getPointerAddressSpace();
5418   return cast<StoreInst>(I)->getPointerAddressSpace();
5419 }
5420 
5421 /// A helper function that returns the type of a load or store instruction.
5422 inline Type *getLoadStoreType(Value *I) {
5423   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5424          "Expected Load or Store instruction");
5425   if (auto *LI = dyn_cast<LoadInst>(I))
5426     return LI->getType();
5427   return cast<StoreInst>(I)->getValueOperand()->getType();
5428 }
5429 
5430 /// A helper function that returns an atomic operation's sync scope; returns
5431 /// std::nullopt if it is not an atomic operation.
5432 inline std::optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) {
5433   if (!I->isAtomic())
5434     return std::nullopt;
5435   if (auto *AI = dyn_cast<LoadInst>(I))
5436     return AI->getSyncScopeID();
5437   if (auto *AI = dyn_cast<StoreInst>(I))
5438     return AI->getSyncScopeID();
5439   if (auto *AI = dyn_cast<FenceInst>(I))
5440     return AI->getSyncScopeID();
5441   if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
5442     return AI->getSyncScopeID();
5443   if (auto *AI = dyn_cast<AtomicRMWInst>(I))
5444     return AI->getSyncScopeID();
5445   llvm_unreachable("unhandled atomic operation");
5446 }
5447 
5448 //===----------------------------------------------------------------------===//
5449 //                              FreezeInst Class
5450 //===----------------------------------------------------------------------===//
5451 
5452 /// This class represents a freeze function that returns random concrete
5453 /// value if an operand is either a poison value or an undef value
5454 class FreezeInst : public UnaryInstruction {
5455 protected:
5456   // Note: Instruction needs to be a friend here to call cloneImpl.
5457   friend class Instruction;
5458 
5459   /// Clone an identical FreezeInst
5460   FreezeInst *cloneImpl() const;
5461 
5462 public:
5463   explicit FreezeInst(Value *S,
5464                       const Twine &NameStr = "",
5465                       Instruction *InsertBefore = nullptr);
5466   FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);
5467 
5468   // Methods for support type inquiry through isa, cast, and dyn_cast:
5469   static inline bool classof(const Instruction *I) {
5470     return I->getOpcode() == Freeze;
5471   }
5472   static inline bool classof(const Value *V) {
5473     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5474   }
5475 };
5476 
5477 } // end namespace llvm
5478 
5479 #endif // LLVM_IR_INSTRUCTIONS_H
5480