1 //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file exposes the class definitions of all of the subclasses of the
10 // Instruction class.  This is meant to be an easy way to get access to all
11 // instruction subclasses.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifndef LLVM_IR_INSTRUCTIONS_H
16 #define LLVM_IR_INSTRUCTIONS_H
17 
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/Bitfields.h"
20 #include "llvm/ADT/MapVector.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Twine.h"
24 #include "llvm/ADT/iterator.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/IR/CFG.h"
27 #include "llvm/IR/Constant.h"
28 #include "llvm/IR/DerivedTypes.h"
29 #include "llvm/IR/InstrTypes.h"
30 #include "llvm/IR/Instruction.h"
31 #include "llvm/IR/OperandTraits.h"
32 #include "llvm/IR/Use.h"
33 #include "llvm/IR/User.h"
34 #include "llvm/Support/AtomicOrdering.h"
35 #include "llvm/Support/ErrorHandling.h"
36 #include <cassert>
37 #include <cstddef>
38 #include <cstdint>
39 #include <iterator>
40 #include <optional>
41 
42 namespace llvm {
43 
44 class APFloat;
45 class APInt;
46 class BasicBlock;
47 class ConstantInt;
48 class DataLayout;
49 class StringRef;
50 class Type;
51 class Value;
52 
53 //===----------------------------------------------------------------------===//
54 //                                AllocaInst Class
55 //===----------------------------------------------------------------------===//
56 
57 /// An instruction to allocate memory on the stack.
58 class AllocaInst : public UnaryInstruction {
59   Type *AllocatedType;
60 
61   using AlignmentField = AlignmentBitfieldElementT<0>;
62   using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
63   using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
64   static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
65                                         SwiftErrorField>(),
66                 "Bitfields must be contiguous");
67 
68 protected:
69   // Note: Instruction needs to be a friend here to call cloneImpl.
70   friend class Instruction;
71 
72   AllocaInst *cloneImpl() const;
73 
74 public:
75   explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
76                       const Twine &Name, Instruction *InsertBefore);
77   AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
78              const Twine &Name, BasicBlock *InsertAtEnd);
79 
80   AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
81              Instruction *InsertBefore);
82   AllocaInst(Type *Ty, unsigned AddrSpace,
83              const Twine &Name, BasicBlock *InsertAtEnd);
84 
85   AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
86              const Twine &Name = "", Instruction *InsertBefore = nullptr);
87   AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
88              const Twine &Name, BasicBlock *InsertAtEnd);
89 
90   /// Return true if there is an allocation size parameter to the allocation
91   /// instruction that is not 1.
92   bool isArrayAllocation() const;
93 
94   /// Get the number of elements allocated. For a simple allocation of a single
95   /// element, this will return a constant 1 value.
96   const Value *getArraySize() const { return getOperand(0); }
97   Value *getArraySize() { return getOperand(0); }
98 
99   /// Overload to return most specific pointer type.
100   PointerType *getType() const {
101     return cast<PointerType>(Instruction::getType());
102   }
103 
104   /// Return the address space for the allocation.
105   unsigned getAddressSpace() const {
106     return getType()->getAddressSpace();
107   }
108 
109   /// Get allocation size in bytes. Returns std::nullopt if size can't be
110   /// determined, e.g. in case of a VLA.
111   std::optional<TypeSize> getAllocationSize(const DataLayout &DL) const;
112 
113   /// Get allocation size in bits. Returns std::nullopt if size can't be
114   /// determined, e.g. in case of a VLA.
115   std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
116 
117   /// Return the type that is being allocated by the instruction.
118   Type *getAllocatedType() const { return AllocatedType; }
119   /// For use only in special circumstances that need to generically
120   /// transform a whole instruction (e.g. IR linking and vectorization).
121   void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
122 
123   /// Return the alignment of the memory that is being allocated by the
124   /// instruction.
125   Align getAlign() const {
126     return Align(1ULL << getSubclassData<AlignmentField>());
127   }
128 
129   void setAlignment(Align Align) {
130     setSubclassData<AlignmentField>(Log2(Align));
131   }
132 
133   /// Return true if this alloca is in the entry block of the function and is a
134   /// constant size. If so, the code generator will fold it into the
135   /// prolog/epilog code, so it is basically free.
136   bool isStaticAlloca() const;
137 
138   /// Return true if this alloca is used as an inalloca argument to a call. Such
139   /// allocas are never considered static even if they are in the entry block.
140   bool isUsedWithInAlloca() const {
141     return getSubclassData<UsedWithInAllocaField>();
142   }
143 
144   /// Specify whether this alloca is used to represent the arguments to a call.
145   void setUsedWithInAlloca(bool V) {
146     setSubclassData<UsedWithInAllocaField>(V);
147   }
148 
149   /// Return true if this alloca is used as a swifterror argument to a call.
150   bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
151   /// Specify whether this alloca is used to represent a swifterror.
152   void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
153 
154   // Methods for support type inquiry through isa, cast, and dyn_cast:
155   static bool classof(const Instruction *I) {
156     return (I->getOpcode() == Instruction::Alloca);
157   }
158   static bool classof(const Value *V) {
159     return isa<Instruction>(V) && classof(cast<Instruction>(V));
160   }
161 
162 private:
163   // Shadow Instruction::setInstructionSubclassData with a private forwarding
164   // method so that subclasses cannot accidentally use it.
165   template <typename Bitfield>
166   void setSubclassData(typename Bitfield::Type Value) {
167     Instruction::setSubclassData<Bitfield>(Value);
168   }
169 };
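// Illustrative usage sketch, added for exposition only (not part of the
// original header). `Ctx` and `InsertPt` stand for a hypothetical LLVMContext
// and insertion point supplied by the caller. It creates a fixed-size,
// explicitly aligned alloca and queries it through the accessors above:
//
//   AllocaInst *Slot =
//       new AllocaInst(Type::getInt32Ty(Ctx), /*AddrSpace=*/0,
//                      /*ArraySize=*/nullptr, Align(4), "slot", InsertPt);
//   assert(Slot->getAllocatedType()->isIntegerTy(32));
//   assert(Slot->getAlign() == Align(4)); // Stored internally as Log2(Align).
//   assert(Slot->getAddressSpace() == 0);
//
// A null ArraySize is treated as a single-element allocation, so
// isArrayAllocation() would return false for `Slot`.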
170 
171 //===----------------------------------------------------------------------===//
172 //                                LoadInst Class
173 //===----------------------------------------------------------------------===//
174 
175 /// An instruction for reading from memory. This uses the SubclassData field in
176 /// Value to store whether or not the load is volatile.
177 class LoadInst : public UnaryInstruction {
178   using VolatileField = BoolBitfieldElementT<0>;
179   using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
180   using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
181   static_assert(
182       Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
183       "Bitfields must be contiguous");
184 
185   void AssertOK();
186 
187 protected:
188   // Note: Instruction needs to be a friend here to call cloneImpl.
189   friend class Instruction;
190 
191   LoadInst *cloneImpl() const;
192 
193 public:
194   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
195            Instruction *InsertBefore);
196   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
197   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
198            Instruction *InsertBefore);
199   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
200            BasicBlock *InsertAtEnd);
201   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
202            Align Align, Instruction *InsertBefore = nullptr);
203   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
204            Align Align, BasicBlock *InsertAtEnd);
205   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
206            Align Align, AtomicOrdering Order,
207            SyncScope::ID SSID = SyncScope::System,
208            Instruction *InsertBefore = nullptr);
209   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
210            Align Align, AtomicOrdering Order, SyncScope::ID SSID,
211            BasicBlock *InsertAtEnd);
212 
213   /// Return true if this is a load from a volatile memory location.
214   bool isVolatile() const { return getSubclassData<VolatileField>(); }
215 
216   /// Specify whether this is a volatile load or not.
217   void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
218 
219   /// Return the alignment of the access that is being performed.
220   Align getAlign() const {
221     return Align(1ULL << (getSubclassData<AlignmentField>()));
222   }
223 
224   void setAlignment(Align Align) {
225     setSubclassData<AlignmentField>(Log2(Align));
226   }
227 
228   /// Returns the ordering constraint of this load instruction.
229   AtomicOrdering getOrdering() const {
230     return getSubclassData<OrderingField>();
231   }
232   /// Sets the ordering constraint of this load instruction.  May not be Release
233   /// or AcquireRelease.
234   void setOrdering(AtomicOrdering Ordering) {
235     setSubclassData<OrderingField>(Ordering);
236   }
237 
238   /// Returns the synchronization scope ID of this load instruction.
239   SyncScope::ID getSyncScopeID() const {
240     return SSID;
241   }
242 
243   /// Sets the synchronization scope ID of this load instruction.
244   void setSyncScopeID(SyncScope::ID SSID) {
245     this->SSID = SSID;
246   }
247 
248   /// Sets the ordering constraint and the synchronization scope ID of this load
249   /// instruction.
250   void setAtomic(AtomicOrdering Ordering,
251                  SyncScope::ID SSID = SyncScope::System) {
252     setOrdering(Ordering);
253     setSyncScopeID(SSID);
254   }
255 
256   bool isSimple() const { return !isAtomic() && !isVolatile(); }
257 
258   bool isUnordered() const {
259     return (getOrdering() == AtomicOrdering::NotAtomic ||
260             getOrdering() == AtomicOrdering::Unordered) &&
261            !isVolatile();
262   }
263 
264   Value *getPointerOperand() { return getOperand(0); }
265   const Value *getPointerOperand() const { return getOperand(0); }
266   static unsigned getPointerOperandIndex() { return 0U; }
267   Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
268 
269   /// Returns the address space of the pointer operand.
270   unsigned getPointerAddressSpace() const {
271     return getPointerOperandType()->getPointerAddressSpace();
272   }
273 
274   // Methods for support type inquiry through isa, cast, and dyn_cast:
275   static bool classof(const Instruction *I) {
276     return I->getOpcode() == Instruction::Load;
277   }
278   static bool classof(const Value *V) {
279     return isa<Instruction>(V) && classof(cast<Instruction>(V));
280   }
281 
282 private:
283   // Shadow Instruction::setInstructionSubclassData with a private forwarding
284   // method so that subclasses cannot accidentally use it.
285   template <typename Bitfield>
286   void setSubclassData(typename Bitfield::Type Value) {
287     Instruction::setSubclassData<Bitfield>(Value);
288   }
289 
290   /// The synchronization scope ID of this load instruction.  Not quite enough
291   /// room in SubClassData for everything, so synchronization scope ID gets its
292   /// own field.
293   SyncScope::ID SSID;
294 };
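// Illustrative usage sketch, added for exposition only (not part of the
// original header). `Ptr` is assumed to point to an i32 in address space 0,
// and `Ctx`/`InsertPt` are a hypothetical context and insertion point. It
// creates an acquire atomic load and shows how the simple/unordered queries
// relate to the chosen ordering:
//
//   LoadInst *LI =
//       new LoadInst(Type::getInt32Ty(Ctx), Ptr, "val", /*isVolatile=*/false,
//                    Align(4), AtomicOrdering::Acquire, SyncScope::System,
//                    InsertPt);
//   assert(LI->isAtomic() && !LI->isSimple() && !LI->isUnordered());
//   assert(LI->getPointerOperand() == Ptr);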
295 
296 //===----------------------------------------------------------------------===//
297 //                                StoreInst Class
298 //===----------------------------------------------------------------------===//
299 
300 /// An instruction for storing to memory.
301 class StoreInst : public Instruction {
302   using VolatileField = BoolBitfieldElementT<0>;
303   using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
304   using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
305   static_assert(
306       Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
307       "Bitfields must be contiguous");
308 
309   void AssertOK();
310 
311 protected:
312   // Note: Instruction needs to be a friend here to call cloneImpl.
313   friend class Instruction;
314 
315   StoreInst *cloneImpl() const;
316 
317 public:
318   StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
319   StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
320   StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
321   StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
322   StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
323             Instruction *InsertBefore = nullptr);
324   StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
325             BasicBlock *InsertAtEnd);
326   StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
327             AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
328             Instruction *InsertBefore = nullptr);
329   StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
330             AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
331 
332   // allocate space for exactly two operands
333   void *operator new(size_t S) { return User::operator new(S, 2); }
334   void operator delete(void *Ptr) { User::operator delete(Ptr); }
335 
336   /// Return true if this is a store to a volatile memory location.
337   bool isVolatile() const { return getSubclassData<VolatileField>(); }
338 
339   /// Specify whether this is a volatile store or not.
340   void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
341 
342   /// Transparently provide more efficient getOperand methods.
343   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
344 
345   Align getAlign() const {
346     return Align(1ULL << (getSubclassData<AlignmentField>()));
347   }
348 
349   void setAlignment(Align Align) {
350     setSubclassData<AlignmentField>(Log2(Align));
351   }
352 
353   /// Returns the ordering constraint of this store instruction.
354   AtomicOrdering getOrdering() const {
355     return getSubclassData<OrderingField>();
356   }
357 
358   /// Sets the ordering constraint of this store instruction.  May not be
359   /// Acquire or AcquireRelease.
360   void setOrdering(AtomicOrdering Ordering) {
361     setSubclassData<OrderingField>(Ordering);
362   }
363 
364   /// Returns the synchronization scope ID of this store instruction.
365   SyncScope::ID getSyncScopeID() const {
366     return SSID;
367   }
368 
369   /// Sets the synchronization scope ID of this store instruction.
370   void setSyncScopeID(SyncScope::ID SSID) {
371     this->SSID = SSID;
372   }
373 
374   /// Sets the ordering constraint and the synchronization scope ID of this
375   /// store instruction.
376   void setAtomic(AtomicOrdering Ordering,
377                  SyncScope::ID SSID = SyncScope::System) {
378     setOrdering(Ordering);
379     setSyncScopeID(SSID);
380   }
381 
382   bool isSimple() const { return !isAtomic() && !isVolatile(); }
383 
384   bool isUnordered() const {
385     return (getOrdering() == AtomicOrdering::NotAtomic ||
386             getOrdering() == AtomicOrdering::Unordered) &&
387            !isVolatile();
388   }
389 
390   Value *getValueOperand() { return getOperand(0); }
391   const Value *getValueOperand() const { return getOperand(0); }
392 
393   Value *getPointerOperand() { return getOperand(1); }
394   const Value *getPointerOperand() const { return getOperand(1); }
395   static unsigned getPointerOperandIndex() { return 1U; }
396   Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
397 
398   /// Returns the address space of the pointer operand.
399   unsigned getPointerAddressSpace() const {
400     return getPointerOperandType()->getPointerAddressSpace();
401   }
402 
403   // Methods for support type inquiry through isa, cast, and dyn_cast:
404   static bool classof(const Instruction *I) {
405     return I->getOpcode() == Instruction::Store;
406   }
407   static bool classof(const Value *V) {
408     return isa<Instruction>(V) && classof(cast<Instruction>(V));
409   }
410 
411 private:
412   // Shadow Instruction::setInstructionSubclassData with a private forwarding
413   // method so that subclasses cannot accidentally use it.
414   template <typename Bitfield>
415   void setSubclassData(typename Bitfield::Type Value) {
416     Instruction::setSubclassData<Bitfield>(Value);
417   }
418 
419   /// The synchronization scope ID of this store instruction.  Not quite enough
420   /// room in SubClassData for everything, so synchronization scope ID gets its
421   /// own field.
422   SyncScope::ID SSID;
423 };
424 
425 template <>
426 struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
427 };
428 
429 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
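// Illustrative usage sketch, added for exposition only (not part of the
// original header). `Val`, `Ptr`, and `InsertPt` are hypothetical: `Val` is the
// value to store and `Ptr` points to matching storage. Note that the value is
// operand 0 and the pointer is operand 1:
//
//   StoreInst *SI =
//       new StoreInst(Val, Ptr, /*isVolatile=*/false, Align(4), InsertPt);
//   assert(SI->getValueOperand() == Val);     // getOperand(0)
//   assert(SI->getPointerOperand() == Ptr);   // getOperand(1)
//   assert(SI->isSimple());                   // neither atomic nor volatile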
430 
431 //===----------------------------------------------------------------------===//
432 //                                FenceInst Class
433 //===----------------------------------------------------------------------===//
434 
435 /// An instruction for ordering other memory operations.
436 class FenceInst : public Instruction {
437   using OrderingField = AtomicOrderingBitfieldElementT<0>;
438 
439   void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
440 
441 protected:
442   // Note: Instruction needs to be a friend here to call cloneImpl.
443   friend class Instruction;
444 
445   FenceInst *cloneImpl() const;
446 
447 public:
448   // Ordering may only be Acquire, Release, AcquireRelease, or
449   // SequentiallyConsistent.
450   FenceInst(LLVMContext &C, AtomicOrdering Ordering,
451             SyncScope::ID SSID = SyncScope::System,
452             Instruction *InsertBefore = nullptr);
453   FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
454             BasicBlock *InsertAtEnd);
455 
456   // allocate space for exactly zero operands
457   void *operator new(size_t S) { return User::operator new(S, 0); }
458   void operator delete(void *Ptr) { User::operator delete(Ptr); }
459 
460   /// Returns the ordering constraint of this fence instruction.
461   AtomicOrdering getOrdering() const {
462     return getSubclassData<OrderingField>();
463   }
464 
465   /// Sets the ordering constraint of this fence instruction.  May only be
466   /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
467   void setOrdering(AtomicOrdering Ordering) {
468     setSubclassData<OrderingField>(Ordering);
469   }
470 
471   /// Returns the synchronization scope ID of this fence instruction.
472   SyncScope::ID getSyncScopeID() const {
473     return SSID;
474   }
475 
476   /// Sets the synchronization scope ID of this fence instruction.
477   void setSyncScopeID(SyncScope::ID SSID) {
478     this->SSID = SSID;
479   }
480 
481   // Methods for support type inquiry through isa, cast, and dyn_cast:
482   static bool classof(const Instruction *I) {
483     return I->getOpcode() == Instruction::Fence;
484   }
485   static bool classof(const Value *V) {
486     return isa<Instruction>(V) && classof(cast<Instruction>(V));
487   }
488 
489 private:
490   // Shadow Instruction::setInstructionSubclassData with a private forwarding
491   // method so that subclasses cannot accidentally use it.
492   template <typename Bitfield>
493   void setSubclassData(typename Bitfield::Type Value) {
494     Instruction::setSubclassData<Bitfield>(Value);
495   }
496 
497   /// The synchronization scope ID of this fence instruction.  Not quite enough
498   /// room in SubClassData for everything, so synchronization scope ID gets its
499   /// own field.
500   SyncScope::ID SSID;
501 };
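// Illustrative usage sketch, added for exposition only (not part of the
// original header). `Ctx` and `InsertPt` are a hypothetical context and
// insertion point. A fence takes no operands; only acquire, release, acq_rel,
// and seq_cst orderings are legal:
//
//   FenceInst *FI =
//       new FenceInst(Ctx, AtomicOrdering::SequentiallyConsistent,
//                     SyncScope::System, InsertPt);
//   assert(FI->getOrdering() == AtomicOrdering::SequentiallyConsistent);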
502 
503 //===----------------------------------------------------------------------===//
504 //                                AtomicCmpXchgInst Class
505 //===----------------------------------------------------------------------===//
506 
507 /// An instruction that atomically checks whether a
508 /// specified value is in a memory location, and, if it is, stores a new value
509 /// there. The value returned by this instruction is a pair containing the
510 /// original value as first element, and an i1 indicating success (true) or
511 /// failure (false) as second element.
512 ///
513 class AtomicCmpXchgInst : public Instruction {
514   void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
515             AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
516             SyncScope::ID SSID);
517 
518   template <unsigned Offset>
519   using AtomicOrderingBitfieldElement =
520       typename Bitfield::Element<AtomicOrdering, Offset, 3,
521                                  AtomicOrdering::LAST>;
522 
523 protected:
524   // Note: Instruction needs to be a friend here to call cloneImpl.
525   friend class Instruction;
526 
527   AtomicCmpXchgInst *cloneImpl() const;
528 
529 public:
530   AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
531                     AtomicOrdering SuccessOrdering,
532                     AtomicOrdering FailureOrdering, SyncScope::ID SSID,
533                     Instruction *InsertBefore = nullptr);
534   AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
535                     AtomicOrdering SuccessOrdering,
536                     AtomicOrdering FailureOrdering, SyncScope::ID SSID,
537                     BasicBlock *InsertAtEnd);
538 
539   // allocate space for exactly three operands
540   void *operator new(size_t S) { return User::operator new(S, 3); }
541   void operator delete(void *Ptr) { User::operator delete(Ptr); }
542 
543   using VolatileField = BoolBitfieldElementT<0>;
544   using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
545   using SuccessOrderingField =
546       AtomicOrderingBitfieldElementT<WeakField::NextBit>;
547   using FailureOrderingField =
548       AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
549   using AlignmentField =
550       AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
551   static_assert(
552       Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
553                               FailureOrderingField, AlignmentField>(),
554       "Bitfields must be contiguous");
555 
556   /// Return the alignment of the memory location that is being operated on
557   /// by the instruction.
558   Align getAlign() const {
559     return Align(1ULL << getSubclassData<AlignmentField>());
560   }
561 
562   void setAlignment(Align Align) {
563     setSubclassData<AlignmentField>(Log2(Align));
564   }
565 
566   /// Return true if this is a cmpxchg from a volatile memory
567   /// location.
568   ///
569   bool isVolatile() const { return getSubclassData<VolatileField>(); }
570 
571   /// Specify whether this is a volatile cmpxchg.
572   ///
573   void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
574 
575   /// Return true if this cmpxchg may spuriously fail.
576   bool isWeak() const { return getSubclassData<WeakField>(); }
577 
578   void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
579 
580   /// Transparently provide more efficient getOperand methods.
581   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
582 
583   static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
584     return Ordering != AtomicOrdering::NotAtomic &&
585            Ordering != AtomicOrdering::Unordered;
586   }
587 
588   static bool isValidFailureOrdering(AtomicOrdering Ordering) {
589     return Ordering != AtomicOrdering::NotAtomic &&
590            Ordering != AtomicOrdering::Unordered &&
591            Ordering != AtomicOrdering::AcquireRelease &&
592            Ordering != AtomicOrdering::Release;
593   }
594 
595   /// Returns the success ordering constraint of this cmpxchg instruction.
596   AtomicOrdering getSuccessOrdering() const {
597     return getSubclassData<SuccessOrderingField>();
598   }
599 
600   /// Sets the success ordering constraint of this cmpxchg instruction.
601   void setSuccessOrdering(AtomicOrdering Ordering) {
602     assert(isValidSuccessOrdering(Ordering) &&
603            "invalid CmpXchg success ordering");
604     setSubclassData<SuccessOrderingField>(Ordering);
605   }
606 
607   /// Returns the failure ordering constraint of this cmpxchg instruction.
608   AtomicOrdering getFailureOrdering() const {
609     return getSubclassData<FailureOrderingField>();
610   }
611 
612   /// Sets the failure ordering constraint of this cmpxchg instruction.
613   void setFailureOrdering(AtomicOrdering Ordering) {
614     assert(isValidFailureOrdering(Ordering) &&
615            "invalid CmpXchg failure ordering");
616     setSubclassData<FailureOrderingField>(Ordering);
617   }
618 
619   /// Returns a single ordering which is at least as strong as both the
620   /// success and failure orderings for this cmpxchg.
621   AtomicOrdering getMergedOrdering() const {
622     if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
623       return AtomicOrdering::SequentiallyConsistent;
624     if (getFailureOrdering() == AtomicOrdering::Acquire) {
625       if (getSuccessOrdering() == AtomicOrdering::Monotonic)
626         return AtomicOrdering::Acquire;
627       if (getSuccessOrdering() == AtomicOrdering::Release)
628         return AtomicOrdering::AcquireRelease;
629     }
630     return getSuccessOrdering();
631   }
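  // For example, success == Release with failure == Acquire merges to
  // AcquireRelease, and success == Monotonic with failure == Acquire merges to
  // Acquire; in the remaining valid combinations the success ordering is
  // already at least as strong as the failure ordering and is returned as-is.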
632 
633   /// Returns the synchronization scope ID of this cmpxchg instruction.
634   SyncScope::ID getSyncScopeID() const {
635     return SSID;
636   }
637 
638   /// Sets the synchronization scope ID of this cmpxchg instruction.
639   void setSyncScopeID(SyncScope::ID SSID) {
640     this->SSID = SSID;
641   }
642 
643   Value *getPointerOperand() { return getOperand(0); }
644   const Value *getPointerOperand() const { return getOperand(0); }
645   static unsigned getPointerOperandIndex() { return 0U; }
646 
647   Value *getCompareOperand() { return getOperand(1); }
648   const Value *getCompareOperand() const { return getOperand(1); }
649 
650   Value *getNewValOperand() { return getOperand(2); }
651   const Value *getNewValOperand() const { return getOperand(2); }
652 
653   /// Returns the address space of the pointer operand.
654   unsigned getPointerAddressSpace() const {
655     return getPointerOperand()->getType()->getPointerAddressSpace();
656   }
657 
658   /// Returns the strongest permitted ordering on failure, given the
659   /// desired ordering on success.
660   ///
661   /// If the comparison in a cmpxchg operation fails, there is no atomic store
662   /// so release semantics cannot be provided. So this function drops explicit
663   /// Release requests from the AtomicOrdering. A SequentiallyConsistent
664   /// operation would remain SequentiallyConsistent.
665   static AtomicOrdering
666   getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
667     switch (SuccessOrdering) {
668     default:
669       llvm_unreachable("invalid cmpxchg success ordering");
670     case AtomicOrdering::Release:
671     case AtomicOrdering::Monotonic:
672       return AtomicOrdering::Monotonic;
673     case AtomicOrdering::AcquireRelease:
674     case AtomicOrdering::Acquire:
675       return AtomicOrdering::Acquire;
676     case AtomicOrdering::SequentiallyConsistent:
677       return AtomicOrdering::SequentiallyConsistent;
678     }
679   }
680 
681   // Methods for support type inquiry through isa, cast, and dyn_cast:
682   static bool classof(const Instruction *I) {
683     return I->getOpcode() == Instruction::AtomicCmpXchg;
684   }
685   static bool classof(const Value *V) {
686     return isa<Instruction>(V) && classof(cast<Instruction>(V));
687   }
688 
689 private:
690   // Shadow Instruction::setInstructionSubclassData with a private forwarding
691   // method so that subclasses cannot accidentally use it.
692   template <typename Bitfield>
693   void setSubclassData(typename Bitfield::Type Value) {
694     Instruction::setSubclassData<Bitfield>(Value);
695   }
696 
697   /// The synchronization scope ID of this cmpxchg instruction.  Not quite
698   /// enough room in SubClassData for everything, so synchronization scope ID
699   /// gets its own field.
700   SyncScope::ID SSID;
701 };
702 
703 template <>
704 struct OperandTraits<AtomicCmpXchgInst> :
705     public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
706 };
707 
708 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
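// Illustrative usage sketch, added for exposition only (not part of the
// original header). `Ptr` is assumed to point to an i32, `Expected` and
// `Desired` are i32 values, and `InsertPt` is a hypothetical insertion point.
// The failure ordering is derived from the success ordering with the static
// helper declared above:
//
//   AtomicOrdering Success = AtomicOrdering::AcquireRelease;
//   AtomicCmpXchgInst *CX = new AtomicCmpXchgInst(
//       Ptr, Expected, Desired, Align(4), Success,
//       AtomicCmpXchgInst::getStrongestFailureOrdering(Success), // Acquire
//       SyncScope::System, InsertPt);
//   CX->setWeak(true); // permit spurious failure
//   assert(CX->getMergedOrdering() == AtomicOrdering::AcquireRelease);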
709 
710 //===----------------------------------------------------------------------===//
711 //                                AtomicRMWInst Class
712 //===----------------------------------------------------------------------===//
713 
714 /// An instruction that atomically reads a memory location,
715 /// combines it with another value, and then stores the result back.  Returns
716 /// the old value.
717 ///
718 class AtomicRMWInst : public Instruction {
719 protected:
720   // Note: Instruction needs to be a friend here to call cloneImpl.
721   friend class Instruction;
722 
723   AtomicRMWInst *cloneImpl() const;
724 
725 public:
726   /// This enumeration lists the possible modifications atomicrmw can make.  In
727   /// the descriptions, 'p' is the pointer to the instruction's memory location,
728   /// 'old' is the initial value of *p, and 'v' is the other value passed to the
729   /// instruction.  These instructions always return 'old'.
730   enum BinOp : unsigned {
731     /// *p = v
732     Xchg,
733     /// *p = old + v
734     Add,
735     /// *p = old - v
736     Sub,
737     /// *p = old & v
738     And,
739     /// *p = ~(old & v)
740     Nand,
741     /// *p = old | v
742     Or,
743     /// *p = old ^ v
744     Xor,
745     /// *p = old >signed v ? old : v
746     Max,
747     /// *p = old <signed v ? old : v
748     Min,
749     /// *p = old >unsigned v ? old : v
750     UMax,
751     /// *p = old <unsigned v ? old : v
752     UMin,
753 
754     /// *p = old + v
755     FAdd,
756 
757     /// *p = old - v
758     FSub,
759 
760     /// *p = maxnum(old, v)
761     /// \p maxnum matches the behavior of \p llvm.maxnum.*.
762     FMax,
763 
764     /// *p = minnum(old, v)
765     /// \p minnum matches the behavior of \p llvm.minnum.*.
766     FMin,
767 
768     /// Increment one up to a maximum value.
769     /// *p = (old u>= v) ? 0 : (old + 1)
770     UIncWrap,
771 
772     /// Decrement one until a minimum value or zero.
773     /// *p = ((old == 0) || (old u> v)) ? v : (old - 1)
774     UDecWrap,
775 
776     FIRST_BINOP = Xchg,
777     LAST_BINOP = UDecWrap,
778     BAD_BINOP
779   };
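  // Worked example for the two wrapping operations above, assuming v == 3:
  //   UIncWrap: old = 0 -> 1 -> 2 -> 3 -> 0 -> ...  (counts up to v, then
  //             wraps to 0)
  //   UDecWrap: old = 3 -> 2 -> 1 -> 0 -> 3 -> ...  (counts down to 0, then
  //             wraps to v; any old u> v is also reset to v)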
780 
781 private:
782   template <unsigned Offset>
783   using AtomicOrderingBitfieldElement =
784       typename Bitfield::Element<AtomicOrdering, Offset, 3,
785                                  AtomicOrdering::LAST>;
786 
787   template <unsigned Offset>
788   using BinOpBitfieldElement =
789       typename Bitfield::Element<BinOp, Offset, 5, BinOp::LAST_BINOP>;
790 
791 public:
792   AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
793                 AtomicOrdering Ordering, SyncScope::ID SSID,
794                 Instruction *InsertBefore = nullptr);
795   AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
796                 AtomicOrdering Ordering, SyncScope::ID SSID,
797                 BasicBlock *InsertAtEnd);
798 
799   // allocate space for exactly two operands
800   void *operator new(size_t S) { return User::operator new(S, 2); }
801   void operator delete(void *Ptr) { User::operator delete(Ptr); }
802 
803   using VolatileField = BoolBitfieldElementT<0>;
804   using AtomicOrderingField =
805       AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
806   using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
807   using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
808   static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
809                                         OperationField, AlignmentField>(),
810                 "Bitfields must be contiguous");
811 
812   BinOp getOperation() const { return getSubclassData<OperationField>(); }
813 
814   static StringRef getOperationName(BinOp Op);
815 
816   static bool isFPOperation(BinOp Op) {
817     switch (Op) {
818     case AtomicRMWInst::FAdd:
819     case AtomicRMWInst::FSub:
820     case AtomicRMWInst::FMax:
821     case AtomicRMWInst::FMin:
822       return true;
823     default:
824       return false;
825     }
826   }
827 
828   void setOperation(BinOp Operation) {
829     setSubclassData<OperationField>(Operation);
830   }
831 
832   /// Return the alignment of the memory location that is being operated on
833   /// by the instruction.
834   Align getAlign() const {
835     return Align(1ULL << getSubclassData<AlignmentField>());
836   }
837 
838   void setAlignment(Align Align) {
839     setSubclassData<AlignmentField>(Log2(Align));
840   }
841 
842   /// Return true if this is a RMW on a volatile memory location.
843   ///
844   bool isVolatile() const { return getSubclassData<VolatileField>(); }
845 
846   /// Specify whether this is a volatile RMW or not.
847   ///
848   void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
849 
850   /// Transparently provide more efficient getOperand methods.
851   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
852 
853   /// Returns the ordering constraint of this rmw instruction.
854   AtomicOrdering getOrdering() const {
855     return getSubclassData<AtomicOrderingField>();
856   }
857 
858   /// Sets the ordering constraint of this rmw instruction.
859   void setOrdering(AtomicOrdering Ordering) {
860     assert(Ordering != AtomicOrdering::NotAtomic &&
861            "atomicrmw instructions can only be atomic.");
862     assert(Ordering != AtomicOrdering::Unordered &&
863            "atomicrmw instructions cannot be unordered.");
864     setSubclassData<AtomicOrderingField>(Ordering);
865   }
866 
867   /// Returns the synchronization scope ID of this rmw instruction.
868   SyncScope::ID getSyncScopeID() const {
869     return SSID;
870   }
871 
872   /// Sets the synchronization scope ID of this rmw instruction.
873   void setSyncScopeID(SyncScope::ID SSID) {
874     this->SSID = SSID;
875   }
876 
877   Value *getPointerOperand() { return getOperand(0); }
878   const Value *getPointerOperand() const { return getOperand(0); }
879   static unsigned getPointerOperandIndex() { return 0U; }
880 
881   Value *getValOperand() { return getOperand(1); }
882   const Value *getValOperand() const { return getOperand(1); }
883 
884   /// Returns the address space of the pointer operand.
885   unsigned getPointerAddressSpace() const {
886     return getPointerOperand()->getType()->getPointerAddressSpace();
887   }
888 
889   bool isFloatingPointOperation() const {
890     return isFPOperation(getOperation());
891   }
892 
893   // Methods for support type inquiry through isa, cast, and dyn_cast:
894   static bool classof(const Instruction *I) {
895     return I->getOpcode() == Instruction::AtomicRMW;
896   }
897   static bool classof(const Value *V) {
898     return isa<Instruction>(V) && classof(cast<Instruction>(V));
899   }
900 
901 private:
902   void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
903             AtomicOrdering Ordering, SyncScope::ID SSID);
904 
905   // Shadow Instruction::setInstructionSubclassData with a private forwarding
906   // method so that subclasses cannot accidentally use it.
907   template <typename Bitfield>
908   void setSubclassData(typename Bitfield::Type Value) {
909     Instruction::setSubclassData<Bitfield>(Value);
910   }
911 
912   /// The synchronization scope ID of this rmw instruction.  Not quite enough
913   /// room in SubClassData for everything, so synchronization scope ID gets its
914   /// own field.
915   SyncScope::ID SSID;
916 };
917 
918 template <>
919 struct OperandTraits<AtomicRMWInst>
920     : public FixedNumOperandTraits<AtomicRMWInst,2> {
921 };
922 
923 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
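// Illustrative usage sketch, added for exposition only (not part of the
// original header). `Ptr` is assumed to point to an i32, `Val` is an i32
// increment, and `InsertPt` is a hypothetical insertion point. It creates an
// atomic fetch-and-add with monotonic ordering:
//
//   AtomicRMWInst *RMW =
//       new AtomicRMWInst(AtomicRMWInst::Add, Ptr, Val, Align(4),
//                         AtomicOrdering::Monotonic, SyncScope::System,
//                         InsertPt);
//   assert(RMW->getOperation() == AtomicRMWInst::Add);
//   assert(!RMW->isFloatingPointOperation());
//   // The instruction's result is the value of *Ptr before the update.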
924 
925 //===----------------------------------------------------------------------===//
926 //                             GetElementPtrInst Class
927 //===----------------------------------------------------------------------===//
928 
929 // checkGEPType - Simple wrapper function to give a better assertion failure
930 // message on bad indices for a gep instruction.
931 //
932 inline Type *checkGEPType(Type *Ty) {
933   assert(Ty && "Invalid GetElementPtrInst indices for type!");
934   return Ty;
935 }
936 
937 /// An instruction for type-safe pointer arithmetic to
938 /// access elements of arrays and structs.
939 ///
940 class GetElementPtrInst : public Instruction {
941   Type *SourceElementType;
942   Type *ResultElementType;
943 
944   GetElementPtrInst(const GetElementPtrInst &GEPI);
945 
946   /// Constructors - Create a getelementptr instruction with a base pointer and
947   /// a list of indices. The first ctor can optionally insert before an existing
948   /// instruction, the second appends the new instruction to the specified
949   /// BasicBlock.
950   inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
951                            ArrayRef<Value *> IdxList, unsigned Values,
952                            const Twine &NameStr, Instruction *InsertBefore);
953   inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
954                            ArrayRef<Value *> IdxList, unsigned Values,
955                            const Twine &NameStr, BasicBlock *InsertAtEnd);
956 
957   void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
958 
959 protected:
960   // Note: Instruction needs to be a friend here to call cloneImpl.
961   friend class Instruction;
962 
963   GetElementPtrInst *cloneImpl() const;
964 
965 public:
966   static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
967                                    ArrayRef<Value *> IdxList,
968                                    const Twine &NameStr = "",
969                                    Instruction *InsertBefore = nullptr) {
970     unsigned Values = 1 + unsigned(IdxList.size());
971     assert(PointeeType && "Must specify element type");
972     assert(cast<PointerType>(Ptr->getType()->getScalarType())
973                ->isOpaqueOrPointeeTypeMatches(PointeeType));
974     return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
975                                           NameStr, InsertBefore);
976   }
977 
978   static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
979                                    ArrayRef<Value *> IdxList,
980                                    const Twine &NameStr,
981                                    BasicBlock *InsertAtEnd) {
982     unsigned Values = 1 + unsigned(IdxList.size());
983     assert(PointeeType && "Must specify element type");
984     assert(cast<PointerType>(Ptr->getType()->getScalarType())
985                ->isOpaqueOrPointeeTypeMatches(PointeeType));
986     return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
987                                           NameStr, InsertAtEnd);
988   }
989 
990   /// Create an "inbounds" getelementptr. See the documentation for the
991   /// "inbounds" flag in LangRef.html for details.
992   static GetElementPtrInst *
993   CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
994                  const Twine &NameStr = "",
995                  Instruction *InsertBefore = nullptr) {
996     GetElementPtrInst *GEP =
997         Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
998     GEP->setIsInBounds(true);
999     return GEP;
1000   }
1001 
1002   static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
1003                                            ArrayRef<Value *> IdxList,
1004                                            const Twine &NameStr,
1005                                            BasicBlock *InsertAtEnd) {
1006     GetElementPtrInst *GEP =
1007         Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1008     GEP->setIsInBounds(true);
1009     return GEP;
1010   }
1011 
1012   /// Transparently provide more efficient getOperand methods.
1013   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1014 
1015   Type *getSourceElementType() const { return SourceElementType; }
1016 
1017   void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1018   void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1019 
1020   Type *getResultElementType() const {
1021     assert(cast<PointerType>(getType()->getScalarType())
1022                ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1023     return ResultElementType;
1024   }
1025 
1026   /// Returns the address space of this instruction's pointer type.
1027   unsigned getAddressSpace() const {
1028     // Note that this is always the same as the pointer operand's address space
1029     // and that is cheaper to compute, so cheat here.
1030     return getPointerAddressSpace();
1031   }
1032 
1033   /// Returns the result type of a getelementptr with the given source
1034   /// element type and indexes.
1035   ///
1036   /// Null is returned if the indices are invalid for the specified
1037   /// source element type.
1038   static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1039   static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1040   static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1041 
1042   /// Return the type of the element at the given index of an indexable
1043   /// type.  This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1044   ///
1045   /// Returns null if the type can't be indexed, or the given index is not
1046   /// legal for the given type.
1047   static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1048   static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1049 
1050   inline op_iterator       idx_begin()       { return op_begin()+1; }
1051   inline const_op_iterator idx_begin() const { return op_begin()+1; }
1052   inline op_iterator       idx_end()         { return op_end(); }
1053   inline const_op_iterator idx_end()   const { return op_end(); }
1054 
1055   inline iterator_range<op_iterator> indices() {
1056     return make_range(idx_begin(), idx_end());
1057   }
1058 
1059   inline iterator_range<const_op_iterator> indices() const {
1060     return make_range(idx_begin(), idx_end());
1061   }
1062 
1063   Value *getPointerOperand() {
1064     return getOperand(0);
1065   }
1066   const Value *getPointerOperand() const {
1067     return getOperand(0);
1068   }
1069   static unsigned getPointerOperandIndex() {
1070     return 0U;    // get index for modifying correct operand.
1071   }
1072 
1073   /// Method to return the pointer operand as a
1074   /// PointerType.
1075   Type *getPointerOperandType() const {
1076     return getPointerOperand()->getType();
1077   }
1078 
1079   /// Returns the address space of the pointer operand.
1080   unsigned getPointerAddressSpace() const {
1081     return getPointerOperandType()->getPointerAddressSpace();
1082   }
1083 
1084   /// Returns the pointer type returned by the GEP
1085   /// instruction, which may be a vector of pointers.
1086   static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1087                                 ArrayRef<Value *> IdxList) {
1088     PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1089     unsigned AddrSpace = OrigPtrTy->getAddressSpace();
1090     Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
1091     Type *PtrTy = OrigPtrTy->isOpaque()
1092       ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
1093       : PointerType::get(ResultElemTy, AddrSpace);
1094     // Vector GEP
1095     if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1096       ElementCount EltCount = PtrVTy->getElementCount();
1097       return VectorType::get(PtrTy, EltCount);
1098     }
1099     for (Value *Index : IdxList)
1100       if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1101         ElementCount EltCount = IndexVTy->getElementCount();
1102         return VectorType::get(PtrTy, EltCount);
1103       }
1104     // Scalar GEP
1105     return PtrTy;
1106   }
1107 
1108   unsigned getNumIndices() const {  // Note: always non-negative
1109     return getNumOperands() - 1;
1110   }
1111 
1112   bool hasIndices() const {
1113     return getNumOperands() > 1;
1114   }
1115 
1116   /// Return true if all of the indices of this GEP are
1117   /// zeros.  If so, the result pointer and the first operand have the same
1118   /// value, just potentially different types.
1119   bool hasAllZeroIndices() const;
1120 
1121   /// Return true if all of the indices of this GEP are
1122   /// constant integers.  If so, the result pointer and the first operand have
1123   /// a constant offset between them.
1124   bool hasAllConstantIndices() const;
1125 
1126   /// Set or clear the inbounds flag on this GEP instruction.
1127   /// See LangRef.html for the meaning of inbounds on a getelementptr.
1128   void setIsInBounds(bool b = true);
1129 
1130   /// Determine whether the GEP has the inbounds flag.
1131   bool isInBounds() const;
1132 
1133   /// Accumulate the constant address offset of this GEP if possible.
1134   ///
1135   /// This routine accepts an APInt into which it will accumulate the constant
1136   /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1137   /// all-constant, it returns false and the value of the offset APInt is
1138   /// undefined (it is *not* preserved!). The APInt passed into this routine
1139   /// must be at least as wide as the IntPtr type for the address space of
1140   /// the base GEP pointer.
1141   bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1142   bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1143                      MapVector<Value *, APInt> &VariableOffsets,
1144                      APInt &ConstantOffset) const;
1145   // Methods for support type inquiry through isa, cast, and dyn_cast:
1146   static bool classof(const Instruction *I) {
1147     return (I->getOpcode() == Instruction::GetElementPtr);
1148   }
1149   static bool classof(const Value *V) {
1150     return isa<Instruction>(V) && classof(cast<Instruction>(V));
1151   }
1152 };
1153 
1154 template <>
1155 struct OperandTraits<GetElementPtrInst> :
1156   public VariadicOperandTraits<GetElementPtrInst, 1> {
1157 };
1158 
1159 GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1160                                      ArrayRef<Value *> IdxList, unsigned Values,
1161                                      const Twine &NameStr,
1162                                      Instruction *InsertBefore)
1163     : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1164                   OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1165                   Values, InsertBefore),
1166       SourceElementType(PointeeType),
1167       ResultElementType(getIndexedType(PointeeType, IdxList)) {
1168   assert(cast<PointerType>(getType()->getScalarType())
1169              ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1170   init(Ptr, IdxList, NameStr);
1171 }
1172 
1173 GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1174                                      ArrayRef<Value *> IdxList, unsigned Values,
1175                                      const Twine &NameStr,
1176                                      BasicBlock *InsertAtEnd)
1177     : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1178                   OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1179                   Values, InsertAtEnd),
1180       SourceElementType(PointeeType),
1181       ResultElementType(getIndexedType(PointeeType, IdxList)) {
1182   assert(cast<PointerType>(getType()->getScalarType())
1183              ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1184   init(Ptr, IdxList, NameStr);
1185 }
1186 
1187 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
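// Illustrative usage sketch, added for exposition only (not part of the
// original header). `StructTy` is assumed to be a struct type, `BasePtr` a
// pointer to it, `I64Ty`/`I32Ty` the corresponding integer types, `DL` a
// DataLayout, and `InsertPt` a hypothetical insertion point. It builds an
// inbounds GEP to field 1 and then folds the indices to a byte offset:
//
//   Value *Idxs[] = {ConstantInt::get(I64Ty, 0), ConstantInt::get(I32Ty, 1)};
//   GetElementPtrInst *GEP = GetElementPtrInst::CreateInBounds(
//       StructTy, BasePtr, Idxs, "field1.addr", InsertPt);
//   assert(GEP->isInBounds() && GEP->getNumIndices() == 2);
//
//   APInt Offset(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
//   bool IsConstant = GEP->accumulateConstantOffset(DL, Offset);
//   // On success, Offset holds the byte offset of field 1 from BasePtr.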
1188 
1189 //===----------------------------------------------------------------------===//
1190 //                               ICmpInst Class
1191 //===----------------------------------------------------------------------===//
1192 
1193 /// This instruction compares its operands according to the predicate given
1194 /// to the constructor. It only operates on integers or pointers. The operands
1195 /// must be of identical types.
1196 /// Represents an integer comparison operator.
1197 class ICmpInst: public CmpInst {
1198   void AssertOK() {
1199     assert(isIntPredicate() &&
1200            "Invalid ICmp predicate value");
1201     assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1202           "Both operands to ICmp instruction are not of the same type!");
1203     // Check that the operands are the right type
1204     assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1205             getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1206            "Invalid operand types for ICmp instruction");
1207   }
1208 
1209 protected:
1210   // Note: Instruction needs to be a friend here to call cloneImpl.
1211   friend class Instruction;
1212 
1213   /// Clone an identical ICmpInst
1214   ICmpInst *cloneImpl() const;
1215 
1216 public:
1217   /// Constructor with insert-before-instruction semantics.
1218   ICmpInst(
1219     Instruction *InsertBefore,  ///< Where to insert
1220     Predicate pred,  ///< The predicate to use for the comparison
1221     Value *LHS,      ///< The left-hand-side of the expression
1222     Value *RHS,      ///< The right-hand-side of the expression
1223     const Twine &NameStr = ""  ///< Name of the instruction
1224   ) : CmpInst(makeCmpResultType(LHS->getType()),
1225               Instruction::ICmp, pred, LHS, RHS, NameStr,
1226               InsertBefore) {
1227 #ifndef NDEBUG
1228   AssertOK();
1229 #endif
1230   }
1231 
1232   /// Constructor with insert-at-end semantics.
1233   ICmpInst(
1234     BasicBlock &InsertAtEnd, ///< Block to insert into.
1235     Predicate pred,  ///< The predicate to use for the comparison
1236     Value *LHS,      ///< The left-hand-side of the expression
1237     Value *RHS,      ///< The right-hand-side of the expression
1238     const Twine &NameStr = ""  ///< Name of the instruction
1239   ) : CmpInst(makeCmpResultType(LHS->getType()),
1240               Instruction::ICmp, pred, LHS, RHS, NameStr,
1241               &InsertAtEnd) {
1242 #ifndef NDEBUG
1243   AssertOK();
1244 #endif
1245   }
1246 
1247   /// Constructor with no-insertion semantics
1248   ICmpInst(
1249     Predicate pred, ///< The predicate to use for the comparison
1250     Value *LHS,     ///< The left-hand-side of the expression
1251     Value *RHS,     ///< The right-hand-side of the expression
1252     const Twine &NameStr = "" ///< Name of the instruction
1253   ) : CmpInst(makeCmpResultType(LHS->getType()),
1254               Instruction::ICmp, pred, LHS, RHS, NameStr) {
1255 #ifndef NDEBUG
1256   AssertOK();
1257 #endif
1258   }
1259 
1260   /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1261   /// @returns the predicate that would be the result if the operands were
1262   /// regarded as signed.
1263   /// Return the signed version of the predicate.
1264   Predicate getSignedPredicate() const {
1265     return getSignedPredicate(getPredicate());
1266   }
1267 
1268   /// This is a static version that you can use without an instruction.
1269   /// Return the signed version of the predicate.
1270   static Predicate getSignedPredicate(Predicate pred);
1271 
1272   /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1273   /// @returns the predicate that would be the result if the operands were
1274   /// regarded as unsigned.
1275   /// Return the unsigned version of the predicate.
1276   Predicate getUnsignedPredicate() const {
1277     return getUnsignedPredicate(getPredicate());
1278   }
1279 
1280   /// This is a static version that you can use without an instruction.
1281   /// Return the unsigned version of the predicate.
1282   static Predicate getUnsignedPredicate(Predicate pred);
1283 
1284   /// Return true if this predicate is either EQ or NE.  This also
1285   /// tests for commutativity.
1286   static bool isEquality(Predicate P) {
1287     return P == ICMP_EQ || P == ICMP_NE;
1288   }
1289 
1290   /// Return true if this predicate is either EQ or NE.  This also
1291   /// tests for commutativity.
1292   bool isEquality() const {
1293     return isEquality(getPredicate());
1294   }
1295 
1296   /// @returns true if the predicate of this ICmpInst is commutative.
1297   /// Determine if this relation is commutative.
1298   bool isCommutative() const { return isEquality(); }
1299 
1300   /// Return true if the predicate is relational (not EQ or NE).
1301   ///
1302   bool isRelational() const {
1303     return !isEquality();
1304   }
1305 
1306   /// Return true if the predicate is relational (not EQ or NE).
1307   ///
1308   static bool isRelational(Predicate P) {
1309     return !isEquality(P);
1310   }
1311 
1312   /// Return true if the predicate is SGT or UGT.
1313   ///
1314   static bool isGT(Predicate P) {
1315     return P == ICMP_SGT || P == ICMP_UGT;
1316   }
1317 
1318   /// Return true if the predicate is SLT or ULT.
1319   ///
1320   static bool isLT(Predicate P) {
1321     return P == ICMP_SLT || P == ICMP_ULT;
1322   }
1323 
1324   /// Return true if the predicate is SGE or UGE.
1325   ///
1326   static bool isGE(Predicate P) {
1327     return P == ICMP_SGE || P == ICMP_UGE;
1328   }
1329 
1330   /// Return true if the predicate is SLE or ULE.
1331   ///
1332   static bool isLE(Predicate P) {
1333     return P == ICMP_SLE || P == ICMP_ULE;
1334   }
1335 
1336   /// Returns the sequence of all ICmp predicates.
1337   ///
1338   static auto predicates() { return ICmpPredicates(); }
1339 
1340   /// Exchange the two operands to this instruction in such a way that it does
1341   /// not modify the semantics of the instruction. The predicate value may be
1342   /// changed to retain the same result if the predicate is order dependent
1343   /// (e.g. ult).
1344   /// Swap operands and adjust predicate.
1345   void swapOperands() {
1346     setPredicate(getSwappedPredicate());
1347     Op<0>().swap(Op<1>());
1348   }
1349 
1350   /// Return result of `LHS Pred RHS` comparison.
1351   static bool compare(const APInt &LHS, const APInt &RHS,
1352                       ICmpInst::Predicate Pred);
1353 
1354   // Methods for support type inquiry through isa, cast, and dyn_cast:
1355   static bool classof(const Instruction *I) {
1356     return I->getOpcode() == Instruction::ICmp;
1357   }
1358   static bool classof(const Value *V) {
1359     return isa<Instruction>(V) && classof(cast<Instruction>(V));
1360   }
1361 };
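// Illustrative usage sketch, added for exposition only (not part of the
// original header). `A` and `B` are assumed to be integer values of the same
// type and `InsertPt` a hypothetical insertion point. It shows the predicate
// helpers in action:
//
//   ICmpInst *Cmp = new ICmpInst(InsertPt, ICmpInst::ICMP_ULT, A, B, "cmp");
//   assert(Cmp->getSignedPredicate() == ICmpInst::ICMP_SLT);
//   assert(ICmpInst::isLT(Cmp->getPredicate()));
//   Cmp->swapOperands(); // now compares B against A ...
//   assert(Cmp->getPredicate() == ICmpInst::ICMP_UGT); // ... with the
//                                                      // swapped predicate.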
1362 
1363 //===----------------------------------------------------------------------===//
1364 //                               FCmpInst Class
1365 //===----------------------------------------------------------------------===//
1366 
1367 /// This instruction compares its operands according to the predicate given
1368 /// to the constructor. It only operates on floating point values or packed
1369 /// vectors of floating point values. The operands must be of identical types.
1370 /// Represents a floating point comparison operator.
1371 class FCmpInst: public CmpInst {
1372   void AssertOK() {
1373     assert(isFPPredicate() && "Invalid FCmp predicate value");
1374     assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1375            "Both operands to FCmp instruction are not of the same type!");
1376     // Check that the operands are the right type
1377     assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1378            "Invalid operand types for FCmp instruction");
1379   }
1380 
1381 protected:
1382   // Note: Instruction needs to be a friend here to call cloneImpl.
1383   friend class Instruction;
1384 
1385   /// Clone an identical FCmpInst
1386   FCmpInst *cloneImpl() const;
1387 
1388 public:
1389   /// Constructor with insert-before-instruction semantics.
1390   FCmpInst(
1391     Instruction *InsertBefore, ///< Where to insert
1392     Predicate pred,  ///< The predicate to use for the comparison
1393     Value *LHS,      ///< The left-hand-side of the expression
1394     Value *RHS,      ///< The right-hand-side of the expression
1395     const Twine &NameStr = ""  ///< Name of the instruction
1396   ) : CmpInst(makeCmpResultType(LHS->getType()),
1397               Instruction::FCmp, pred, LHS, RHS, NameStr,
1398               InsertBefore) {
1399     AssertOK();
1400   }
1401 
1402   /// Constructor with insert-at-end semantics.
1403   FCmpInst(
1404     BasicBlock &InsertAtEnd, ///< Block to insert into.
1405     Predicate pred,  ///< The predicate to use for the comparison
1406     Value *LHS,      ///< The left-hand-side of the expression
1407     Value *RHS,      ///< The right-hand-side of the expression
1408     const Twine &NameStr = ""  ///< Name of the instruction
1409   ) : CmpInst(makeCmpResultType(LHS->getType()),
1410               Instruction::FCmp, pred, LHS, RHS, NameStr,
1411               &InsertAtEnd) {
1412     AssertOK();
1413   }
1414 
1415   /// Constructor with no-insertion semantics.
1416   FCmpInst(
1417     Predicate Pred, ///< The predicate to use for the comparison
1418     Value *LHS,     ///< The left-hand-side of the expression
1419     Value *RHS,     ///< The right-hand-side of the expression
1420     const Twine &NameStr = "", ///< Name of the instruction
1421     Instruction *FlagsSource = nullptr
1422   ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1423               RHS, NameStr, nullptr, FlagsSource) {
1424     AssertOK();
1425   }
1426 
1427   /// @returns true if the predicate of this instruction is EQ or NE.
1428   /// Determine if this is an equality predicate.
1429   static bool isEquality(Predicate Pred) {
1430     return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1431            Pred == FCMP_UNE;
1432   }
1433 
1434   /// @returns true if the predicate of this instruction is EQ or NE.
1435   /// Determine if this is an equality predicate.
1436   bool isEquality() const { return isEquality(getPredicate()); }
1437 
1438   /// @returns true if the predicate of this instruction is commutative.
1439   /// Determine if this is a commutative predicate.
1440   bool isCommutative() const {
1441     return isEquality() ||
1442            getPredicate() == FCMP_FALSE ||
1443            getPredicate() == FCMP_TRUE ||
1444            getPredicate() == FCMP_ORD ||
1445            getPredicate() == FCMP_UNO;
1446   }
1447 
1448   /// @returns true if the predicate is relational (not EQ or NE).
1449   /// Determine if this is a relational predicate.
1450   bool isRelational() const { return !isEquality(); }
1451 
1452   /// Exchange the two operands to this instruction in such a way that it does
1453   /// not modify the semantics of the instruction. The predicate value may be
1454   /// changed to retain the same result if the predicate is order dependent
1455   /// (e.g. ult).
1456   /// Swap operands and adjust predicate.
1457   void swapOperands() {
1458     setPredicate(getSwappedPredicate());
1459     Op<0>().swap(Op<1>());
1460   }
1461 
1462   /// Returns the sequence of all FCmp predicates.
1463   ///
1464   static auto predicates() { return FCmpPredicates(); }
1465 
1466   /// Return result of `LHS Pred RHS` comparison.
1467   static bool compare(const APFloat &LHS, const APFloat &RHS,
1468                       FCmpInst::Predicate Pred);
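
       // A minimal usage sketch (assumed values, not from this header): ordered
       // predicates are false when either input is NaN, unordered ones are true.
       //   APFloat NaN = APFloat::getNaN(APFloat::IEEEdouble());
       //   FCmpInst::compare(APFloat(1.0), NaN, FCmpInst::FCMP_OLT);   // false
       //   FCmpInst::compare(APFloat(1.0), NaN, FCmpInst::FCMP_ULT);   // true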
1469 
1470   /// Methods for support type inquiry through isa, cast, and dyn_cast:
1471   static bool classof(const Instruction *I) {
1472     return I->getOpcode() == Instruction::FCmp;
1473   }
1474   static bool classof(const Value *V) {
1475     return isa<Instruction>(V) && classof(cast<Instruction>(V));
1476   }
1477 };
1478 
1479 //===----------------------------------------------------------------------===//
1480 /// This class represents a function call, abstracting a target
1481 /// machine's calling convention.  This class uses the low bits of the
1482 /// SubClassData field to indicate the kind of tail call.  The rest of the bits
1483 /// hold the calling convention of the call.
1484 ///
1485 class CallInst : public CallBase {
1486   CallInst(const CallInst &CI);
1487 
1488   /// Construct a CallInst given a range of arguments.
1490   inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1491                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1492                   Instruction *InsertBefore);
1493 
1494   inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1495                   const Twine &NameStr, Instruction *InsertBefore)
1496       : CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {}
1497 
1498   /// Construct a CallInst given a range of arguments.
1500   inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1501                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1502                   BasicBlock *InsertAtEnd);
1503 
1504   explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1505                     Instruction *InsertBefore);
1506 
1507   CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1508            BasicBlock *InsertAtEnd);
1509 
1510   void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1511             ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1512   void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1513 
1514   /// Compute the number of operands to allocate.
1515   static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1516     // We need one operand for the called function, plus the input operand
1517     // counts provided.
1518     return 1 + NumArgs + NumBundleInputs;
1519   }
1520 
1521 protected:
1522   // Note: Instruction needs to be a friend here to call cloneImpl.
1523   friend class Instruction;
1524 
1525   CallInst *cloneImpl() const;
1526 
1527 public:
1528   static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1529                           Instruction *InsertBefore = nullptr) {
1530     return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1531   }
1532 
1533   static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1534                           const Twine &NameStr,
1535                           Instruction *InsertBefore = nullptr) {
1536     return new (ComputeNumOperands(Args.size()))
1537         CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore);
1538   }
1539 
1540   static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1541                           ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1542                           const Twine &NameStr = "",
1543                           Instruction *InsertBefore = nullptr) {
1544     const int NumOperands =
1545         ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1546     const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1547 
1548     return new (NumOperands, DescriptorBytes)
1549         CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1550   }
1551 
1552   static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1553                           BasicBlock *InsertAtEnd) {
1554     return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1555   }
1556 
1557   static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1558                           const Twine &NameStr, BasicBlock *InsertAtEnd) {
1559     return new (ComputeNumOperands(Args.size()))
1560         CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertAtEnd);
1561   }
1562 
1563   static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1564                           ArrayRef<OperandBundleDef> Bundles,
1565                           const Twine &NameStr, BasicBlock *InsertAtEnd) {
1566     const int NumOperands =
1567         ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1568     const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1569 
1570     return new (NumOperands, DescriptorBytes)
1571         CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1572   }
1573 
1574   static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1575                           Instruction *InsertBefore = nullptr) {
1576     return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1577                   InsertBefore);
1578   }
1579 
1580   static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1581                           ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1582                           const Twine &NameStr = "",
1583                           Instruction *InsertBefore = nullptr) {
1584     return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1585                   NameStr, InsertBefore);
1586   }
1587 
1588   static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1589                           const Twine &NameStr,
1590                           Instruction *InsertBefore = nullptr) {
1591     return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1592                   InsertBefore);
1593   }
1594 
1595   static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1596                           BasicBlock *InsertAtEnd) {
1597     return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1598                   InsertAtEnd);
1599   }
1600 
1601   static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1602                           const Twine &NameStr, BasicBlock *InsertAtEnd) {
1603     return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1604                   InsertAtEnd);
1605   }
1606 
1607   static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1608                           ArrayRef<OperandBundleDef> Bundles,
1609                           const Twine &NameStr, BasicBlock *InsertAtEnd) {
1610     return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1611                   NameStr, InsertAtEnd);
1612   }
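
       // Illustrative sketch (hypothetical module M, type FnTy, and insertion
       // point InsertPt): the FunctionCallee overloads carry the callee together
       // with its FunctionType, so the call site type need not be repeated.
       //   FunctionCallee Callee = M->getOrInsertFunction("foo", FnTy);
       //   CallInst *CI = CallInst::Create(Callee, {Arg0, Arg1}, "call", InsertPt);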
1613 
1614   /// Create a clone of \p CI with a different set of operand bundles and
1615   /// insert it before \p InsertPt.
1616   ///
1617   /// The returned call instruction is identical to \p CI in every way except
1618   /// that the operand bundles for the new instruction are set to the operand
1619   /// bundles in \p Bundles.
1620   static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1621                           Instruction *InsertPt = nullptr);
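
       // Illustrative sketch (hypothetical existing call CI): rebuild the call
       // site with all operand bundles stripped, inserted before the original.
       //   CallInst *Stripped = CallInst::Create(CI, /*Bundles=*/{}, CI);
       //   Stripped->takeName(CI);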
1622 
1623   /// Generate the IR for a call to malloc:
1624   /// 1. Compute the malloc call's argument as the specified type's size,
1625   ///    possibly multiplied by the array size if the array size is not
1626   ///    constant 1.
1627   /// 2. Call malloc with that argument.
1628   /// 3. Bitcast the result of the malloc call to the specified type.
1629   static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1630                                    Type *AllocTy, Value *AllocSize,
1631                                    Value *ArraySize = nullptr,
1632                                    Function *MallocF = nullptr,
1633                                    const Twine &Name = "");
1634   static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1635                                    Type *AllocTy, Value *AllocSize,
1636                                    Value *ArraySize = nullptr,
1637                                    Function *MallocF = nullptr,
1638                                    const Twine &Name = "");
1639   static Instruction *
1640   CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, Type *AllocTy,
1641                Value *AllocSize, Value *ArraySize = nullptr,
1642                ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1643                Function *MallocF = nullptr, const Twine &Name = "");
1644   static Instruction *
1645   CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, Type *AllocTy,
1646                Value *AllocSize, Value *ArraySize = nullptr,
1647                ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1648                Function *MallocF = nullptr, const Twine &Name = "");
1649   /// Generate the IR for a call to the builtin free function.
1650   static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1651   static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1652   static Instruction *CreateFree(Value *Source,
1653                                  ArrayRef<OperandBundleDef> Bundles,
1654                                  Instruction *InsertBefore);
1655   static Instruction *CreateFree(Value *Source,
1656                                  ArrayRef<OperandBundleDef> Bundles,
1657                                  BasicBlock *InsertAtEnd);
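
       // A rough usage sketch (hypothetical IntPtrTy, Int64Ty, DL, and InsertPt):
       // AllocSize is the byte size of AllocTy, typically taken from the
       // DataLayout.
       //   Value *Size = ConstantInt::get(IntPtrTy, DL.getTypeAllocSize(Int64Ty));
       //   Instruction *Buf = CallInst::CreateMalloc(InsertPt, IntPtrTy, Int64Ty,
       //                                             Size, /*ArraySize=*/nullptr,
       //                                             /*MallocF=*/nullptr, "buf");
       //   CallInst::CreateFree(Buf, InsertPt);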
1658 
1659   // Note that 'musttail' implies 'tail'.
1660   enum TailCallKind : unsigned {
1661     TCK_None = 0,
1662     TCK_Tail = 1,
1663     TCK_MustTail = 2,
1664     TCK_NoTail = 3,
1665     TCK_LAST = TCK_NoTail
1666   };
1667 
1668   using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1669   static_assert(
1670       Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1671       "Bitfields must be contiguous");
1672 
1673   TailCallKind getTailCallKind() const {
1674     return getSubclassData<TailCallKindField>();
1675   }
1676 
1677   bool isTailCall() const {
1678     TailCallKind Kind = getTailCallKind();
1679     return Kind == TCK_Tail || Kind == TCK_MustTail;
1680   }
1681 
1682   bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1683 
1684   bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1685 
1686   void setTailCallKind(TailCallKind TCK) {
1687     setSubclassData<TailCallKindField>(TCK);
1688   }
1689 
1690   void setTailCall(bool IsTc = true) {
1691     setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1692   }
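
       // Illustrative sketch (hypothetical call site CI): mark the call as a
       // guaranteed tail call.
       //   CI->setTailCallKind(CallInst::TCK_MustTail);
       //   assert(CI->isTailCall() && CI->isMustTailCall());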
1693 
1694   /// Return true if the call can return twice
1695   bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1696   void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1697 
1698   // Methods for support type inquiry through isa, cast, and dyn_cast:
1699   static bool classof(const Instruction *I) {
1700     return I->getOpcode() == Instruction::Call;
1701   }
1702   static bool classof(const Value *V) {
1703     return isa<Instruction>(V) && classof(cast<Instruction>(V));
1704   }
1705 
1706   /// Updates profile metadata by scaling it by \p S / \p T.
1707   void updateProfWeight(uint64_t S, uint64_t T);
1708 
1709 private:
1710   // Shadow Instruction::setInstructionSubclassData with a private forwarding
1711   // method so that subclasses cannot accidentally use it.
1712   template <typename Bitfield>
1713   void setSubclassData(typename Bitfield::Type Value) {
1714     Instruction::setSubclassData<Bitfield>(Value);
1715   }
1716 };
1717 
1718 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1719                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1720                    BasicBlock *InsertAtEnd)
1721     : CallBase(Ty->getReturnType(), Instruction::Call,
1722                OperandTraits<CallBase>::op_end(this) -
1723                    (Args.size() + CountBundleInputs(Bundles) + 1),
1724                unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1725                InsertAtEnd) {
1726   init(Ty, Func, Args, Bundles, NameStr);
1727 }
1728 
1729 CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1730                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1731                    Instruction *InsertBefore)
1732     : CallBase(Ty->getReturnType(), Instruction::Call,
1733                OperandTraits<CallBase>::op_end(this) -
1734                    (Args.size() + CountBundleInputs(Bundles) + 1),
1735                unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1736                InsertBefore) {
1737   init(Ty, Func, Args, Bundles, NameStr);
1738 }
1739 
1740 //===----------------------------------------------------------------------===//
1741 //                               SelectInst Class
1742 //===----------------------------------------------------------------------===//
1743 
1744 /// This class represents the LLVM 'select' instruction.
1745 ///
1746 class SelectInst : public Instruction {
1747   SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1748              Instruction *InsertBefore)
1749     : Instruction(S1->getType(), Instruction::Select,
1750                   &Op<0>(), 3, InsertBefore) {
1751     init(C, S1, S2);
1752     setName(NameStr);
1753   }
1754 
1755   SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1756              BasicBlock *InsertAtEnd)
1757     : Instruction(S1->getType(), Instruction::Select,
1758                   &Op<0>(), 3, InsertAtEnd) {
1759     init(C, S1, S2);
1760     setName(NameStr);
1761   }
1762 
1763   void init(Value *C, Value *S1, Value *S2) {
1764     assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1765     Op<0>() = C;
1766     Op<1>() = S1;
1767     Op<2>() = S2;
1768   }
1769 
1770 protected:
1771   // Note: Instruction needs to be a friend here to call cloneImpl.
1772   friend class Instruction;
1773 
1774   SelectInst *cloneImpl() const;
1775 
1776 public:
1777   static SelectInst *Create(Value *C, Value *S1, Value *S2,
1778                             const Twine &NameStr = "",
1779                             Instruction *InsertBefore = nullptr,
1780                             Instruction *MDFrom = nullptr) {
1781     SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1782     if (MDFrom)
1783       Sel->copyMetadata(*MDFrom);
1784     return Sel;
1785   }
1786 
1787   static SelectInst *Create(Value *C, Value *S1, Value *S2,
1788                             const Twine &NameStr,
1789                             BasicBlock *InsertAtEnd) {
1790     return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1791   }
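
       // A minimal usage sketch (hypothetical Cond, TV, FV, InsertPt): yields TV
       // when Cond is true and FV otherwise, i.e.
       // "%s = select i1 %cond, i32 %tv, i32 %fv".
       //   SelectInst *Sel = SelectInst::Create(Cond, TV, FV, "s", InsertPt);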
1792 
1793   const Value *getCondition() const { return Op<0>(); }
1794   const Value *getTrueValue() const { return Op<1>(); }
1795   const Value *getFalseValue() const { return Op<2>(); }
1796   Value *getCondition() { return Op<0>(); }
1797   Value *getTrueValue() { return Op<1>(); }
1798   Value *getFalseValue() { return Op<2>(); }
1799 
1800   void setCondition(Value *V) { Op<0>() = V; }
1801   void setTrueValue(Value *V) { Op<1>() = V; }
1802   void setFalseValue(Value *V) { Op<2>() = V; }
1803 
1804   /// Swap the true and false values of the select instruction.
1805   /// This doesn't swap prof metadata.
1806   void swapValues() { Op<1>().swap(Op<2>()); }
1807 
1808   /// Return a string if the specified operands are invalid
1809   /// for a select operation, otherwise return null.
1810   static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1811 
1812   /// Transparently provide more efficient getOperand methods.
1813   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1814 
1815   OtherOps getOpcode() const {
1816     return static_cast<OtherOps>(Instruction::getOpcode());
1817   }
1818 
1819   // Methods for support type inquiry through isa, cast, and dyn_cast:
1820   static bool classof(const Instruction *I) {
1821     return I->getOpcode() == Instruction::Select;
1822   }
1823   static bool classof(const Value *V) {
1824     return isa<Instruction>(V) && classof(cast<Instruction>(V));
1825   }
1826 };
1827 
1828 template <>
1829 struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1830 };
1831 
1832 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
1833 
1834 //===----------------------------------------------------------------------===//
1835 //                                VAArgInst Class
1836 //===----------------------------------------------------------------------===//
1837 
1838 /// This class represents the va_arg llvm instruction, which returns
1839 /// an argument of the specified type given a va_list and increments that list.
1840 ///
1841 class VAArgInst : public UnaryInstruction {
1842 protected:
1843   // Note: Instruction needs to be a friend here to call cloneImpl.
1844   friend class Instruction;
1845 
1846   VAArgInst *cloneImpl() const;
1847 
1848 public:
1849   VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1850              Instruction *InsertBefore = nullptr)
1851     : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1852     setName(NameStr);
1853   }
1854 
1855   VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1856             BasicBlock *InsertAtEnd)
1857     : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1858     setName(NameStr);
1859   }
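
       // A minimal usage sketch (hypothetical va_list pointer VAList, context Ctx,
       // and InsertPt): reads the next i32 argument, i.e.
       // "%a = va_arg ptr %valist, i32".
       //   VAArgInst *VA =
       //       new VAArgInst(VAList, Type::getInt32Ty(Ctx), "a", InsertPt);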
1860 
1861   Value *getPointerOperand() { return getOperand(0); }
1862   const Value *getPointerOperand() const { return getOperand(0); }
1863   static unsigned getPointerOperandIndex() { return 0U; }
1864 
1865   // Methods for support type inquiry through isa, cast, and dyn_cast:
1866   static bool classof(const Instruction *I) {
1867     return I->getOpcode() == VAArg;
1868   }
1869   static bool classof(const Value *V) {
1870     return isa<Instruction>(V) && classof(cast<Instruction>(V));
1871   }
1872 };
1873 
1874 //===----------------------------------------------------------------------===//
1875 //                                ExtractElementInst Class
1876 //===----------------------------------------------------------------------===//
1877 
1878 /// This instruction extracts a single (scalar)
1879 /// element from a VectorType value.
1880 ///
1881 class ExtractElementInst : public Instruction {
1882   ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1883                      Instruction *InsertBefore = nullptr);
1884   ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1885                      BasicBlock *InsertAtEnd);
1886 
1887 protected:
1888   // Note: Instruction needs to be a friend here to call cloneImpl.
1889   friend class Instruction;
1890 
1891   ExtractElementInst *cloneImpl() const;
1892 
1893 public:
1894   static ExtractElementInst *Create(Value *Vec, Value *Idx,
1895                                    const Twine &NameStr = "",
1896                                    Instruction *InsertBefore = nullptr) {
1897     return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1898   }
1899 
1900   static ExtractElementInst *Create(Value *Vec, Value *Idx,
1901                                    const Twine &NameStr,
1902                                    BasicBlock *InsertAtEnd) {
1903     return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1904   }
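
       // A minimal usage sketch (hypothetical Vec, Ctx, InsertPt): extracts lane 0
       // of a vector, i.e. "%e = extractelement <4 x i32> %vec, i32 0".
       //   Value *Zero = ConstantInt::get(Type::getInt32Ty(Ctx), 0);
       //   ExtractElementInst *E =
       //       ExtractElementInst::Create(Vec, Zero, "e", InsertPt);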
1905 
1906   /// Return true if an extractelement instruction can be
1907   /// formed with the specified operands.
1908   static bool isValidOperands(const Value *Vec, const Value *Idx);
1909 
1910   Value *getVectorOperand() { return Op<0>(); }
1911   Value *getIndexOperand() { return Op<1>(); }
1912   const Value *getVectorOperand() const { return Op<0>(); }
1913   const Value *getIndexOperand() const { return Op<1>(); }
1914 
1915   VectorType *getVectorOperandType() const {
1916     return cast<VectorType>(getVectorOperand()->getType());
1917   }
1918 
1919   /// Transparently provide more efficient getOperand methods.
1920   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1921 
1922   // Methods for support type inquiry through isa, cast, and dyn_cast:
1923   static bool classof(const Instruction *I) {
1924     return I->getOpcode() == Instruction::ExtractElement;
1925   }
1926   static bool classof(const Value *V) {
1927     return isa<Instruction>(V) && classof(cast<Instruction>(V));
1928   }
1929 };
1930 
1931 template <>
1932 struct OperandTraits<ExtractElementInst> :
1933   public FixedNumOperandTraits<ExtractElementInst, 2> {
1934 };
1935 
1936 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
1937 
1938 //===----------------------------------------------------------------------===//
1939 //                                InsertElementInst Class
1940 //===----------------------------------------------------------------------===//
1941 
1942 /// This instruction inserts a single (scalar)
1943 /// element into a VectorType value.
1944 ///
1945 class InsertElementInst : public Instruction {
1946   InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1947                     const Twine &NameStr = "",
1948                     Instruction *InsertBefore = nullptr);
1949   InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1950                     BasicBlock *InsertAtEnd);
1951 
1952 protected:
1953   // Note: Instruction needs to be a friend here to call cloneImpl.
1954   friend class Instruction;
1955 
1956   InsertElementInst *cloneImpl() const;
1957 
1958 public:
1959   static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1960                                    const Twine &NameStr = "",
1961                                    Instruction *InsertBefore = nullptr) {
1962     return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1963   }
1964 
1965   static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1966                                    const Twine &NameStr,
1967                                    BasicBlock *InsertAtEnd) {
1968     return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1969   }
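
       // A minimal usage sketch (hypothetical Vec, NewElt, Ctx, InsertPt): writes
       // NewElt into lane 1, i.e. "%v = insertelement <4 x i32> %vec, i32 %x, i32 1".
       //   Value *One = ConstantInt::get(Type::getInt32Ty(Ctx), 1);
       //   InsertElementInst *I =
       //       InsertElementInst::Create(Vec, NewElt, One, "v", InsertPt);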
1970 
1971   /// Return true if an insertelement instruction can be
1972   /// formed with the specified operands.
1973   static bool isValidOperands(const Value *Vec, const Value *NewElt,
1974                               const Value *Idx);
1975 
1976   /// Overload to return most specific vector type.
1977   ///
1978   VectorType *getType() const {
1979     return cast<VectorType>(Instruction::getType());
1980   }
1981 
1982   /// Transparently provide more efficient getOperand methods.
1983   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1984 
1985   // Methods for support type inquiry through isa, cast, and dyn_cast:
1986   static bool classof(const Instruction *I) {
1987     return I->getOpcode() == Instruction::InsertElement;
1988   }
1989   static bool classof(const Value *V) {
1990     return isa<Instruction>(V) && classof(cast<Instruction>(V));
1991   }
1992 };
1993 
1994 template <>
1995 struct OperandTraits<InsertElementInst> :
1996   public FixedNumOperandTraits<InsertElementInst, 3> {
1997 };
1998 
1999 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
2000 
2001 //===----------------------------------------------------------------------===//
2002 //                           ShuffleVectorInst Class
2003 //===----------------------------------------------------------------------===//
2004 
2005 constexpr int UndefMaskElem = -1;
2006 
2007 /// This instruction constructs a fixed permutation of two
2008 /// input vectors.
2009 ///
2010 /// For each element of the result vector, the shuffle mask selects an element
2011 /// from one of the input vectors to copy to the result. Non-negative elements
2012 /// in the mask represent an index into the concatenated pair of input vectors.
2013 /// UndefMaskElem (-1) specifies that the result element is undefined.
2014 ///
2015 /// For scalable vectors, all the elements of the mask must be 0 or -1. This
2016 /// requirement may be relaxed in the future.
2017 class ShuffleVectorInst : public Instruction {
2018   SmallVector<int, 4> ShuffleMask;
2019   Constant *ShuffleMaskForBitcode;
2020 
2021 protected:
2022   // Note: Instruction needs to be a friend here to call cloneImpl.
2023   friend class Instruction;
2024 
2025   ShuffleVectorInst *cloneImpl() const;
2026 
2027 public:
2028   ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
2029                     Instruction *InsertBefore = nullptr);
2030   ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2031                     BasicBlock *InsertAtEnd);
2032   ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
2033                     Instruction *InsertBefore = nullptr);
2034   ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2035                     BasicBlock *InsertAtEnd);
2036   ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2037                     const Twine &NameStr = "",
2038                     Instruction *InsertBefore = nullptr);
2039   ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2040                     const Twine &NameStr, BasicBlock *InsertAtEnd);
2041   ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2042                     const Twine &NameStr = "",
2043                     Instruction *InsertBefore = nullptr);
2044   ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2045                     const Twine &NameStr, BasicBlock *InsertAtEnd);
2046 
2047   void *operator new(size_t S) { return User::operator new(S, 2); }
2048   void operator delete(void *Ptr) { return User::operator delete(Ptr); }
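
       // Illustrative sketch (hypothetical V1, V2, InsertPt): an integer mask
       // indexes into the concatenation of V1 and V2, here interleaving the even
       // lanes of two <4 x n> vectors.
       //   ShuffleVectorInst *SVI = new ShuffleVectorInst(
       //       V1, V2, ArrayRef<int>{0, 4, 2, 6}, "t0", InsertPt);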
2049 
2050   /// Swap the operands and adjust the mask to preserve the semantics
2051   /// of the instruction.
2052   void commute();
2053 
2054   /// Return true if a shufflevector instruction can be
2055   /// formed with the specified operands.
2056   static bool isValidOperands(const Value *V1, const Value *V2,
2057                               const Value *Mask);
2058   static bool isValidOperands(const Value *V1, const Value *V2,
2059                               ArrayRef<int> Mask);
2060 
2061   /// Overload to return most specific vector type.
2062   ///
2063   VectorType *getType() const {
2064     return cast<VectorType>(Instruction::getType());
2065   }
2066 
2067   /// Transparently provide more efficient getOperand methods.
2068   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2069 
2070   /// Return the shuffle mask value of this instruction for the given element
2071   /// index. Return UndefMaskElem if the element is undef.
2072   int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2073 
2074   /// Convert the input shuffle mask operand to a vector of integers. Undefined
2075   /// elements of the mask are returned as UndefMaskElem.
2076   static void getShuffleMask(const Constant *Mask,
2077                              SmallVectorImpl<int> &Result);
2078 
2079   /// Return the mask for this instruction as a vector of integers. Undefined
2080   /// elements of the mask are returned as UndefMaskElem.
2081   void getShuffleMask(SmallVectorImpl<int> &Result) const {
2082     Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2083   }
2084 
2085   /// Return the mask for this instruction, for use in bitcode.
2086   ///
2087   /// TODO: This is temporary until we decide a new bitcode encoding for
2088   /// shufflevector.
2089   Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2090 
2091   static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2092                                                 Type *ResultTy);
2093 
2094   void setShuffleMask(ArrayRef<int> Mask);
2095 
2096   ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2097 
2098   /// Return true if this shuffle returns a vector with a different number of
2099   /// elements than its source vectors.
2100   /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2101   ///           shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2102   bool changesLength() const {
2103     unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2104                                  ->getElementCount()
2105                                  .getKnownMinValue();
2106     unsigned NumMaskElts = ShuffleMask.size();
2107     return NumSourceElts != NumMaskElts;
2108   }
2109 
2110   /// Return true if this shuffle returns a vector with a greater number of
2111   /// elements than its source vectors.
2112   /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2113   bool increasesLength() const {
2114     unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2115                                  ->getElementCount()
2116                                  .getKnownMinValue();
2117     unsigned NumMaskElts = ShuffleMask.size();
2118     return NumSourceElts < NumMaskElts;
2119   }
2120 
2121   /// Return true if this shuffle mask chooses elements from exactly one source
2122   /// vector.
2123   /// Example: <7,5,undef,7>
2124   /// This assumes that vector operands are the same length as the mask.
2125   static bool isSingleSourceMask(ArrayRef<int> Mask);
2126   static bool isSingleSourceMask(const Constant *Mask) {
2127     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2128     SmallVector<int, 16> MaskAsInts;
2129     getShuffleMask(Mask, MaskAsInts);
2130     return isSingleSourceMask(MaskAsInts);
2131   }
2132 
2133   /// Return true if this shuffle chooses elements from exactly one source
2134   /// vector without changing the length of that vector.
2135   /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2136   /// TODO: Optionally allow length-changing shuffles.
2137   bool isSingleSource() const {
2138     return !changesLength() && isSingleSourceMask(ShuffleMask);
2139   }
2140 
2141   /// Return true if this shuffle mask chooses elements from exactly one source
2142   /// vector without lane crossings. A shuffle using this mask is not
2143   /// necessarily a no-op because it may change the number of elements from its
2144   /// input vectors or it may provide demanded bits knowledge via undef lanes.
2145   /// Example: <undef,undef,2,3>
2146   static bool isIdentityMask(ArrayRef<int> Mask);
2147   static bool isIdentityMask(const Constant *Mask) {
2148     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2149 
2150     // Not possible to express a shuffle mask for a scalable vector for this
2151     // case.
2152     if (isa<ScalableVectorType>(Mask->getType()))
2153       return false;
2154 
2155     SmallVector<int, 16> MaskAsInts;
2156     getShuffleMask(Mask, MaskAsInts);
2157     return isIdentityMask(MaskAsInts);
2158   }
2159 
2160   /// Return true if this shuffle chooses elements from exactly one source
2161   /// vector without lane crossings and does not change the number of elements
2162   /// from its input vectors.
2163   /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2164   bool isIdentity() const {
2165     // Not possible to express a shuffle mask for a scalable vector for this
2166     // case.
2167     if (isa<ScalableVectorType>(getType()))
2168       return false;
2169 
2170     return !changesLength() && isIdentityMask(ShuffleMask);
2171   }
2172 
2173   /// Return true if this shuffle lengthens exactly one source vector with
2174   /// undefs in the high elements.
2175   bool isIdentityWithPadding() const;
2176 
2177   /// Return true if this shuffle extracts the first N elements of exactly one
2178   /// source vector.
2179   bool isIdentityWithExtract() const;
2180 
2181   /// Return true if this shuffle concatenates its 2 source vectors. This
2182   /// returns false if either input is undefined. In that case, the shuffle is
2183   /// better classified as an identity with padding operation.
2184   bool isConcat() const;
2185 
2186   /// Return true if this shuffle mask chooses elements from its source vectors
2187   /// without lane crossings. A shuffle using this mask would be
2188   /// equivalent to a vector select with a constant condition operand.
2189   /// Example: <4,1,6,undef>
2190   /// This returns false if the mask does not choose from both input vectors.
2191   /// In that case, the shuffle is better classified as an identity shuffle.
2192   /// This assumes that vector operands are the same length as the mask
2193   /// (a length-changing shuffle can never be equivalent to a vector select).
2194   static bool isSelectMask(ArrayRef<int> Mask);
2195   static bool isSelectMask(const Constant *Mask) {
2196     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2197     SmallVector<int, 16> MaskAsInts;
2198     getShuffleMask(Mask, MaskAsInts);
2199     return isSelectMask(MaskAsInts);
2200   }
2201 
2202   /// Return true if this shuffle chooses elements from its source vectors
2203   /// without lane crossings and all operands have the same number of elements.
2204   /// In other words, this shuffle is equivalent to a vector select with a
2205   /// constant condition operand.
2206   /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2207   /// This returns false if the mask does not choose from both input vectors.
2208   /// In that case, the shuffle is better classified as an identity shuffle.
2209   /// TODO: Optionally allow length-changing shuffles.
2210   bool isSelect() const {
2211     return !changesLength() && isSelectMask(ShuffleMask);
2212   }
2213 
2214   /// Return true if this shuffle mask swaps the order of elements from exactly
2215   /// one source vector.
2216   /// Example: <7,6,undef,4>
2217   /// This assumes that vector operands are the same length as the mask.
2218   static bool isReverseMask(ArrayRef<int> Mask);
2219   static bool isReverseMask(const Constant *Mask) {
2220     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2221     SmallVector<int, 16> MaskAsInts;
2222     getShuffleMask(Mask, MaskAsInts);
2223     return isReverseMask(MaskAsInts);
2224   }
2225 
2226   /// Return true if this shuffle swaps the order of elements from exactly
2227   /// one source vector.
2228   /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2229   /// TODO: Optionally allow length-changing shuffles.
2230   bool isReverse() const {
2231     return !changesLength() && isReverseMask(ShuffleMask);
2232   }
2233 
2234   /// Return true if this shuffle mask chooses all elements with the same value
2235   /// as the first element of exactly one source vector.
2236   /// Example: <4,undef,undef,4>
2237   /// This assumes that vector operands are the same length as the mask.
2238   static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2239   static bool isZeroEltSplatMask(const Constant *Mask) {
2240     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2241     SmallVector<int, 16> MaskAsInts;
2242     getShuffleMask(Mask, MaskAsInts);
2243     return isZeroEltSplatMask(MaskAsInts);
2244   }
2245 
2246   /// Return true if all elements of this shuffle are the same value as the
2247   /// first element of exactly one source vector without changing the length
2248   /// of that vector.
2249   /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2250   /// TODO: Optionally allow length-changing shuffles.
2251   /// TODO: Optionally allow splats from other elements.
2252   bool isZeroEltSplat() const {
2253     return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2254   }
2255 
2256   /// Return true if this shuffle mask is a transpose mask.
2257   /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2258   /// even- or odd-numbered vector elements from two n-dimensional source
2259   /// vectors and write each result into consecutive elements of an
2260   /// n-dimensional destination vector. Two shuffles are necessary to complete
2261   /// the transpose, one for the even elements and another for the odd elements.
2262   /// This description closely follows how the TRN1 and TRN2 AArch64
2263   /// instructions operate.
2264   ///
2265   /// For example, a simple 2x2 matrix can be transposed with:
2266   ///
2267   ///   ; Original matrix
2268   ///   m0 = < a, b >
2269   ///   m1 = < c, d >
2270   ///
2271   ///   ; Transposed matrix
2272   ///   t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2273   ///   t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2274   ///
2275   /// For matrices having greater than n columns, the resulting nx2 transposed
2276   /// matrix is stored in two result vectors such that one vector contains
2277   /// interleaved elements from all the even-numbered rows and the other vector
2278   /// contains interleaved elements from all the odd-numbered rows. For example,
2279   /// a 2x4 matrix can be transposed with:
2280   ///
2281   ///   ; Original matrix
2282   ///   m0 = < a, b, c, d >
2283   ///   m1 = < e, f, g, h >
2284   ///
2285   ///   ; Transposed matrix
2286   ///   t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2287   ///   t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2288   static bool isTransposeMask(ArrayRef<int> Mask);
2289   static bool isTransposeMask(const Constant *Mask) {
2290     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2291     SmallVector<int, 16> MaskAsInts;
2292     getShuffleMask(Mask, MaskAsInts);
2293     return isTransposeMask(MaskAsInts);
2294   }
2295 
2296   /// Return true if this shuffle transposes the elements of its inputs without
2297   /// changing the length of the vectors. This operation may also be known as a
2298   /// merge or interleave. See the description for isTransposeMask() for the
2299   /// exact specification.
2300   /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2301   bool isTranspose() const {
2302     return !changesLength() && isTransposeMask(ShuffleMask);
2303   }
2304 
2305   /// Return true if this shuffle mask is a splice mask, concatenating the two
2306   /// inputs together and then extracting an original width vector starting
2307   /// from the splice index.
2308   /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2309   static bool isSpliceMask(ArrayRef<int> Mask, int &Index);
2310   static bool isSpliceMask(const Constant *Mask, int &Index) {
2311     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2312     SmallVector<int, 16> MaskAsInts;
2313     getShuffleMask(Mask, MaskAsInts);
2314     return isSpliceMask(MaskAsInts, Index);
2315   }
2316 
2317   /// Return true if this shuffle splices two inputs without changing the length
2318   /// of the vectors. This operation concatenates the two inputs together and
2319   /// then extracts an original width vector starting from the splice index.
2320   /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2321   bool isSplice(int &Index) const {
2322     return !changesLength() && isSpliceMask(ShuffleMask, Index);
2323   }
2324 
2325   /// Return true if this shuffle mask is an extract subvector mask.
2326   /// A valid extract subvector mask returns a smaller vector from a single
2327   /// source operand. The base extraction index is returned as well.
2328   static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2329                                      int &Index);
2330   static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2331                                      int &Index) {
2332     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2333     // Not possible to express a shuffle mask for a scalable vector for this
2334     // case.
2335     if (isa<ScalableVectorType>(Mask->getType()))
2336       return false;
2337     SmallVector<int, 16> MaskAsInts;
2338     getShuffleMask(Mask, MaskAsInts);
2339     return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2340   }
2341 
2342   /// Return true if this shuffle mask is an extract subvector mask.
2343   bool isExtractSubvectorMask(int &Index) const {
2344     // Not possible to express a shuffle mask for a scalable vector for this
2345     // case.
2346     if (isa<ScalableVectorType>(getType()))
2347       return false;
2348 
2349     int NumSrcElts =
2350         cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2351     return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2352   }
2353 
2354   /// Return true if this shuffle mask is an insert subvector mask.
2355   /// A valid insert subvector mask inserts the lowest elements of a second
2356   /// source operand into an in-place first source operand.
2357   /// Both the subvector width and the insertion index are returned.
2358   static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2359                                     int &NumSubElts, int &Index);
2360   static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2361                                     int &NumSubElts, int &Index) {
2362     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2363     // Not possible to express a shuffle mask for a scalable vector for this
2364     // case.
2365     if (isa<ScalableVectorType>(Mask->getType()))
2366       return false;
2367     SmallVector<int, 16> MaskAsInts;
2368     getShuffleMask(Mask, MaskAsInts);
2369     return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2370   }
2371 
2372   /// Return true if this shuffle mask is an insert subvector mask.
2373   bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2374     // Not possible to express a shuffle mask for a scalable vector for this
2375     // case.
2376     if (isa<ScalableVectorType>(getType()))
2377       return false;
2378 
2379     int NumSrcElts =
2380         cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2381     return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2382   }
2383 
2384   /// Return true if this shuffle mask replicates each of the \p VF elements
2385   /// in a vector \p ReplicationFactor times.
2386   /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2387   ///   <0,0,0,1,1,1,2,2,2,3,3,3>
2388   static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2389                                 int &VF);
2390   static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2391                                 int &VF) {
2392     assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2393     // Not possible to express a shuffle mask for a scalable vector for this
2394     // case.
2395     if (isa<ScalableVectorType>(Mask->getType()))
2396       return false;
2397     SmallVector<int, 16> MaskAsInts;
2398     getShuffleMask(Mask, MaskAsInts);
2399     return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2400   }
2401 
2402   /// Return true if this shuffle mask is a replication mask.
2403   bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2404 
2405   /// Return true if this shuffle mask represents a "clustered" mask of size
2406   /// VF, i.e. each index in [0..VF) is used exactly once in each submask of
2407   /// size VF.
2408   /// For example, the mask for \p VF=4 is:
2409   /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4
2410   /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly once.
2411   /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because
2412   ///                          element 3 is used twice in the second submask
2413   ///                          (3,3,1,0) and index 2 is not used at all.
2414   static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF);
2415 
2416   /// Return true if this shuffle mask is a one-use-single-source("clustered")
2417   /// mask.
2418   bool isOneUseSingleSourceMask(int VF) const;
2419 
2420   /// Change values in a shuffle permute mask assuming the two vector operands
2421   /// of length InVecNumElts have swapped position.
2422   static void commuteShuffleMask(MutableArrayRef<int> Mask,
2423                                  unsigned InVecNumElts) {
2424     for (int &Idx : Mask) {
2425       if (Idx == -1)
2426         continue;
2427       Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2428       assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2429              "shufflevector mask index out of range");
2430     }
2431   }
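
       // Worked sketch of commuteShuffleMask with InVecNumElts = 4: indices that
       // referred to the first vector now refer to the second, and vice versa.
       //   SmallVector<int, 4> M = {0, 5, 2, 7};
       //   ShuffleVectorInst::commuteShuffleMask(M, 4);   // M is now {4, 1, 6, 3}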
2432 
2433   // Methods for support type inquiry through isa, cast, and dyn_cast:
2434   static bool classof(const Instruction *I) {
2435     return I->getOpcode() == Instruction::ShuffleVector;
2436   }
2437   static bool classof(const Value *V) {
2438     return isa<Instruction>(V) && classof(cast<Instruction>(V));
2439   }
2440 };
2441 
2442 template <>
2443 struct OperandTraits<ShuffleVectorInst>
2444     : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2445 
2446 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
2447 
2448 //===----------------------------------------------------------------------===//
2449 //                                ExtractValueInst Class
2450 //===----------------------------------------------------------------------===//
2451 
2452 /// This instruction extracts a struct member or array
2453 /// element value from an aggregate value.
2454 ///
2455 class ExtractValueInst : public UnaryInstruction {
2456   SmallVector<unsigned, 4> Indices;
2457 
2458   ExtractValueInst(const ExtractValueInst &EVI);
2459 
2460   /// Constructors - Create an extractvalue instruction with a base aggregate
2461   /// value and a list of indices.  The first ctor can optionally insert before
2462   /// an existing instruction, the second appends the new instruction to the
2463   /// specified BasicBlock.
2464   inline ExtractValueInst(Value *Agg,
2465                           ArrayRef<unsigned> Idxs,
2466                           const Twine &NameStr,
2467                           Instruction *InsertBefore);
2468   inline ExtractValueInst(Value *Agg,
2469                           ArrayRef<unsigned> Idxs,
2470                           const Twine &NameStr, BasicBlock *InsertAtEnd);
2471 
2472   void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2473 
2474 protected:
2475   // Note: Instruction needs to be a friend here to call cloneImpl.
2476   friend class Instruction;
2477 
2478   ExtractValueInst *cloneImpl() const;
2479 
2480 public:
2481   static ExtractValueInst *Create(Value *Agg,
2482                                   ArrayRef<unsigned> Idxs,
2483                                   const Twine &NameStr = "",
2484                                   Instruction *InsertBefore = nullptr) {
2485     return new
2486       ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2487   }
2488 
2489   static ExtractValueInst *Create(Value *Agg,
2490                                   ArrayRef<unsigned> Idxs,
2491                                   const Twine &NameStr,
2492                                   BasicBlock *InsertAtEnd) {
2493     return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2494   }
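
       // A minimal usage sketch (hypothetical aggregate Agg and InsertPt): with
       // indices {1, 0} this extracts field 0 of the struct stored in element 1
       // of Agg, i.e. "%x = extractvalue {i32, {i64, i8}} %agg, 1, 0".
       //   ExtractValueInst *EV =
       //       ExtractValueInst::Create(Agg, {1, 0}, "x", InsertPt);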
2495 
2496   /// Returns the type of the element that would be extracted
2497   /// with an extractvalue instruction with the specified parameters.
2498   ///
2499   /// Null is returned if the indices are invalid for the specified type.
2500   static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2501 
2502   using idx_iterator = const unsigned*;
2503 
2504   inline idx_iterator idx_begin() const { return Indices.begin(); }
2505   inline idx_iterator idx_end()   const { return Indices.end(); }
2506   inline iterator_range<idx_iterator> indices() const {
2507     return make_range(idx_begin(), idx_end());
2508   }
2509 
2510   Value *getAggregateOperand() {
2511     return getOperand(0);
2512   }
2513   const Value *getAggregateOperand() const {
2514     return getOperand(0);
2515   }
2516   static unsigned getAggregateOperandIndex() {
2517     return 0U;                      // get index for modifying correct operand
2518   }
2519 
2520   ArrayRef<unsigned> getIndices() const {
2521     return Indices;
2522   }
2523 
2524   unsigned getNumIndices() const {
2525     return (unsigned)Indices.size();
2526   }
2527 
2528   bool hasIndices() const {
2529     return true;
2530   }
2531 
2532   // Methods for support type inquiry through isa, cast, and dyn_cast:
2533   static bool classof(const Instruction *I) {
2534     return I->getOpcode() == Instruction::ExtractValue;
2535   }
2536   static bool classof(const Value *V) {
2537     return isa<Instruction>(V) && classof(cast<Instruction>(V));
2538   }
2539 };
2540 
2541 ExtractValueInst::ExtractValueInst(Value *Agg,
2542                                    ArrayRef<unsigned> Idxs,
2543                                    const Twine &NameStr,
2544                                    Instruction *InsertBefore)
2545   : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2546                      ExtractValue, Agg, InsertBefore) {
2547   init(Idxs, NameStr);
2548 }
2549 
2550 ExtractValueInst::ExtractValueInst(Value *Agg,
2551                                    ArrayRef<unsigned> Idxs,
2552                                    const Twine &NameStr,
2553                                    BasicBlock *InsertAtEnd)
2554   : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2555                      ExtractValue, Agg, InsertAtEnd) {
2556   init(Idxs, NameStr);
2557 }
2558 
2559 //===----------------------------------------------------------------------===//
2560 //                                InsertValueInst Class
2561 //===----------------------------------------------------------------------===//
2562 
2563 /// This instruction inserts a struct field or array element
2564 /// value into an aggregate value.
2565 ///
2566 class InsertValueInst : public Instruction {
2567   SmallVector<unsigned, 4> Indices;
2568 
2569   InsertValueInst(const InsertValueInst &IVI);
2570 
2571   /// Constructors - Create an insertvalue instruction with a base aggregate
2572   /// value, a value to insert, and a list of indices.  The first ctor can
2573   /// optionally insert before an existing instruction, the second appends
2574   /// the new instruction to the specified BasicBlock.
2575   inline InsertValueInst(Value *Agg, Value *Val,
2576                          ArrayRef<unsigned> Idxs,
2577                          const Twine &NameStr,
2578                          Instruction *InsertBefore);
2579   inline InsertValueInst(Value *Agg, Value *Val,
2580                          ArrayRef<unsigned> Idxs,
2581                          const Twine &NameStr, BasicBlock *InsertAtEnd);
2582 
2583   /// Constructors - These two constructors are convenience methods because one
2584   /// and two index insertvalue instructions are so common.
2585   InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2586                   const Twine &NameStr = "",
2587                   Instruction *InsertBefore = nullptr);
2588   InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2589                   BasicBlock *InsertAtEnd);
2590 
2591   void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2592             const Twine &NameStr);
2593 
2594 protected:
2595   // Note: Instruction needs to be a friend here to call cloneImpl.
2596   friend class Instruction;
2597 
2598   InsertValueInst *cloneImpl() const;
2599 
2600 public:
2601   // allocate space for exactly two operands
2602   void *operator new(size_t S) { return User::operator new(S, 2); }
2603   void operator delete(void *Ptr) { User::operator delete(Ptr); }
2604 
2605   static InsertValueInst *Create(Value *Agg, Value *Val,
2606                                  ArrayRef<unsigned> Idxs,
2607                                  const Twine &NameStr = "",
2608                                  Instruction *InsertBefore = nullptr) {
2609     return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2610   }
2611 
2612   static InsertValueInst *Create(Value *Agg, Value *Val,
2613                                  ArrayRef<unsigned> Idxs,
2614                                  const Twine &NameStr,
2615                                  BasicBlock *InsertAtEnd) {
2616     return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2617   }
2618 
2619   /// Transparently provide more efficient getOperand methods.
2620   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2621 
2622   using idx_iterator = const unsigned*;
2623 
2624   inline idx_iterator idx_begin() const { return Indices.begin(); }
2625   inline idx_iterator idx_end()   const { return Indices.end(); }
2626   inline iterator_range<idx_iterator> indices() const {
2627     return make_range(idx_begin(), idx_end());
2628   }
2629 
2630   Value *getAggregateOperand() {
2631     return getOperand(0);
2632   }
2633   const Value *getAggregateOperand() const {
2634     return getOperand(0);
2635   }
2636   static unsigned getAggregateOperandIndex() {
2637     return 0U;                      // get index for modifying correct operand
2638   }
2639 
2640   Value *getInsertedValueOperand() {
2641     return getOperand(1);
2642   }
2643   const Value *getInsertedValueOperand() const {
2644     return getOperand(1);
2645   }
2646   static unsigned getInsertedValueOperandIndex() {
2647     return 1U;                      // get index for modifying correct operand
2648   }
2649 
2650   ArrayRef<unsigned> getIndices() const {
2651     return Indices;
2652   }
2653 
2654   unsigned getNumIndices() const {
2655     return (unsigned)Indices.size();
2656   }
2657 
2658   bool hasIndices() const {
2659     return true;
2660   }
2661 
2662   // Methods for support type inquiry through isa, cast, and dyn_cast:
2663   static bool classof(const Instruction *I) {
2664     return I->getOpcode() == Instruction::InsertValue;
2665   }
2666   static bool classof(const Value *V) {
2667     return isa<Instruction>(V) && classof(cast<Instruction>(V));
2668   }
2669 };
2670 
2671 template <>
2672 struct OperandTraits<InsertValueInst> :
2673   public FixedNumOperandTraits<InsertValueInst, 2> {
2674 };
2675 
2676 InsertValueInst::InsertValueInst(Value *Agg,
2677                                  Value *Val,
2678                                  ArrayRef<unsigned> Idxs,
2679                                  const Twine &NameStr,
2680                                  Instruction *InsertBefore)
2681   : Instruction(Agg->getType(), InsertValue,
2682                 OperandTraits<InsertValueInst>::op_begin(this),
2683                 2, InsertBefore) {
2684   init(Agg, Val, Idxs, NameStr);
2685 }
2686 
2687 InsertValueInst::InsertValueInst(Value *Agg,
2688                                  Value *Val,
2689                                  ArrayRef<unsigned> Idxs,
2690                                  const Twine &NameStr,
2691                                  BasicBlock *InsertAtEnd)
2692   : Instruction(Agg->getType(), InsertValue,
2693                 OperandTraits<InsertValueInst>::op_begin(this),
2694                 2, InsertAtEnd) {
2695   init(Agg, Val, Idxs, NameStr);
2696 }
2697 
2698 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
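
// A minimal usage sketch (illustrative only): write `NewElt` into index 1 of
// the aggregate `Agg`, inserting before an existing instruction `InsertPt`.
// All three names are assumed to be provided by the caller; the result has
// the same type as `Agg`.
//
//   InsertValueInst *IV =
//       InsertValueInst::Create(Agg, NewElt, /*Idxs=*/{1}, "agg.upd", InsertPt);
//   assert(IV->getNumIndices() == 1 && IV->hasIndices());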
2699 
2700 //===----------------------------------------------------------------------===//
2701 //                               PHINode Class
2702 //===----------------------------------------------------------------------===//
2703 
// PHINode - The PHINode class is used to represent the magical mystical PHI
// node, which cannot exist in nature, but can be synthesized in a computer
// scientist's overactive imagination.
2707 //
2708 class PHINode : public Instruction {
  /// ReservedSpace is the number of operands actually allocated; NumOperands
  /// is the number actually in use.
2711   unsigned ReservedSpace;
2712 
2713   PHINode(const PHINode &PN);
2714 
2715   explicit PHINode(Type *Ty, unsigned NumReservedValues,
2716                    const Twine &NameStr = "",
2717                    Instruction *InsertBefore = nullptr)
2718     : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2719       ReservedSpace(NumReservedValues) {
2720     assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2721     setName(NameStr);
2722     allocHungoffUses(ReservedSpace);
2723   }
2724 
2725   PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2726           BasicBlock *InsertAtEnd)
2727     : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2728       ReservedSpace(NumReservedValues) {
2729     assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2730     setName(NameStr);
2731     allocHungoffUses(ReservedSpace);
2732   }
2733 
2734 protected:
2735   // Note: Instruction needs to be a friend here to call cloneImpl.
2736   friend class Instruction;
2737 
2738   PHINode *cloneImpl() const;
2739 
2740   // allocHungoffUses - this is more complicated than the generic
2741   // User::allocHungoffUses, because we have to allocate Uses for the incoming
2742   // values and pointers to the incoming blocks, all in one allocation.
2743   void allocHungoffUses(unsigned N) {
2744     User::allocHungoffUses(N, /* IsPhi */ true);
2745   }
2746 
2747 public:
2748   /// Constructors - NumReservedValues is a hint for the number of incoming
2749   /// edges that this phi node will have (use 0 if you really have no idea).
2750   static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2751                          const Twine &NameStr = "",
2752                          Instruction *InsertBefore = nullptr) {
2753     return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2754   }
2755 
2756   static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2757                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
2758     return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2759   }
2760 
2761   /// Provide fast operand accessors
2762   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2763 
2764   // Block iterator interface. This provides access to the list of incoming
2765   // basic blocks, which parallels the list of incoming values.
2766   // Please note that we are not providing non-const iterators for blocks to
  // force all updates to go through an interface function.
2768 
2769   using block_iterator = BasicBlock **;
2770   using const_block_iterator = BasicBlock * const *;
2771 
2772   const_block_iterator block_begin() const {
2773     return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2774   }
2775 
2776   const_block_iterator block_end() const {
2777     return block_begin() + getNumOperands();
2778   }
2779 
2780   iterator_range<const_block_iterator> blocks() const {
2781     return make_range(block_begin(), block_end());
2782   }
2783 
2784   op_range incoming_values() { return operands(); }
2785 
2786   const_op_range incoming_values() const { return operands(); }
2787 
2788   /// Return the number of incoming edges
2789   ///
2790   unsigned getNumIncomingValues() const { return getNumOperands(); }
2791 
  /// Return incoming value number \p i.
2793   ///
2794   Value *getIncomingValue(unsigned i) const {
2795     return getOperand(i);
2796   }
2797   void setIncomingValue(unsigned i, Value *V) {
2798     assert(V && "PHI node got a null value!");
2799     assert(getType() == V->getType() &&
2800            "All operands to PHI node must be the same type as the PHI node!");
2801     setOperand(i, V);
2802   }
2803 
2804   static unsigned getOperandNumForIncomingValue(unsigned i) {
2805     return i;
2806   }
2807 
2808   static unsigned getIncomingValueNumForOperand(unsigned i) {
2809     return i;
2810   }
2811 
  /// Return incoming basic block number \p i.
2813   ///
2814   BasicBlock *getIncomingBlock(unsigned i) const {
2815     return block_begin()[i];
2816   }
2817 
2818   /// Return incoming basic block corresponding
2819   /// to an operand of the PHI.
2820   ///
2821   BasicBlock *getIncomingBlock(const Use &U) const {
2822     assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2823     return getIncomingBlock(unsigned(&U - op_begin()));
2824   }
2825 
2826   /// Return incoming basic block corresponding
2827   /// to value use iterator.
2828   ///
2829   BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2830     return getIncomingBlock(I.getUse());
2831   }
2832 
2833   void setIncomingBlock(unsigned i, BasicBlock *BB) {
2834     const_cast<block_iterator>(block_begin())[i] = BB;
2835   }
2836 
2837   /// Copies the basic blocks from \p BBRange to the incoming basic block list
2838   /// of this PHINode, starting at \p ToIdx.
2839   void copyIncomingBlocks(iterator_range<const_block_iterator> BBRange,
2840                           uint32_t ToIdx = 0) {
2841     copy(BBRange, const_cast<block_iterator>(block_begin()) + ToIdx);
2842   }
2843 
  /// Replace every incoming basic block \p Old with basic block \p New.
2845   void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2846     assert(New && Old && "PHI node got a null basic block!");
2847     for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2848       if (getIncomingBlock(Op) == Old)
2849         setIncomingBlock(Op, New);
2850   }
2851 
2852   /// Add an incoming value to the end of the PHI list
2853   ///
2854   void addIncoming(Value *V, BasicBlock *BB) {
2855     if (getNumOperands() == ReservedSpace)
2856       growOperands();  // Get more space!
2857     // Initialize some new operands.
2858     setNumHungOffUseOperands(getNumOperands() + 1);
2859     setIncomingValue(getNumOperands() - 1, V);
2860     setIncomingBlock(getNumOperands() - 1, BB);
2861   }
2862 
2863   /// Remove an incoming value.  This is useful if a
2864   /// predecessor basic block is deleted.  The value removed is returned.
2865   ///
2866   /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2867   /// is true), the PHI node is destroyed and any uses of it are replaced with
2868   /// dummy values.  The only time there should be zero incoming values to a PHI
2869   /// node is when the block is dead, so this strategy is sound.
2870   ///
2871   Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2872 
2873   Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2874     int Idx = getBasicBlockIndex(BB);
2875     assert(Idx >= 0 && "Invalid basic block argument to remove!");
2876     return removeIncomingValue(Idx, DeletePHIIfEmpty);
2877   }
2878 
2879   /// Return the first index of the specified basic
2880   /// block in the value list for this PHI.  Returns -1 if no instance.
2881   ///
2882   int getBasicBlockIndex(const BasicBlock *BB) const {
2883     for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2884       if (block_begin()[i] == BB)
2885         return i;
2886     return -1;
2887   }
2888 
2889   Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2890     int Idx = getBasicBlockIndex(BB);
2891     assert(Idx >= 0 && "Invalid basic block argument!");
2892     return getIncomingValue(Idx);
2893   }
2894 
  /// Set every incoming value for block \p BB to \p V.
2896   void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2897     assert(BB && "PHI node got a null basic block!");
2898     bool Found = false;
2899     for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2900       if (getIncomingBlock(Op) == BB) {
2901         Found = true;
2902         setIncomingValue(Op, V);
2903       }
2904     (void)Found;
2905     assert(Found && "Invalid basic block argument to set!");
2906   }
2907 
  /// If this PHI node always merges together the same value, return that
  /// value; otherwise return null.
2910   Value *hasConstantValue() const;
2911 
  /// Whether this PHI node always merges together the same value, assuming
  /// that undefs are equal to a unique non-undef value.
2915   bool hasConstantOrUndefValue() const;
2916 
  /// Return true if the PHI node is complete, i.e., all of its parent block's
  /// predecessors have an incoming value in this PHI; otherwise return false.
2919   bool isComplete() const {
2920     return llvm::all_of(predecessors(getParent()),
2921                         [this](const BasicBlock *Pred) {
2922                           return getBasicBlockIndex(Pred) >= 0;
2923                         });
2924   }
2925 
2926   /// Methods for support type inquiry through isa, cast, and dyn_cast:
2927   static bool classof(const Instruction *I) {
2928     return I->getOpcode() == Instruction::PHI;
2929   }
2930   static bool classof(const Value *V) {
2931     return isa<Instruction>(V) && classof(cast<Instruction>(V));
2932   }
2933 
2934 private:
2935   void growOperands();
2936 };
2937 
2938 template <>
2939 struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2940 };
2941 
2942 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
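
// A minimal usage sketch (illustrative only): merge two values at a join
// block. `MergeBB` is assumed to be a still-empty block, `ThenBB`/`ElseBB`
// its predecessors, and `ThenV`/`ElseV` two values of the same type.
//
//   PHINode *PN = PHINode::Create(ThenV->getType(), /*NumReservedValues=*/2,
//                                 "merge", MergeBB);
//   PN->addIncoming(ThenV, ThenBB);
//   PN->addIncoming(ElseV, ElseBB);
//   assert(PN->getIncomingValueForBlock(ThenBB) == ThenV);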
2943 
2944 //===----------------------------------------------------------------------===//
2945 //                           LandingPadInst Class
2946 //===----------------------------------------------------------------------===//
2947 
2948 //===---------------------------------------------------------------------------
2949 /// The landingpad instruction holds all of the information
2950 /// necessary to generate correct exception handling. The landingpad instruction
2951 /// cannot be moved from the top of a landing pad block, which itself is
2952 /// accessible only from the 'unwind' edge of an invoke. This uses the
2953 /// SubclassData field in Value to store whether or not the landingpad is a
2954 /// cleanup.
2955 ///
2956 class LandingPadInst : public Instruction {
2957   using CleanupField = BoolBitfieldElementT<0>;
2958 
  /// ReservedSpace is the number of operands actually allocated; NumOperands
  /// is the number actually in use.
2961   unsigned ReservedSpace;
2962 
2963   LandingPadInst(const LandingPadInst &LP);
2964 
2965 public:
2966   enum ClauseType { Catch, Filter };
2967 
2968 private:
2969   explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2970                           const Twine &NameStr, Instruction *InsertBefore);
2971   explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2972                           const Twine &NameStr, BasicBlock *InsertAtEnd);
2973 
2974   // Allocate space for exactly zero operands.
2975   void *operator new(size_t S) { return User::operator new(S); }
2976 
2977   void growOperands(unsigned Size);
2978   void init(unsigned NumReservedValues, const Twine &NameStr);
2979 
2980 protected:
2981   // Note: Instruction needs to be a friend here to call cloneImpl.
2982   friend class Instruction;
2983 
2984   LandingPadInst *cloneImpl() const;
2985 
2986 public:
2987   void operator delete(void *Ptr) { User::operator delete(Ptr); }
2988 
2989   /// Constructors - NumReservedClauses is a hint for the number of incoming
2990   /// clauses that this landingpad will have (use 0 if you really have no idea).
2991   static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2992                                 const Twine &NameStr = "",
2993                                 Instruction *InsertBefore = nullptr);
2994   static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2995                                 const Twine &NameStr, BasicBlock *InsertAtEnd);
2996 
2997   /// Provide fast operand accessors
2998   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2999 
3000   /// Return 'true' if this landingpad instruction is a
3001   /// cleanup. I.e., it should be run when unwinding even if its landing pad
3002   /// doesn't catch the exception.
3003   bool isCleanup() const { return getSubclassData<CleanupField>(); }
3004 
3005   /// Indicate that this landingpad instruction is a cleanup.
3006   void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
3007 
3008   /// Add a catch or filter clause to the landing pad.
3009   void addClause(Constant *ClauseVal);
3010 
3011   /// Get the value of the clause at index Idx. Use isCatch/isFilter to
3012   /// determine what type of clause this is.
3013   Constant *getClause(unsigned Idx) const {
3014     return cast<Constant>(getOperandList()[Idx]);
3015   }
3016 
  /// Return 'true' if the clause at index Idx is a catch clause.
3018   bool isCatch(unsigned Idx) const {
3019     return !isa<ArrayType>(getOperandList()[Idx]->getType());
3020   }
3021 
  /// Return 'true' if the clause at index Idx is a filter clause.
3023   bool isFilter(unsigned Idx) const {
3024     return isa<ArrayType>(getOperandList()[Idx]->getType());
3025   }
3026 
3027   /// Get the number of clauses for this landing pad.
3028   unsigned getNumClauses() const { return getNumOperands(); }
3029 
3030   /// Grow the size of the operand list to accommodate the new
3031   /// number of clauses.
3032   void reserveClauses(unsigned Size) { growOperands(Size); }
3033 
3034   // Methods for support type inquiry through isa, cast, and dyn_cast:
3035   static bool classof(const Instruction *I) {
3036     return I->getOpcode() == Instruction::LandingPad;
3037   }
3038   static bool classof(const Value *V) {
3039     return isa<Instruction>(V) && classof(cast<Instruction>(V));
3040   }
3041 };
3042 
3043 template <>
3044 struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
3045 };
3046 
3047 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
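
// A minimal usage sketch (illustrative only): a landingpad with one catch
// clause that also runs cleanups. `ExnTy` is the personality-specific result
// type (e.g. a {ptr, i32} struct), `TypeInfo` is a Constant* describing the
// caught type, and `LPadBB` is the landing pad block; all are assumed to be
// set up by the caller.
//
//   LandingPadInst *LP =
//       LandingPadInst::Create(ExnTy, /*NumReservedClauses=*/1, "lpad", LPadBB);
//   LP->addClause(TypeInfo); // non-array clause type => catch clause
//   LP->setCleanup(true);    // also run cleanups while unwinding
//   assert(LP->isCatch(0) && LP->isCleanup());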
3048 
3049 //===----------------------------------------------------------------------===//
3050 //                               ReturnInst Class
3051 //===----------------------------------------------------------------------===//
3052 
3053 //===---------------------------------------------------------------------------
3054 /// Return a value (possibly void), from a function.  Execution
3055 /// does not continue in this function any longer.
3056 ///
3057 class ReturnInst : public Instruction {
3058   ReturnInst(const ReturnInst &RI);
3059 
3060 private:
3061   // ReturnInst constructors:
3062   // ReturnInst()                  - 'ret void' instruction
3063   // ReturnInst(    null)          - 'ret void' instruction
3064   // ReturnInst(Value* X)          - 'ret X'    instruction
3065   // ReturnInst(    null, Inst *I) - 'ret void' instruction, insert before I
3066   // ReturnInst(Value* X, Inst *I) - 'ret X'    instruction, insert before I
3067   // ReturnInst(    null, BB *B)   - 'ret void' instruction, insert @ end of B
3068   // ReturnInst(Value* X, BB *B)   - 'ret X'    instruction, insert @ end of B
3069   //
3070   // NOTE: If the Value* passed is of type void then the constructor behaves as
3071   // if it was passed NULL.
3072   explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3073                       Instruction *InsertBefore = nullptr);
3074   ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3075   explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3076 
3077 protected:
3078   // Note: Instruction needs to be a friend here to call cloneImpl.
3079   friend class Instruction;
3080 
3081   ReturnInst *cloneImpl() const;
3082 
3083 public:
3084   static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3085                             Instruction *InsertBefore = nullptr) {
3086     return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3087   }
3088 
3089   static ReturnInst* Create(LLVMContext &C, Value *retVal,
3090                             BasicBlock *InsertAtEnd) {
3091     return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3092   }
3093 
3094   static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3095     return new(0) ReturnInst(C, InsertAtEnd);
3096   }
3097 
3098   /// Provide fast operand accessors
3099   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3100 
3101   /// Convenience accessor. Returns null if there is no return value.
3102   Value *getReturnValue() const {
3103     return getNumOperands() != 0 ? getOperand(0) : nullptr;
3104   }
3105 
3106   unsigned getNumSuccessors() const { return 0; }
3107 
3108   // Methods for support type inquiry through isa, cast, and dyn_cast:
3109   static bool classof(const Instruction *I) {
3110     return (I->getOpcode() == Instruction::Ret);
3111   }
3112   static bool classof(const Value *V) {
3113     return isa<Instruction>(V) && classof(cast<Instruction>(V));
3114   }
3115 
3116 private:
3117   BasicBlock *getSuccessor(unsigned idx) const {
3118     llvm_unreachable("ReturnInst has no successors!");
3119   }
3120 
3121   void setSuccessor(unsigned idx, BasicBlock *B) {
3122     llvm_unreachable("ReturnInst has no successors!");
3123   }
3124 };
3125 
3126 template <>
3127 struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3128 };
3129 
3130 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
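
// A minimal usage sketch (illustrative only): terminate a block with either a
// `ret void` or a `ret <ty>`. `Ctx` is the LLVMContext, `BB` the block being
// terminated, and `RetVal` a value of the function's return type; a block
// gets only one of these terminators.
//
//   ReturnInst::Create(Ctx, /*retVal=*/nullptr, BB); // ret void
//   ReturnInst::Create(Ctx, RetVal, BB);             // ret <ty> %RetVal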
3131 
3132 //===----------------------------------------------------------------------===//
3133 //                               BranchInst Class
3134 //===----------------------------------------------------------------------===//
3135 
3136 //===---------------------------------------------------------------------------
3137 /// Conditional or Unconditional Branch instruction.
3138 ///
3139 class BranchInst : public Instruction {
3140   /// Ops list - Branches are strange.  The operands are ordered:
3141   ///  [Cond, FalseDest,] TrueDest.  This makes some accessors faster because
3142   /// they don't have to check for cond/uncond branchness. These are mostly
  /// accessed relative to op_end().
3144   BranchInst(const BranchInst &BI);
3145   // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3146   // BranchInst(BB *B)                           - 'br B'
3147   // BranchInst(BB* T, BB *F, Value *C)          - 'br C, T, F'
3148   // BranchInst(BB* B, Inst *I)                  - 'br B'        insert before I
3149   // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3150   // BranchInst(BB* B, BB *I)                    - 'br B'        insert at end
3151   // BranchInst(BB* T, BB *F, Value *C, BB *I)   - 'br C, T, F', insert at end
3152   explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3153   BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3154              Instruction *InsertBefore = nullptr);
3155   BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3156   BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3157              BasicBlock *InsertAtEnd);
3158 
3159   void AssertOK();
3160 
3161 protected:
3162   // Note: Instruction needs to be a friend here to call cloneImpl.
3163   friend class Instruction;
3164 
3165   BranchInst *cloneImpl() const;
3166 
3167 public:
3168   /// Iterator type that casts an operand to a basic block.
3169   ///
3170   /// This only makes sense because the successors are stored as adjacent
3171   /// operands for branch instructions.
3172   struct succ_op_iterator
3173       : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3174                               std::random_access_iterator_tag, BasicBlock *,
3175                               ptrdiff_t, BasicBlock *, BasicBlock *> {
3176     explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3177 
3178     BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3179     BasicBlock *operator->() const { return operator*(); }
3180   };
3181 
3182   /// The const version of `succ_op_iterator`.
3183   struct const_succ_op_iterator
3184       : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3185                               std::random_access_iterator_tag,
3186                               const BasicBlock *, ptrdiff_t, const BasicBlock *,
3187                               const BasicBlock *> {
3188     explicit const_succ_op_iterator(const_value_op_iterator I)
3189         : iterator_adaptor_base(I) {}
3190 
3191     const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3192     const BasicBlock *operator->() const { return operator*(); }
3193   };
3194 
3195   static BranchInst *Create(BasicBlock *IfTrue,
3196                             Instruction *InsertBefore = nullptr) {
3197     return new(1) BranchInst(IfTrue, InsertBefore);
3198   }
3199 
3200   static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3201                             Value *Cond, Instruction *InsertBefore = nullptr) {
3202     return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3203   }
3204 
3205   static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3206     return new(1) BranchInst(IfTrue, InsertAtEnd);
3207   }
3208 
3209   static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3210                             Value *Cond, BasicBlock *InsertAtEnd) {
3211     return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3212   }
3213 
3214   /// Transparently provide more efficient getOperand methods.
3215   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3216 
3217   bool isUnconditional() const { return getNumOperands() == 1; }
3218   bool isConditional()   const { return getNumOperands() == 3; }
3219 
3220   Value *getCondition() const {
3221     assert(isConditional() && "Cannot get condition of an uncond branch!");
3222     return Op<-3>();
3223   }
3224 
3225   void setCondition(Value *V) {
3226     assert(isConditional() && "Cannot set condition of unconditional branch!");
3227     Op<-3>() = V;
3228   }
3229 
3230   unsigned getNumSuccessors() const { return 1+isConditional(); }
3231 
3232   BasicBlock *getSuccessor(unsigned i) const {
3233     assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3234     return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3235   }
3236 
3237   void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3238     assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3239     *(&Op<-1>() - idx) = NewSucc;
3240   }
3241 
3242   /// Swap the successors of this branch instruction.
3243   ///
3244   /// Swaps the successors of the branch instruction. This also swaps any
3245   /// branch weight metadata associated with the instruction so that it
3246   /// continues to map correctly to each operand.
3247   void swapSuccessors();
3248 
3249   iterator_range<succ_op_iterator> successors() {
3250     return make_range(
3251         succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3252         succ_op_iterator(value_op_end()));
3253   }
3254 
3255   iterator_range<const_succ_op_iterator> successors() const {
3256     return make_range(const_succ_op_iterator(
3257                           std::next(value_op_begin(), isConditional() ? 1 : 0)),
3258                       const_succ_op_iterator(value_op_end()));
3259   }
3260 
3261   // Methods for support type inquiry through isa, cast, and dyn_cast:
3262   static bool classof(const Instruction *I) {
3263     return (I->getOpcode() == Instruction::Br);
3264   }
3265   static bool classof(const Value *V) {
3266     return isa<Instruction>(V) && classof(cast<Instruction>(V));
3267   }
3268 };
3269 
3270 template <>
3271 struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3272 };
3273 
3274 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
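
// A minimal usage sketch (illustrative only): `Cond` is an i1 value and
// `ThenBB`, `ElseBB`, `CurBB` are existing blocks, with `CurBB` not yet
// terminated; all are assumed to come from the caller.
//
//   BranchInst *BI = BranchInst::Create(ThenBB, ElseBB, Cond, CurBB);
//   assert(BI->isConditional() && BI->getSuccessor(0) == ThenBB);
//
//   // The unconditional form takes a single destination:
//   //   BranchInst::Create(ThenBB, CurBB);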
3275 
3276 //===----------------------------------------------------------------------===//
3277 //                               SwitchInst Class
3278 //===----------------------------------------------------------------------===//
3279 
3280 //===---------------------------------------------------------------------------
3281 /// Multiway switch
3282 ///
3283 class SwitchInst : public Instruction {
3284   unsigned ReservedSpace;
3285 
3286   // Operand[0]    = Value to switch on
3287   // Operand[1]    = Default basic block destination
3288   // Operand[2n  ] = Value to match
3289   // Operand[2n+1] = BasicBlock to go to on match
3290   SwitchInst(const SwitchInst &SI);
3291 
3292   /// Create a new switch instruction, specifying a value to switch on and a
3293   /// default destination. The number of additional cases can be specified here
3294   /// to make memory allocation more efficient. This constructor can also
3295   /// auto-insert before another instruction.
3296   SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3297              Instruction *InsertBefore);
3298 
3299   /// Create a new switch instruction, specifying a value to switch on and a
3300   /// default destination. The number of additional cases can be specified here
3301   /// to make memory allocation more efficient. This constructor also
3302   /// auto-inserts at the end of the specified BasicBlock.
3303   SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3304              BasicBlock *InsertAtEnd);
3305 
3306   // allocate space for exactly zero operands
3307   void *operator new(size_t S) { return User::operator new(S); }
3308 
3309   void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3310   void growOperands();
3311 
3312 protected:
3313   // Note: Instruction needs to be a friend here to call cloneImpl.
3314   friend class Instruction;
3315 
3316   SwitchInst *cloneImpl() const;
3317 
3318 public:
3319   void operator delete(void *Ptr) { User::operator delete(Ptr); }
3320 
  // The pseudo-index used to identify the default case, i.e. ~0U - 1 (-2).
3322   static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3323 
3324   template <typename CaseHandleT> class CaseIteratorImpl;
3325 
3326   /// A handle to a particular switch case. It exposes a convenient interface
3327   /// to both the case value and the successor block.
3328   ///
3329   /// We define this as a template and instantiate it to form both a const and
3330   /// non-const handle.
3331   template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3332   class CaseHandleImpl {
3333     // Directly befriend both const and non-const iterators.
3334     friend class SwitchInst::CaseIteratorImpl<
3335         CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3336 
3337   protected:
3338     // Expose the switch type we're parameterized with to the iterator.
3339     using SwitchInstType = SwitchInstT;
3340 
3341     SwitchInstT *SI;
3342     ptrdiff_t Index;
3343 
3344     CaseHandleImpl() = default;
3345     CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3346 
3347   public:
3348     /// Resolves case value for current case.
3349     ConstantIntT *getCaseValue() const {
3350       assert((unsigned)Index < SI->getNumCases() &&
             "Index out of range for the number of cases.");
3352       return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3353     }
3354 
3355     /// Resolves successor for current case.
3356     BasicBlockT *getCaseSuccessor() const {
3357       assert(((unsigned)Index < SI->getNumCases() ||
3358               (unsigned)Index == DefaultPseudoIndex) &&
             "Index out of range for the number of cases.");
3360       return SI->getSuccessor(getSuccessorIndex());
3361     }
3362 
    /// Returns the index of the current case.
3364     unsigned getCaseIndex() const { return Index; }
3365 
    /// Returns the successor index for the current case's successor.
3367     unsigned getSuccessorIndex() const {
3368       assert(((unsigned)Index == DefaultPseudoIndex ||
3369               (unsigned)Index < SI->getNumCases()) &&
             "Index out of range for the number of cases.");
3371       return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3372     }
3373 
3374     bool operator==(const CaseHandleImpl &RHS) const {
3375       assert(SI == RHS.SI && "Incompatible operators.");
3376       return Index == RHS.Index;
3377     }
3378   };
3379 
3380   using ConstCaseHandle =
3381       CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3382 
3383   class CaseHandle
3384       : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3385     friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3386 
3387   public:
3388     CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3389 
    /// Sets a new value for the current case.
3391     void setValue(ConstantInt *V) const {
3392       assert((unsigned)Index < SI->getNumCases() &&
             "Index out of range for the number of cases.");
3394       SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3395     }
3396 
    /// Sets a new successor for the current case.
3398     void setSuccessor(BasicBlock *S) const {
3399       SI->setSuccessor(getSuccessorIndex(), S);
3400     }
3401   };
3402 
3403   template <typename CaseHandleT>
3404   class CaseIteratorImpl
3405       : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3406                                     std::random_access_iterator_tag,
3407                                     const CaseHandleT> {
3408     using SwitchInstT = typename CaseHandleT::SwitchInstType;
3409 
3410     CaseHandleT Case;
3411 
3412   public:
    /// A default-constructed iterator is in an invalid state until it is
    /// assigned to a case of a particular switch.
3415     CaseIteratorImpl() = default;
3416 
3417     /// Initializes case iterator for given SwitchInst and for given
3418     /// case number.
3419     CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3420 
3421     /// Initializes case iterator for given SwitchInst and for given
3422     /// successor index.
3423     static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3424                                                unsigned SuccessorIndex) {
3425       assert(SuccessorIndex < SI->getNumSuccessors() &&
3426              "Successor index # out of range!");
3427       return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3428                                  : CaseIteratorImpl(SI, DefaultPseudoIndex);
3429     }
3430 
    /// Support converting to the const variant. This will be a no-op for the
    /// const variant.
3433     operator CaseIteratorImpl<ConstCaseHandle>() const {
3434       return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3435     }
3436 
3437     CaseIteratorImpl &operator+=(ptrdiff_t N) {
3438       // Check index correctness after addition.
3439       // Note: Index == getNumCases() means end().
3440       assert(Case.Index + N >= 0 &&
3441              (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
             "Case.Index out of range for the number of cases.");
3443       Case.Index += N;
3444       return *this;
3445     }
3446     CaseIteratorImpl &operator-=(ptrdiff_t N) {
3447       // Check index correctness after subtraction.
3448       // Note: Case.Index == getNumCases() means end().
3449       assert(Case.Index - N >= 0 &&
3450              (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
             "Case.Index out of range for the number of cases.");
3452       Case.Index -= N;
3453       return *this;
3454     }
3455     ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3456       assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3457       return Case.Index - RHS.Case.Index;
3458     }
3459     bool operator==(const CaseIteratorImpl &RHS) const {
3460       return Case == RHS.Case;
3461     }
3462     bool operator<(const CaseIteratorImpl &RHS) const {
3463       assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3464       return Case.Index < RHS.Case.Index;
3465     }
3466     const CaseHandleT &operator*() const { return Case; }
3467   };
3468 
3469   using CaseIt = CaseIteratorImpl<CaseHandle>;
3470   using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3471 
3472   static SwitchInst *Create(Value *Value, BasicBlock *Default,
3473                             unsigned NumCases,
3474                             Instruction *InsertBefore = nullptr) {
3475     return new SwitchInst(Value, Default, NumCases, InsertBefore);
3476   }
3477 
3478   static SwitchInst *Create(Value *Value, BasicBlock *Default,
3479                             unsigned NumCases, BasicBlock *InsertAtEnd) {
3480     return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3481   }
3482 
3483   /// Provide fast operand accessors
3484   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3485 
3486   // Accessor Methods for Switch stmt
3487   Value *getCondition() const { return getOperand(0); }
3488   void setCondition(Value *V) { setOperand(0, V); }
3489 
3490   BasicBlock *getDefaultDest() const {
3491     return cast<BasicBlock>(getOperand(1));
3492   }
3493 
3494   void setDefaultDest(BasicBlock *DefaultCase) {
3495     setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3496   }
3497 
3498   /// Return the number of 'cases' in this switch instruction, excluding the
3499   /// default case.
3500   unsigned getNumCases() const {
3501     return getNumOperands()/2 - 1;
3502   }
3503 
3504   /// Returns a read/write iterator that points to the first case in the
3505   /// SwitchInst.
3506   CaseIt case_begin() {
3507     return CaseIt(this, 0);
3508   }
3509 
3510   /// Returns a read-only iterator that points to the first case in the
3511   /// SwitchInst.
3512   ConstCaseIt case_begin() const {
3513     return ConstCaseIt(this, 0);
3514   }
3515 
  /// Returns a read/write iterator that points one past the last case in the
  /// SwitchInst.
3518   CaseIt case_end() {
3519     return CaseIt(this, getNumCases());
3520   }
3521 
  /// Returns a read-only iterator that points one past the last case in the
  /// SwitchInst.
3524   ConstCaseIt case_end() const {
3525     return ConstCaseIt(this, getNumCases());
3526   }
3527 
3528   /// Iteration adapter for range-for loops.
3529   iterator_range<CaseIt> cases() {
3530     return make_range(case_begin(), case_end());
3531   }
3532 
3533   /// Constant iteration adapter for range-for loops.
3534   iterator_range<ConstCaseIt> cases() const {
3535     return make_range(case_begin(), case_end());
3536   }
3537 
  /// Returns an iterator that points to the default case.
  /// Note: this iterator only allows resolving the successor; attempting to
  /// resolve the case value causes an assertion.
  /// Also note that incrementing or decrementing this iterator causes an
  /// assertion and makes the iterator invalid.
3543   CaseIt case_default() {
3544     return CaseIt(this, DefaultPseudoIndex);
3545   }
3546   ConstCaseIt case_default() const {
3547     return ConstCaseIt(this, DefaultPseudoIndex);
3548   }
3549 
  /// Search all of the case values for the specified constant. If it is
  /// explicitly handled, return its case iterator; otherwise return the
  /// default case iterator to indicate that it is handled by the default
  /// handler.
3554   CaseIt findCaseValue(const ConstantInt *C) {
3555     return CaseIt(
3556         this,
3557         const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3558   }
3559   ConstCaseIt findCaseValue(const ConstantInt *C) const {
3560     ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3561       return Case.getCaseValue() == C;
3562     });
3563     if (I != case_end())
3564       return I;
3565 
3566     return case_default();
3567   }
3568 
3569   /// Finds the unique case value for a given successor. Returns null if the
3570   /// successor is not found, not unique, or is the default case.
3571   ConstantInt *findCaseDest(BasicBlock *BB) {
3572     if (BB == getDefaultDest())
3573       return nullptr;
3574 
3575     ConstantInt *CI = nullptr;
3576     for (auto Case : cases()) {
3577       if (Case.getCaseSuccessor() != BB)
3578         continue;
3579 
3580       if (CI)
3581         return nullptr; // Multiple cases lead to BB.
3582 
3583       CI = Case.getCaseValue();
3584     }
3585 
3586     return CI;
3587   }
3588 
3589   /// Add an entry to the switch instruction.
3590   /// Note:
  /// This action invalidates case_end(). The old case_end() iterator will
  /// point to the added case.
3593   void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3594 
3595   /// This method removes the specified case and its successor from the switch
3596   /// instruction. Note that this operation may reorder the remaining cases at
3597   /// index idx and above.
3598   /// Note:
3599   /// This action invalidates iterators for all cases following the one removed,
3600   /// including the case_end() iterator. It returns an iterator for the next
3601   /// case.
3602   CaseIt removeCase(CaseIt I);
3603 
3604   unsigned getNumSuccessors() const { return getNumOperands()/2; }
3605   BasicBlock *getSuccessor(unsigned idx) const {
3606     assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3607     return cast<BasicBlock>(getOperand(idx*2+1));
3608   }
3609   void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3610     assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3611     setOperand(idx * 2 + 1, NewSucc);
3612   }
3613 
3614   // Methods for support type inquiry through isa, cast, and dyn_cast:
3615   static bool classof(const Instruction *I) {
3616     return I->getOpcode() == Instruction::Switch;
3617   }
3618   static bool classof(const Value *V) {
3619     return isa<Instruction>(V) && classof(cast<Instruction>(V));
3620   }
3621 };
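
// A minimal usage sketch (illustrative only): switch on an i32 `Cond` with one
// explicit case. `Ctx`, `Cond`, `DefaultBB`, `CaseBB`, and `CurBB` are assumed
// to be supplied by the caller.
//
//   SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/1, CurBB);
//   SI->addCase(ConstantInt::get(Type::getInt32Ty(Ctx), 42), CaseBB);
//   for (const auto &Case : SI->cases())
//     assert(Case.getCaseValue()->equalsInt(42) &&
//            Case.getCaseSuccessor() == CaseBB);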
3622 
3623 /// A wrapper class to simplify modification of SwitchInst cases along with
3624 /// their prof branch_weights metadata.
3625 class SwitchInstProfUpdateWrapper {
3626   SwitchInst &SI;
3627   std::optional<SmallVector<uint32_t, 8>> Weights;
3628   bool Changed = false;
3629 
3630 protected:
3631   MDNode *buildProfBranchWeightsMD();
3632 
3633   void init();
3634 
3635 public:
3636   using CaseWeightOpt = std::optional<uint32_t>;
3637   SwitchInst *operator->() { return &SI; }
3638   SwitchInst &operator*() { return SI; }
3639   operator SwitchInst *() { return &SI; }
3640 
3641   SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3642 
3643   ~SwitchInstProfUpdateWrapper() {
3644     if (Changed)
3645       SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3646   }
3647 
  /// Delegate the call to the underlying SwitchInst::removeCase() and remove
  /// the corresponding branch weight.
3650   SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3651 
3652   /// Delegate the call to the underlying SwitchInst::addCase() and set the
3653   /// specified branch weight for the added case.
3654   void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3655 
  /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
  /// this object to not touch the underlying SwitchInst in the destructor.
3658   SymbolTableList<Instruction>::iterator eraseFromParent();
3659 
3660   void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3661   CaseWeightOpt getSuccessorWeight(unsigned idx);
3662 
3663   static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3664 };
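
// A minimal usage sketch (illustrative only): route case updates through the
// wrapper so any !prof branch_weights metadata stays consistent. `SI`,
// `OnVal`, and `DestBB` are assumed to exist in the caller.
//
//   {
//     SwitchInstProfUpdateWrapper SIW(*SI);
//     SIW.addCase(OnVal, DestBB, /*W=*/std::nullopt); // weight unknown
//   } // the destructor rewrites the prof metadata if anything changed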
3665 
3666 template <>
3667 struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3668 };
3669 
3670 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
3671 
3672 //===----------------------------------------------------------------------===//
3673 //                             IndirectBrInst Class
3674 //===----------------------------------------------------------------------===//
3675 
3676 //===---------------------------------------------------------------------------
3677 /// Indirect Branch Instruction.
3678 ///
3679 class IndirectBrInst : public Instruction {
3680   unsigned ReservedSpace;
3681 
3682   // Operand[0]   = Address to jump to
3683   // Operand[n+1] = n-th destination
3684   IndirectBrInst(const IndirectBrInst &IBI);
3685 
3686   /// Create a new indirectbr instruction, specifying an
3687   /// Address to jump to.  The number of expected destinations can be specified
3688   /// here to make memory allocation more efficient.  This constructor can also
3689   /// autoinsert before another instruction.
3690   IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3691 
3692   /// Create a new indirectbr instruction, specifying an
3693   /// Address to jump to.  The number of expected destinations can be specified
3694   /// here to make memory allocation more efficient.  This constructor also
3695   /// autoinserts at the end of the specified BasicBlock.
3696   IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3697 
3698   // allocate space for exactly zero operands
3699   void *operator new(size_t S) { return User::operator new(S); }
3700 
3701   void init(Value *Address, unsigned NumDests);
3702   void growOperands();
3703 
3704 protected:
3705   // Note: Instruction needs to be a friend here to call cloneImpl.
3706   friend class Instruction;
3707 
3708   IndirectBrInst *cloneImpl() const;
3709 
3710 public:
3711   void operator delete(void *Ptr) { User::operator delete(Ptr); }
3712 
3713   /// Iterator type that casts an operand to a basic block.
3714   ///
3715   /// This only makes sense because the successors are stored as adjacent
3716   /// operands for indirectbr instructions.
3717   struct succ_op_iterator
3718       : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3719                               std::random_access_iterator_tag, BasicBlock *,
3720                               ptrdiff_t, BasicBlock *, BasicBlock *> {
3721     explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3722 
3723     BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3724     BasicBlock *operator->() const { return operator*(); }
3725   };
3726 
3727   /// The const version of `succ_op_iterator`.
3728   struct const_succ_op_iterator
3729       : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3730                               std::random_access_iterator_tag,
3731                               const BasicBlock *, ptrdiff_t, const BasicBlock *,
3732                               const BasicBlock *> {
3733     explicit const_succ_op_iterator(const_value_op_iterator I)
3734         : iterator_adaptor_base(I) {}
3735 
3736     const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3737     const BasicBlock *operator->() const { return operator*(); }
3738   };
3739 
3740   static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3741                                 Instruction *InsertBefore = nullptr) {
3742     return new IndirectBrInst(Address, NumDests, InsertBefore);
3743   }
3744 
3745   static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3746                                 BasicBlock *InsertAtEnd) {
3747     return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3748   }
3749 
3750   /// Provide fast operand accessors.
3751   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3752 
3753   // Accessor Methods for IndirectBrInst instruction.
3754   Value *getAddress() { return getOperand(0); }
3755   const Value *getAddress() const { return getOperand(0); }
3756   void setAddress(Value *V) { setOperand(0, V); }
3757 
  /// Return the number of possible destinations in this
  /// indirectbr instruction.
3760   unsigned getNumDestinations() const { return getNumOperands()-1; }
3761 
3762   /// Return the specified destination.
3763   BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3764   const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3765 
3766   /// Add a destination.
3767   ///
3768   void addDestination(BasicBlock *Dest);
3769 
3770   /// This method removes the specified successor from the
3771   /// indirectbr instruction.
3772   void removeDestination(unsigned i);
3773 
3774   unsigned getNumSuccessors() const { return getNumOperands()-1; }
3775   BasicBlock *getSuccessor(unsigned i) const {
3776     return cast<BasicBlock>(getOperand(i+1));
3777   }
3778   void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3779     setOperand(i + 1, NewSucc);
3780   }
3781 
3782   iterator_range<succ_op_iterator> successors() {
3783     return make_range(succ_op_iterator(std::next(value_op_begin())),
3784                       succ_op_iterator(value_op_end()));
3785   }
3786 
3787   iterator_range<const_succ_op_iterator> successors() const {
3788     return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3789                       const_succ_op_iterator(value_op_end()));
3790   }
3791 
3792   // Methods for support type inquiry through isa, cast, and dyn_cast:
3793   static bool classof(const Instruction *I) {
3794     return I->getOpcode() == Instruction::IndirectBr;
3795   }
3796   static bool classof(const Value *V) {
3797     return isa<Instruction>(V) && classof(cast<Instruction>(V));
3798   }
3799 };
3800 
3801 template <>
3802 struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3803 };
3804 
3805 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
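
// A minimal usage sketch (illustrative only): an indirectbr through a
// blockaddress. `TargetBB` and `CurBB` are assumed to be blocks of the same
// function; BlockAddress is declared in llvm/IR/Constants.h.
//
//   Value *Addr = BlockAddress::get(TargetBB);
//   IndirectBrInst *IBI = IndirectBrInst::Create(Addr, /*NumDests=*/1, CurBB);
//   IBI->addDestination(TargetBB);
//   assert(IBI->getNumDestinations() == 1);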
3806 
3807 //===----------------------------------------------------------------------===//
3808 //                               InvokeInst Class
3809 //===----------------------------------------------------------------------===//
3810 
3811 /// Invoke instruction.  The SubclassData field is used to hold the
3812 /// calling convention of the call.
3813 ///
3814 class InvokeInst : public CallBase {
3815   /// The number of operands for this call beyond the called function,
3816   /// arguments, and operand bundles.
3817   static constexpr int NumExtraOperands = 2;
3818 
3819   /// The index from the end of the operand array to the normal destination.
3820   static constexpr int NormalDestOpEndIdx = -3;
3821 
3822   /// The index from the end of the operand array to the unwind destination.
3823   static constexpr int UnwindDestOpEndIdx = -2;
3824 
3825   InvokeInst(const InvokeInst &BI);
3826 
  /// Construct an InvokeInst given a range of arguments.
3830   inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3831                     BasicBlock *IfException, ArrayRef<Value *> Args,
3832                     ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3833                     const Twine &NameStr, Instruction *InsertBefore);
3834 
3835   inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3836                     BasicBlock *IfException, ArrayRef<Value *> Args,
3837                     ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3838                     const Twine &NameStr, BasicBlock *InsertAtEnd);
3839 
3840   void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3841             BasicBlock *IfException, ArrayRef<Value *> Args,
3842             ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3843 
3844   /// Compute the number of operands to allocate.
3845   static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3846     // We need one operand for the called function, plus our extra operands and
3847     // the input operand counts provided.
3848     return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3849   }
3850 
3851 protected:
3852   // Note: Instruction needs to be a friend here to call cloneImpl.
3853   friend class Instruction;
3854 
3855   InvokeInst *cloneImpl() const;
3856 
3857 public:
3858   static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3859                             BasicBlock *IfException, ArrayRef<Value *> Args,
3860                             const Twine &NameStr,
3861                             Instruction *InsertBefore = nullptr) {
3862     int NumOperands = ComputeNumOperands(Args.size());
3863     return new (NumOperands)
3864         InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
3865                    NumOperands, NameStr, InsertBefore);
3866   }
3867 
3868   static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3869                             BasicBlock *IfException, ArrayRef<Value *> Args,
3870                             ArrayRef<OperandBundleDef> Bundles = std::nullopt,
3871                             const Twine &NameStr = "",
3872                             Instruction *InsertBefore = nullptr) {
3873     int NumOperands =
3874         ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3875     unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3876 
3877     return new (NumOperands, DescriptorBytes)
3878         InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3879                    NameStr, InsertBefore);
3880   }
3881 
3882   static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3883                             BasicBlock *IfException, ArrayRef<Value *> Args,
3884                             const Twine &NameStr, BasicBlock *InsertAtEnd) {
3885     int NumOperands = ComputeNumOperands(Args.size());
3886     return new (NumOperands)
3887         InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
3888                    NumOperands, NameStr, InsertAtEnd);
3889   }
3890 
3891   static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3892                             BasicBlock *IfException, ArrayRef<Value *> Args,
3893                             ArrayRef<OperandBundleDef> Bundles,
3894                             const Twine &NameStr, BasicBlock *InsertAtEnd) {
3895     int NumOperands =
3896         ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3897     unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3898 
3899     return new (NumOperands, DescriptorBytes)
3900         InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3901                    NameStr, InsertAtEnd);
3902   }
3903 
3904   static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3905                             BasicBlock *IfException, ArrayRef<Value *> Args,
3906                             const Twine &NameStr,
3907                             Instruction *InsertBefore = nullptr) {
3908     return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3909                   IfException, Args, std::nullopt, NameStr, InsertBefore);
3910   }
3911 
3912   static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3913                             BasicBlock *IfException, ArrayRef<Value *> Args,
3914                             ArrayRef<OperandBundleDef> Bundles = std::nullopt,
3915                             const Twine &NameStr = "",
3916                             Instruction *InsertBefore = nullptr) {
3917     return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3918                   IfException, Args, Bundles, NameStr, InsertBefore);
3919   }
3920 
3921   static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3922                             BasicBlock *IfException, ArrayRef<Value *> Args,
3923                             const Twine &NameStr, BasicBlock *InsertAtEnd) {
3924     return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3925                   IfException, Args, NameStr, InsertAtEnd);
3926   }
3927 
3928   static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3929                             BasicBlock *IfException, ArrayRef<Value *> Args,
3930                             ArrayRef<OperandBundleDef> Bundles,
3931                             const Twine &NameStr, BasicBlock *InsertAtEnd) {
3932     return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3933                   IfException, Args, Bundles, NameStr, InsertAtEnd);
3934   }
3935 
3936   /// Create a clone of \p II with a different set of operand bundles and
3937   /// insert it before \p InsertPt.
3938   ///
3939   /// The returned invoke instruction is identical to \p II in every way except
3940   /// that the operand bundles for the new instruction are set to the operand
3941   /// bundles in \p Bundles.
3942   static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3943                             Instruction *InsertPt = nullptr);
3944 
3945   // get*Dest - Return the destination basic blocks...
3946   BasicBlock *getNormalDest() const {
3947     return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3948   }
3949   BasicBlock *getUnwindDest() const {
3950     return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3951   }
3952   void setNormalDest(BasicBlock *B) {
3953     Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3954   }
3955   void setUnwindDest(BasicBlock *B) {
3956     Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3957   }
3958 
3959   /// Get the landingpad instruction from the landing pad
3960   /// block (the unwind destination).
3961   LandingPadInst *getLandingPadInst() const;
3962 
3963   BasicBlock *getSuccessor(unsigned i) const {
3964     assert(i < 2 && "Successor # out of range for invoke!");
3965     return i == 0 ? getNormalDest() : getUnwindDest();
3966   }
3967 
3968   void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3969     assert(i < 2 && "Successor # out of range for invoke!");
3970     if (i == 0)
3971       setNormalDest(NewSucc);
3972     else
3973       setUnwindDest(NewSucc);
3974   }
3975 
3976   unsigned getNumSuccessors() const { return 2; }
3977 
3978   // Methods for support type inquiry through isa, cast, and dyn_cast:
3979   static bool classof(const Instruction *I) {
3980     return (I->getOpcode() == Instruction::Invoke);
3981   }
3982   static bool classof(const Value *V) {
3983     return isa<Instruction>(V) && classof(cast<Instruction>(V));
3984   }
3985 
3986 private:
3987   // Shadow Instruction::setInstructionSubclassData with a private forwarding
3988   // method so that subclasses cannot accidentally use it.
3989   template <typename Bitfield>
3990   void setSubclassData(typename Bitfield::Type Value) {
3991     Instruction::setSubclassData<Bitfield>(Value);
3992   }
3993 };
3994 
3995 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3996                        BasicBlock *IfException, ArrayRef<Value *> Args,
3997                        ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3998                        const Twine &NameStr, Instruction *InsertBefore)
3999     : CallBase(Ty->getReturnType(), Instruction::Invoke,
4000                OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4001                InsertBefore) {
4002   init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
4003 }
4004 
4005 InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4006                        BasicBlock *IfException, ArrayRef<Value *> Args,
4007                        ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4008                        const Twine &NameStr, BasicBlock *InsertAtEnd)
4009     : CallBase(Ty->getReturnType(), Instruction::Invoke,
4010                OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4011                InsertAtEnd) {
4012   init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
4013 }
4014 
4015 //===----------------------------------------------------------------------===//
4016 //                              CallBrInst Class
4017 //===----------------------------------------------------------------------===//
4018 
4019 /// CallBr instruction, tracking function calls that may not return control but
4020 /// instead transfer it to a third location. The SubclassData field is used to
4021 /// hold the calling convention of the call.
4022 ///
4023 class CallBrInst : public CallBase {
4024 
4025   unsigned NumIndirectDests;
4026 
4027   CallBrInst(const CallBrInst &BI);
4028 
  /// Construct a CallBrInst given a range of arguments.
4032   inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4033                     ArrayRef<BasicBlock *> IndirectDests,
4034                     ArrayRef<Value *> Args,
4035                     ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4036                     const Twine &NameStr, Instruction *InsertBefore);
4037 
4038   inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4039                     ArrayRef<BasicBlock *> IndirectDests,
4040                     ArrayRef<Value *> Args,
4041                     ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4042                     const Twine &NameStr, BasicBlock *InsertAtEnd);
4043 
4044   void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
4045             ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
4046             ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
4047 
4048   /// Compute the number of operands to allocate.
4049   static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
4050                                 int NumBundleInputs = 0) {
    // We need one operand for the called function and one for the default
    // destination, plus one per indirect destination, argument and bundle
    // input.
4053     return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
4054   }
4055 
4056 protected:
4057   // Note: Instruction needs to be a friend here to call cloneImpl.
4058   friend class Instruction;
4059 
4060   CallBrInst *cloneImpl() const;
4061 
4062 public:
4063   static CallBrInst *Create(FunctionType *Ty, Value *Func,
4064                             BasicBlock *DefaultDest,
4065                             ArrayRef<BasicBlock *> IndirectDests,
4066                             ArrayRef<Value *> Args, const Twine &NameStr,
4067                             Instruction *InsertBefore = nullptr) {
4068     int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4069     return new (NumOperands)
4070         CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt,
4071                    NumOperands, NameStr, InsertBefore);
4072   }
4073 
4074   static CallBrInst *
4075   Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4076          ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
4077          ArrayRef<OperandBundleDef> Bundles = std::nullopt,
4078          const Twine &NameStr = "", Instruction *InsertBefore = nullptr) {
4079     int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4080                                          CountBundleInputs(Bundles));
4081     unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4082 
4083     return new (NumOperands, DescriptorBytes)
4084         CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4085                    NumOperands, NameStr, InsertBefore);
4086   }
4087 
4088   static CallBrInst *Create(FunctionType *Ty, Value *Func,
4089                             BasicBlock *DefaultDest,
4090                             ArrayRef<BasicBlock *> IndirectDests,
4091                             ArrayRef<Value *> Args, const Twine &NameStr,
4092                             BasicBlock *InsertAtEnd) {
4093     int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4094     return new (NumOperands)
4095         CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt,
4096                    NumOperands, NameStr, InsertAtEnd);
4097   }
4098 
4099   static CallBrInst *Create(FunctionType *Ty, Value *Func,
4100                             BasicBlock *DefaultDest,
4101                             ArrayRef<BasicBlock *> IndirectDests,
4102                             ArrayRef<Value *> Args,
4103                             ArrayRef<OperandBundleDef> Bundles,
4104                             const Twine &NameStr, BasicBlock *InsertAtEnd) {
4105     int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4106                                          CountBundleInputs(Bundles));
4107     unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4108 
4109     return new (NumOperands, DescriptorBytes)
4110         CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4111                    NumOperands, NameStr, InsertAtEnd);
4112   }
4113 
4114   static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4115                             ArrayRef<BasicBlock *> IndirectDests,
4116                             ArrayRef<Value *> Args, const Twine &NameStr,
4117                             Instruction *InsertBefore = nullptr) {
4118     return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4119                   IndirectDests, Args, NameStr, InsertBefore);
4120   }
4121 
4122   static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4123                             ArrayRef<BasicBlock *> IndirectDests,
4124                             ArrayRef<Value *> Args,
4125                             ArrayRef<OperandBundleDef> Bundles = std::nullopt,
4126                             const Twine &NameStr = "",
4127                             Instruction *InsertBefore = nullptr) {
4128     return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4129                   IndirectDests, Args, Bundles, NameStr, InsertBefore);
4130   }
4131 
4132   static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4133                             ArrayRef<BasicBlock *> IndirectDests,
4134                             ArrayRef<Value *> Args, const Twine &NameStr,
4135                             BasicBlock *InsertAtEnd) {
4136     return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4137                   IndirectDests, Args, NameStr, InsertAtEnd);
4138   }
4139 
4140   static CallBrInst *Create(FunctionCallee Func,
4141                             BasicBlock *DefaultDest,
4142                             ArrayRef<BasicBlock *> IndirectDests,
4143                             ArrayRef<Value *> Args,
4144                             ArrayRef<OperandBundleDef> Bundles,
4145                             const Twine &NameStr, BasicBlock *InsertAtEnd) {
4146     return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4147                   IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4148   }
4149 
4150   /// Create a clone of \p CBI with a different set of operand bundles and
4151   /// insert it before \p InsertPt.
4152   ///
4153   /// The returned callbr instruction is identical to \p CBI in every way
4154   /// except that the operand bundles for the new instruction are set to the
4155   /// operand bundles in \p Bundles.
4156   static CallBrInst *Create(CallBrInst *CBI,
4157                             ArrayRef<OperandBundleDef> Bundles,
4158                             Instruction *InsertPt = nullptr);
4159 
4160   /// Return the number of callbr indirect dest labels.
4161   ///
4162   unsigned getNumIndirectDests() const { return NumIndirectDests; }
4163 
  /// Return the i-th indirect dest label.
4165   ///
4166   Value *getIndirectDestLabel(unsigned i) const {
4167     assert(i < getNumIndirectDests() && "Out of bounds!");
4168     return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
4169   }
4170 
4171   Value *getIndirectDestLabelUse(unsigned i) const {
4172     assert(i < getNumIndirectDests() && "Out of bounds!");
4173     return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
4174   }
4175 
4176   // Return the destination basic blocks...
4177   BasicBlock *getDefaultDest() const {
4178     return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4179   }
4180   BasicBlock *getIndirectDest(unsigned i) const {
4181     return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4182   }
4183   SmallVector<BasicBlock *, 16> getIndirectDests() const {
4184     SmallVector<BasicBlock *, 16> IndirectDests;
4185     for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4186       IndirectDests.push_back(getIndirectDest(i));
4187     return IndirectDests;
4188   }
4189   void setDefaultDest(BasicBlock *B) {
4190     *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4191   }
4192   void setIndirectDest(unsigned i, BasicBlock *B) {
4193     *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4194   }
4195 
  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < getNumSuccessors() &&
           "Successor # out of range for callbr!");
    return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
  }

  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    assert(i < getNumSuccessors() &&
           "Successor # out of range for callbr!");
    if (i == 0)
      setDefaultDest(NewSucc);
    else
      setIndirectDest(i - 1, NewSucc);
  }
4207 
4208   unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4209 
4210   // Methods for support type inquiry through isa, cast, and dyn_cast:
4211   static bool classof(const Instruction *I) {
4212     return (I->getOpcode() == Instruction::CallBr);
4213   }
4214   static bool classof(const Value *V) {
4215     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4216   }
4217 
4218 private:
4219   // Shadow Instruction::setInstructionSubclassData with a private forwarding
4220   // method so that subclasses cannot accidentally use it.
4221   template <typename Bitfield>
4222   void setSubclassData(typename Bitfield::Type Value) {
4223     Instruction::setSubclassData<Bitfield>(Value);
4224   }
4225 };
4226 
4227 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4228                        ArrayRef<BasicBlock *> IndirectDests,
4229                        ArrayRef<Value *> Args,
4230                        ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4231                        const Twine &NameStr, Instruction *InsertBefore)
4232     : CallBase(Ty->getReturnType(), Instruction::CallBr,
4233                OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4234                InsertBefore) {
4235   init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4236 }
4237 
4238 CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4239                        ArrayRef<BasicBlock *> IndirectDests,
4240                        ArrayRef<Value *> Args,
4241                        ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4242                        const Twine &NameStr, BasicBlock *InsertAtEnd)
4243     : CallBase(Ty->getReturnType(), Instruction::CallBr,
4244                OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4245                InsertAtEnd) {
4246   init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4247 }
4248 
4249 //===----------------------------------------------------------------------===//
4250 //                              ResumeInst Class
4251 //===----------------------------------------------------------------------===//
4252 
4253 //===---------------------------------------------------------------------------
4254 /// Resume the propagation of an exception.
4255 ///
4256 class ResumeInst : public Instruction {
4257   ResumeInst(const ResumeInst &RI);
4258 
4259   explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4260   ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4261 
4262 protected:
4263   // Note: Instruction needs to be a friend here to call cloneImpl.
4264   friend class Instruction;
4265 
4266   ResumeInst *cloneImpl() const;
4267 
4268 public:
4269   static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4270     return new(1) ResumeInst(Exn, InsertBefore);
4271   }
4272 
4273   static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4274     return new(1) ResumeInst(Exn, InsertAtEnd);
4275   }
4276 
4277   /// Provide fast operand accessors
4278   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4279 
4280   /// Convenience accessor.
4281   Value *getValue() const { return Op<0>(); }
4282 
4283   unsigned getNumSuccessors() const { return 0; }
4284 
4285   // Methods for support type inquiry through isa, cast, and dyn_cast:
4286   static bool classof(const Instruction *I) {
4287     return I->getOpcode() == Instruction::Resume;
4288   }
4289   static bool classof(const Value *V) {
4290     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4291   }
4292 
4293 private:
4294   BasicBlock *getSuccessor(unsigned idx) const {
4295     llvm_unreachable("ResumeInst has no successors!");
4296   }
4297 
4298   void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4299     llvm_unreachable("ResumeInst has no successors!");
4300   }
4301 };
4302 
4303 template <>
4304 struct OperandTraits<ResumeInst> :
4305     public FixedNumOperandTraits<ResumeInst, 1> {
4306 };
4307 
4308 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
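
// Illustrative usage (a non-normative sketch; Exn is assumed to be the
// exception aggregate produced by a landingpad in the same function, and BB
// a placeholder block to terminate):
//
//   ResumeInst *RI = ResumeInst::Create(Exn, /*InsertAtEnd=*/BB);
//   assert(RI->getValue() == Exn && RI->getNumSuccessors() == 0);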
4309 
4310 //===----------------------------------------------------------------------===//
4311 //                         CatchSwitchInst Class
4312 //===----------------------------------------------------------------------===//
4313 class CatchSwitchInst : public Instruction {
4314   using UnwindDestField = BoolBitfieldElementT<0>;
4315 
4316   /// The number of operands actually allocated.  NumOperands is
4317   /// the number actually in use.
4318   unsigned ReservedSpace;
4319 
4320   // Operand[0] = Outer scope
4321   // Operand[1] = Unwind block destination
4322   // Operand[n] = BasicBlock to go to on match
4323   CatchSwitchInst(const CatchSwitchInst &CSI);
4324 
  /// Create a new catchswitch instruction, specifying an optional unwind
  /// destination.  The number of handlers can be specified here to make
  /// memory allocation more efficient.
  /// This constructor can also autoinsert before another instruction.
4329   CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4330                   unsigned NumHandlers, const Twine &NameStr,
4331                   Instruction *InsertBefore);
4332 
  /// Create a new catchswitch instruction, specifying an optional unwind
  /// destination.  The number of handlers can be specified here to make
  /// memory allocation more efficient.
  /// This constructor also autoinserts at the end of the specified BasicBlock.
4337   CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4338                   unsigned NumHandlers, const Twine &NameStr,
4339                   BasicBlock *InsertAtEnd);
4340 
4341   // allocate space for exactly zero operands
4342   void *operator new(size_t S) { return User::operator new(S); }
4343 
4344   void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4345   void growOperands(unsigned Size);
4346 
4347 protected:
4348   // Note: Instruction needs to be a friend here to call cloneImpl.
4349   friend class Instruction;
4350 
4351   CatchSwitchInst *cloneImpl() const;
4352 
4353 public:
4354   void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4355 
4356   static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4357                                  unsigned NumHandlers,
4358                                  const Twine &NameStr = "",
4359                                  Instruction *InsertBefore = nullptr) {
4360     return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4361                                InsertBefore);
4362   }
4363 
4364   static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4365                                  unsigned NumHandlers, const Twine &NameStr,
4366                                  BasicBlock *InsertAtEnd) {
4367     return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4368                                InsertAtEnd);
4369   }
4370 
4371   /// Provide fast operand accessors
4372   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4373 
4374   // Accessor Methods for CatchSwitch stmt
4375   Value *getParentPad() const { return getOperand(0); }
4376   void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4377 
  // Accessor Methods for the unwind destination
4379   bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4380   bool unwindsToCaller() const { return !hasUnwindDest(); }
4381   BasicBlock *getUnwindDest() const {
4382     if (hasUnwindDest())
4383       return cast<BasicBlock>(getOperand(1));
4384     return nullptr;
4385   }
4386   void setUnwindDest(BasicBlock *UnwindDest) {
4387     assert(UnwindDest);
4388     assert(hasUnwindDest());
4389     setOperand(1, UnwindDest);
4390   }
4391 
  /// Return the number of 'handlers' in this catchswitch
  /// instruction, not counting the unwind destination.
4394   unsigned getNumHandlers() const {
4395     if (hasUnwindDest())
4396       return getNumOperands() - 2;
4397     return getNumOperands() - 1;
4398   }
4399 
4400 private:
4401   static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4402   static const BasicBlock *handler_helper(const Value *V) {
4403     return cast<BasicBlock>(V);
4404   }
4405 
4406 public:
4407   using DerefFnTy = BasicBlock *(*)(Value *);
4408   using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4409   using handler_range = iterator_range<handler_iterator>;
4410   using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4411   using const_handler_iterator =
4412       mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4413   using const_handler_range = iterator_range<const_handler_iterator>;
4414 
  /// Returns an iterator that points to the first handler in the
  /// CatchSwitchInst.
4416   handler_iterator handler_begin() {
4417     op_iterator It = op_begin() + 1;
4418     if (hasUnwindDest())
4419       ++It;
4420     return handler_iterator(It, DerefFnTy(handler_helper));
4421   }
4422 
4423   /// Returns an iterator that points to the first handler in the
4424   /// CatchSwitchInst.
4425   const_handler_iterator handler_begin() const {
4426     const_op_iterator It = op_begin() + 1;
4427     if (hasUnwindDest())
4428       ++It;
4429     return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4430   }
4431 
  /// Returns an iterator that points one past the last handler in the
  /// CatchSwitchInst.
4434   handler_iterator handler_end() {
4435     return handler_iterator(op_end(), DerefFnTy(handler_helper));
4436   }
4437 
4438   /// Returns an iterator that points one past the last handler in the
4439   /// CatchSwitchInst.
4440   const_handler_iterator handler_end() const {
4441     return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4442   }
4443 
4444   /// iteration adapter for range-for loops.
4445   handler_range handlers() {
4446     return make_range(handler_begin(), handler_end());
4447   }
4448 
4449   /// iteration adapter for range-for loops.
4450   const_handler_range handlers() const {
4451     return make_range(handler_begin(), handler_end());
4452   }
4453 
  /// Add an entry to the catchswitch instruction.
  /// Note:
  /// This action invalidates handler_end(). The old handler_end() iterator
  /// will point to the added handler.
4458   void addHandler(BasicBlock *Dest);
4459 
4460   void removeHandler(handler_iterator HI);
4461 
4462   unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4463   BasicBlock *getSuccessor(unsigned Idx) const {
4464     assert(Idx < getNumSuccessors() &&
4465            "Successor # out of range for catchswitch!");
4466     return cast<BasicBlock>(getOperand(Idx + 1));
4467   }
4468   void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4469     assert(Idx < getNumSuccessors() &&
4470            "Successor # out of range for catchswitch!");
4471     setOperand(Idx + 1, NewSucc);
4472   }
4473 
4474   // Methods for support type inquiry through isa, cast, and dyn_cast:
4475   static bool classof(const Instruction *I) {
4476     return I->getOpcode() == Instruction::CatchSwitch;
4477   }
4478   static bool classof(const Value *V) {
4479     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4480   }
4481 };
4482 
4483 template <>
4484 struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4485 
4486 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
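
// Illustrative usage (a non-normative sketch; ParentPad, UnwindBB, Handler0,
// Handler1, visit, and the insertion block BB are placeholder names):
//
//   auto *CS = CatchSwitchInst::Create(ParentPad, UnwindBB, /*NumHandlers=*/2,
//                                      "cs", /*InsertAtEnd=*/BB);
//   CS->addHandler(Handler0);
//   CS->addHandler(Handler1);
//   for (BasicBlock *H : CS->handlers())
//     visit(H); // each handler block begins with a catchpad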
4487 
4488 //===----------------------------------------------------------------------===//
4489 //                               CleanupPadInst Class
4490 //===----------------------------------------------------------------------===//
4491 class CleanupPadInst : public FuncletPadInst {
4492 private:
4493   explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4494                           unsigned Values, const Twine &NameStr,
4495                           Instruction *InsertBefore)
4496       : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4497                        NameStr, InsertBefore) {}
4498   explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4499                           unsigned Values, const Twine &NameStr,
4500                           BasicBlock *InsertAtEnd)
4501       : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4502                        NameStr, InsertAtEnd) {}
4503 
4504 public:
4505   static CleanupPadInst *Create(Value *ParentPad,
4506                                 ArrayRef<Value *> Args = std::nullopt,
4507                                 const Twine &NameStr = "",
4508                                 Instruction *InsertBefore = nullptr) {
4509     unsigned Values = 1 + Args.size();
4510     return new (Values)
4511         CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4512   }
4513 
4514   static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4515                                 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4516     unsigned Values = 1 + Args.size();
4517     return new (Values)
4518         CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4519   }
4520 
4521   /// Methods for support type inquiry through isa, cast, and dyn_cast:
4522   static bool classof(const Instruction *I) {
4523     return I->getOpcode() == Instruction::CleanupPad;
4524   }
4525   static bool classof(const Value *V) {
4526     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4527   }
4528 };
4529 
4530 //===----------------------------------------------------------------------===//
4531 //                               CatchPadInst Class
4532 //===----------------------------------------------------------------------===//
4533 class CatchPadInst : public FuncletPadInst {
4534 private:
4535   explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4536                         unsigned Values, const Twine &NameStr,
4537                         Instruction *InsertBefore)
4538       : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4539                        NameStr, InsertBefore) {}
4540   explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4541                         unsigned Values, const Twine &NameStr,
4542                         BasicBlock *InsertAtEnd)
4543       : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4544                        NameStr, InsertAtEnd) {}
4545 
4546 public:
4547   static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4548                               const Twine &NameStr = "",
4549                               Instruction *InsertBefore = nullptr) {
4550     unsigned Values = 1 + Args.size();
4551     return new (Values)
4552         CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4553   }
4554 
4555   static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4556                               const Twine &NameStr, BasicBlock *InsertAtEnd) {
4557     unsigned Values = 1 + Args.size();
4558     return new (Values)
4559         CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4560   }
4561 
4562   /// Convenience accessors
4563   CatchSwitchInst *getCatchSwitch() const {
4564     return cast<CatchSwitchInst>(Op<-1>());
4565   }
4566   void setCatchSwitch(Value *CatchSwitch) {
4567     assert(CatchSwitch);
4568     Op<-1>() = CatchSwitch;
4569   }
4570 
4571   /// Methods for support type inquiry through isa, cast, and dyn_cast:
4572   static bool classof(const Instruction *I) {
4573     return I->getOpcode() == Instruction::CatchPad;
4574   }
4575   static bool classof(const Value *V) {
4576     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4577   }
4578 };
4579 
4580 //===----------------------------------------------------------------------===//
4581 //                               CatchReturnInst Class
4582 //===----------------------------------------------------------------------===//
4583 
4584 class CatchReturnInst : public Instruction {
4585   CatchReturnInst(const CatchReturnInst &RI);
4586   CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4587   CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4588 
4589   void init(Value *CatchPad, BasicBlock *BB);
4590 
4591 protected:
4592   // Note: Instruction needs to be a friend here to call cloneImpl.
4593   friend class Instruction;
4594 
4595   CatchReturnInst *cloneImpl() const;
4596 
4597 public:
4598   static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4599                                  Instruction *InsertBefore = nullptr) {
4600     assert(CatchPad);
4601     assert(BB);
4602     return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4603   }
4604 
4605   static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4606                                  BasicBlock *InsertAtEnd) {
4607     assert(CatchPad);
4608     assert(BB);
4609     return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4610   }
4611 
4612   /// Provide fast operand accessors
4613   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4614 
4615   /// Convenience accessors.
4616   CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4617   void setCatchPad(CatchPadInst *CatchPad) {
4618     assert(CatchPad);
4619     Op<0>() = CatchPad;
4620   }
4621 
4622   BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4623   void setSuccessor(BasicBlock *NewSucc) {
4624     assert(NewSucc);
4625     Op<1>() = NewSucc;
4626   }
4627   unsigned getNumSuccessors() const { return 1; }
4628 
4629   /// Get the parentPad of this catchret's catchpad's catchswitch.
4630   /// The successor block is implicitly a member of this funclet.
4631   Value *getCatchSwitchParentPad() const {
4632     return getCatchPad()->getCatchSwitch()->getParentPad();
4633   }
4634 
4635   // Methods for support type inquiry through isa, cast, and dyn_cast:
4636   static bool classof(const Instruction *I) {
4637     return (I->getOpcode() == Instruction::CatchRet);
4638   }
4639   static bool classof(const Value *V) {
4640     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4641   }
4642 
4643 private:
4644   BasicBlock *getSuccessor(unsigned Idx) const {
4645     assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4646     return getSuccessor();
4647   }
4648 
4649   void setSuccessor(unsigned Idx, BasicBlock *B) {
4650     assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4651     setSuccessor(B);
4652   }
4653 };
4654 
4655 template <>
4656 struct OperandTraits<CatchReturnInst>
4657     : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4658 
4659 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
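
// Illustrative usage (a non-normative sketch; CPI is a CatchPadInst, ContBB
// the block where normal control flow resumes, and HandlerBB a placeholder
// block ending the handler):
//
//   CatchReturnInst *CRI =
//       CatchReturnInst::Create(CPI, ContBB, /*InsertAtEnd=*/HandlerBB);
//   assert(CRI->getCatchPad() == CPI && CRI->getSuccessor() == ContBB);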
4660 
4661 //===----------------------------------------------------------------------===//
4662 //                               CleanupReturnInst Class
4663 //===----------------------------------------------------------------------===//
4664 
4665 class CleanupReturnInst : public Instruction {
4666   using UnwindDestField = BoolBitfieldElementT<0>;
4667 
4668 private:
4669   CleanupReturnInst(const CleanupReturnInst &RI);
4670   CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4671                     Instruction *InsertBefore = nullptr);
4672   CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4673                     BasicBlock *InsertAtEnd);
4674 
4675   void init(Value *CleanupPad, BasicBlock *UnwindBB);
4676 
4677 protected:
4678   // Note: Instruction needs to be a friend here to call cloneImpl.
4679   friend class Instruction;
4680 
4681   CleanupReturnInst *cloneImpl() const;
4682 
4683 public:
4684   static CleanupReturnInst *Create(Value *CleanupPad,
4685                                    BasicBlock *UnwindBB = nullptr,
4686                                    Instruction *InsertBefore = nullptr) {
4687     assert(CleanupPad);
4688     unsigned Values = 1;
4689     if (UnwindBB)
4690       ++Values;
4691     return new (Values)
4692         CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4693   }
4694 
4695   static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4696                                    BasicBlock *InsertAtEnd) {
4697     assert(CleanupPad);
4698     unsigned Values = 1;
4699     if (UnwindBB)
4700       ++Values;
4701     return new (Values)
4702         CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4703   }
4704 
4705   /// Provide fast operand accessors
4706   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4707 
4708   bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4709   bool unwindsToCaller() const { return !hasUnwindDest(); }
4710 
4711   /// Convenience accessor.
4712   CleanupPadInst *getCleanupPad() const {
4713     return cast<CleanupPadInst>(Op<0>());
4714   }
4715   void setCleanupPad(CleanupPadInst *CleanupPad) {
4716     assert(CleanupPad);
4717     Op<0>() = CleanupPad;
4718   }
4719 
4720   unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4721 
4722   BasicBlock *getUnwindDest() const {
4723     return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4724   }
4725   void setUnwindDest(BasicBlock *NewDest) {
4726     assert(NewDest);
4727     assert(hasUnwindDest());
4728     Op<1>() = NewDest;
4729   }
4730 
4731   // Methods for support type inquiry through isa, cast, and dyn_cast:
4732   static bool classof(const Instruction *I) {
4733     return (I->getOpcode() == Instruction::CleanupRet);
4734   }
4735   static bool classof(const Value *V) {
4736     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4737   }
4738 
4739 private:
4740   BasicBlock *getSuccessor(unsigned Idx) const {
4741     assert(Idx == 0);
4742     return getUnwindDest();
4743   }
4744 
4745   void setSuccessor(unsigned Idx, BasicBlock *B) {
4746     assert(Idx == 0);
4747     setUnwindDest(B);
4748   }
4749 
4750   // Shadow Instruction::setInstructionSubclassData with a private forwarding
4751   // method so that subclasses cannot accidentally use it.
4752   template <typename Bitfield>
4753   void setSubclassData(typename Bitfield::Type Value) {
4754     Instruction::setSubclassData<Bitfield>(Value);
4755   }
4756 };
4757 
4758 template <>
4759 struct OperandTraits<CleanupReturnInst>
4760     : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
4761 
4762 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
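
// Illustrative usage (a non-normative sketch; CP is a CleanupPadInst and BB a
// placeholder block ending the cleanup; a null unwind destination means the
// cleanupret unwinds to the caller):
//
//   CleanupReturnInst *CRI =
//       CleanupReturnInst::Create(CP, /*UnwindBB=*/nullptr,
//                                 /*InsertAtEnd=*/BB);
//   assert(CRI->unwindsToCaller() && CRI->getNumSuccessors() == 0);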
4763 
4764 //===----------------------------------------------------------------------===//
4765 //                           UnreachableInst Class
4766 //===----------------------------------------------------------------------===//
4767 
4768 //===---------------------------------------------------------------------------
/// Executing this instruction has undefined behavior.  In particular, its
/// presence indicates some higher level knowledge that the end of the block
/// cannot be reached.
4772 ///
4773 class UnreachableInst : public Instruction {
4774 protected:
4775   // Note: Instruction needs to be a friend here to call cloneImpl.
4776   friend class Instruction;
4777 
4778   UnreachableInst *cloneImpl() const;
4779 
4780 public:
4781   explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
4782   explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
4783 
4784   // allocate space for exactly zero operands
4785   void *operator new(size_t S) { return User::operator new(S, 0); }
4786   void operator delete(void *Ptr) { User::operator delete(Ptr); }
4787 
4788   unsigned getNumSuccessors() const { return 0; }
4789 
4790   // Methods for support type inquiry through isa, cast, and dyn_cast:
4791   static bool classof(const Instruction *I) {
4792     return I->getOpcode() == Instruction::Unreachable;
4793   }
4794   static bool classof(const Value *V) {
4795     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4796   }
4797 
4798 private:
4799   BasicBlock *getSuccessor(unsigned idx) const {
4800     llvm_unreachable("UnreachableInst has no successors!");
4801   }
4802 
4803   void setSuccessor(unsigned idx, BasicBlock *B) {
4804     llvm_unreachable("UnreachableInst has no successors!");
4805   }
4806 };
4807 
4808 //===----------------------------------------------------------------------===//
4809 //                                 TruncInst Class
4810 //===----------------------------------------------------------------------===//
4811 
4812 /// This class represents a truncation of integer types.
4813 class TruncInst : public CastInst {
4814 protected:
4815   // Note: Instruction needs to be a friend here to call cloneImpl.
4816   friend class Instruction;
4817 
4818   /// Clone an identical TruncInst
4819   TruncInst *cloneImpl() const;
4820 
4821 public:
4822   /// Constructor with insert-before-instruction semantics
4823   TruncInst(
4824     Value *S,                           ///< The value to be truncated
4825     Type *Ty,                           ///< The (smaller) type to truncate to
4826     const Twine &NameStr = "",          ///< A name for the new instruction
4827     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4828   );
4829 
4830   /// Constructor with insert-at-end-of-block semantics
4831   TruncInst(
4832     Value *S,                     ///< The value to be truncated
4833     Type *Ty,                     ///< The (smaller) type to truncate to
4834     const Twine &NameStr,         ///< A name for the new instruction
4835     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
4836   );
4837 
4838   /// Methods for support type inquiry through isa, cast, and dyn_cast:
4839   static bool classof(const Instruction *I) {
4840     return I->getOpcode() == Trunc;
4841   }
4842   static bool classof(const Value *V) {
4843     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4844   }
4845 };
4846 
4847 //===----------------------------------------------------------------------===//
4848 //                                 ZExtInst Class
4849 //===----------------------------------------------------------------------===//
4850 
4851 /// This class represents zero extension of integer types.
4852 class ZExtInst : public CastInst {
4853 protected:
4854   // Note: Instruction needs to be a friend here to call cloneImpl.
4855   friend class Instruction;
4856 
4857   /// Clone an identical ZExtInst
4858   ZExtInst *cloneImpl() const;
4859 
4860 public:
4861   /// Constructor with insert-before-instruction semantics
4862   ZExtInst(
4863     Value *S,                           ///< The value to be zero extended
4864     Type *Ty,                           ///< The type to zero extend to
4865     const Twine &NameStr = "",          ///< A name for the new instruction
4866     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4867   );
4868 
  /// Constructor with insert-at-end-of-block semantics
4870   ZExtInst(
4871     Value *S,                     ///< The value to be zero extended
4872     Type *Ty,                     ///< The type to zero extend to
4873     const Twine &NameStr,         ///< A name for the new instruction
4874     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
4875   );
4876 
4877   /// Methods for support type inquiry through isa, cast, and dyn_cast:
4878   static bool classof(const Instruction *I) {
4879     return I->getOpcode() == ZExt;
4880   }
4881   static bool classof(const Value *V) {
4882     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4883   }
4884 };
4885 
4886 //===----------------------------------------------------------------------===//
4887 //                                 SExtInst Class
4888 //===----------------------------------------------------------------------===//
4889 
4890 /// This class represents a sign extension of integer types.
4891 class SExtInst : public CastInst {
4892 protected:
4893   // Note: Instruction needs to be a friend here to call cloneImpl.
4894   friend class Instruction;
4895 
4896   /// Clone an identical SExtInst
4897   SExtInst *cloneImpl() const;
4898 
4899 public:
4900   /// Constructor with insert-before-instruction semantics
4901   SExtInst(
4902     Value *S,                           ///< The value to be sign extended
4903     Type *Ty,                           ///< The type to sign extend to
4904     const Twine &NameStr = "",          ///< A name for the new instruction
4905     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4906   );
4907 
4908   /// Constructor with insert-at-end-of-block semantics
4909   SExtInst(
4910     Value *S,                     ///< The value to be sign extended
4911     Type *Ty,                     ///< The type to sign extend to
4912     const Twine &NameStr,         ///< A name for the new instruction
4913     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
4914   );
4915 
4916   /// Methods for support type inquiry through isa, cast, and dyn_cast:
4917   static bool classof(const Instruction *I) {
4918     return I->getOpcode() == SExt;
4919   }
4920   static bool classof(const Value *V) {
4921     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4922   }
4923 };
4924 
4925 //===----------------------------------------------------------------------===//
4926 //                                 FPTruncInst Class
4927 //===----------------------------------------------------------------------===//
4928 
4929 /// This class represents a truncation of floating point types.
4930 class FPTruncInst : public CastInst {
4931 protected:
4932   // Note: Instruction needs to be a friend here to call cloneImpl.
4933   friend class Instruction;
4934 
4935   /// Clone an identical FPTruncInst
4936   FPTruncInst *cloneImpl() const;
4937 
4938 public:
4939   /// Constructor with insert-before-instruction semantics
4940   FPTruncInst(
4941     Value *S,                           ///< The value to be truncated
4942     Type *Ty,                           ///< The type to truncate to
4943     const Twine &NameStr = "",          ///< A name for the new instruction
4944     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4945   );
4946 
  /// Constructor with insert-at-end-of-block semantics
4948   FPTruncInst(
4949     Value *S,                     ///< The value to be truncated
4950     Type *Ty,                     ///< The type to truncate to
4951     const Twine &NameStr,         ///< A name for the new instruction
4952     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
4953   );
4954 
4955   /// Methods for support type inquiry through isa, cast, and dyn_cast:
4956   static bool classof(const Instruction *I) {
4957     return I->getOpcode() == FPTrunc;
4958   }
4959   static bool classof(const Value *V) {
4960     return isa<Instruction>(V) && classof(cast<Instruction>(V));
4961   }
4962 };
4963 
4964 //===----------------------------------------------------------------------===//
4965 //                                 FPExtInst Class
4966 //===----------------------------------------------------------------------===//
4967 
4968 /// This class represents an extension of floating point types.
4969 class FPExtInst : public CastInst {
4970 protected:
4971   // Note: Instruction needs to be a friend here to call cloneImpl.
4972   friend class Instruction;
4973 
4974   /// Clone an identical FPExtInst
4975   FPExtInst *cloneImpl() const;
4976 
4977 public:
4978   /// Constructor with insert-before-instruction semantics
4979   FPExtInst(
4980     Value *S,                           ///< The value to be extended
4981     Type *Ty,                           ///< The type to extend to
4982     const Twine &NameStr = "",          ///< A name for the new instruction
4983     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4984   );
4985 
4986   /// Constructor with insert-at-end-of-block semantics
4987   FPExtInst(
4988     Value *S,                     ///< The value to be extended
4989     Type *Ty,                     ///< The type to extend to
4990     const Twine &NameStr,         ///< A name for the new instruction
4991     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
4992   );
4993 
4994   /// Methods for support type inquiry through isa, cast, and dyn_cast:
4995   static bool classof(const Instruction *I) {
4996     return I->getOpcode() == FPExt;
4997   }
4998   static bool classof(const Value *V) {
4999     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5000   }
5001 };
5002 
5003 //===----------------------------------------------------------------------===//
5004 //                                 UIToFPInst Class
5005 //===----------------------------------------------------------------------===//
5006 
/// This class represents a cast from unsigned integer to floating point.
5008 class UIToFPInst : public CastInst {
5009 protected:
5010   // Note: Instruction needs to be a friend here to call cloneImpl.
5011   friend class Instruction;
5012 
5013   /// Clone an identical UIToFPInst
5014   UIToFPInst *cloneImpl() const;
5015 
5016 public:
5017   /// Constructor with insert-before-instruction semantics
5018   UIToFPInst(
5019     Value *S,                           ///< The value to be converted
5020     Type *Ty,                           ///< The type to convert to
5021     const Twine &NameStr = "",          ///< A name for the new instruction
5022     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5023   );
5024 
5025   /// Constructor with insert-at-end-of-block semantics
5026   UIToFPInst(
5027     Value *S,                     ///< The value to be converted
5028     Type *Ty,                     ///< The type to convert to
5029     const Twine &NameStr,         ///< A name for the new instruction
5030     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
5031   );
5032 
5033   /// Methods for support type inquiry through isa, cast, and dyn_cast:
5034   static bool classof(const Instruction *I) {
5035     return I->getOpcode() == UIToFP;
5036   }
5037   static bool classof(const Value *V) {
5038     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5039   }
5040 };
5041 
5042 //===----------------------------------------------------------------------===//
5043 //                                 SIToFPInst Class
5044 //===----------------------------------------------------------------------===//
5045 
5046 /// This class represents a cast from signed integer to floating point.
5047 class SIToFPInst : public CastInst {
5048 protected:
5049   // Note: Instruction needs to be a friend here to call cloneImpl.
5050   friend class Instruction;
5051 
5052   /// Clone an identical SIToFPInst
5053   SIToFPInst *cloneImpl() const;
5054 
5055 public:
5056   /// Constructor with insert-before-instruction semantics
5057   SIToFPInst(
5058     Value *S,                           ///< The value to be converted
5059     Type *Ty,                           ///< The type to convert to
5060     const Twine &NameStr = "",          ///< A name for the new instruction
5061     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5062   );
5063 
5064   /// Constructor with insert-at-end-of-block semantics
5065   SIToFPInst(
5066     Value *S,                     ///< The value to be converted
5067     Type *Ty,                     ///< The type to convert to
5068     const Twine &NameStr,         ///< A name for the new instruction
5069     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
5070   );
5071 
5072   /// Methods for support type inquiry through isa, cast, and dyn_cast:
5073   static bool classof(const Instruction *I) {
5074     return I->getOpcode() == SIToFP;
5075   }
5076   static bool classof(const Value *V) {
5077     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5078   }
5079 };
5080 
5081 //===----------------------------------------------------------------------===//
5082 //                                 FPToUIInst Class
5083 //===----------------------------------------------------------------------===//
5084 
/// This class represents a cast from floating point to unsigned integer.
5086 class FPToUIInst  : public CastInst {
5087 protected:
5088   // Note: Instruction needs to be a friend here to call cloneImpl.
5089   friend class Instruction;
5090 
5091   /// Clone an identical FPToUIInst
5092   FPToUIInst *cloneImpl() const;
5093 
5094 public:
5095   /// Constructor with insert-before-instruction semantics
5096   FPToUIInst(
5097     Value *S,                           ///< The value to be converted
5098     Type *Ty,                           ///< The type to convert to
5099     const Twine &NameStr = "",          ///< A name for the new instruction
5100     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5101   );
5102 
5103   /// Constructor with insert-at-end-of-block semantics
5104   FPToUIInst(
5105     Value *S,                     ///< The value to be converted
5106     Type *Ty,                     ///< The type to convert to
5107     const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
5109   );
5110 
5111   /// Methods for support type inquiry through isa, cast, and dyn_cast:
5112   static bool classof(const Instruction *I) {
5113     return I->getOpcode() == FPToUI;
5114   }
5115   static bool classof(const Value *V) {
5116     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5117   }
5118 };
5119 
5120 //===----------------------------------------------------------------------===//
5121 //                                 FPToSIInst Class
5122 //===----------------------------------------------------------------------===//
5123 
5124 /// This class represents a cast from floating point to signed integer.
5125 class FPToSIInst  : public CastInst {
5126 protected:
5127   // Note: Instruction needs to be a friend here to call cloneImpl.
5128   friend class Instruction;
5129 
5130   /// Clone an identical FPToSIInst
5131   FPToSIInst *cloneImpl() const;
5132 
5133 public:
5134   /// Constructor with insert-before-instruction semantics
5135   FPToSIInst(
5136     Value *S,                           ///< The value to be converted
5137     Type *Ty,                           ///< The type to convert to
5138     const Twine &NameStr = "",          ///< A name for the new instruction
5139     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5140   );
5141 
5142   /// Constructor with insert-at-end-of-block semantics
5143   FPToSIInst(
5144     Value *S,                     ///< The value to be converted
5145     Type *Ty,                     ///< The type to convert to
5146     const Twine &NameStr,         ///< A name for the new instruction
5147     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
5148   );
5149 
5150   /// Methods for support type inquiry through isa, cast, and dyn_cast:
5151   static bool classof(const Instruction *I) {
5152     return I->getOpcode() == FPToSI;
5153   }
5154   static bool classof(const Value *V) {
5155     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5156   }
5157 };
5158 
5159 //===----------------------------------------------------------------------===//
5160 //                                 IntToPtrInst Class
5161 //===----------------------------------------------------------------------===//
5162 
5163 /// This class represents a cast from an integer to a pointer.
5164 class IntToPtrInst : public CastInst {
5165 public:
5166   // Note: Instruction needs to be a friend here to call cloneImpl.
5167   friend class Instruction;
5168 
5169   /// Constructor with insert-before-instruction semantics
5170   IntToPtrInst(
5171     Value *S,                           ///< The value to be converted
5172     Type *Ty,                           ///< The type to convert to
5173     const Twine &NameStr = "",          ///< A name for the new instruction
5174     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5175   );
5176 
5177   /// Constructor with insert-at-end-of-block semantics
5178   IntToPtrInst(
5179     Value *S,                     ///< The value to be converted
5180     Type *Ty,                     ///< The type to convert to
5181     const Twine &NameStr,         ///< A name for the new instruction
5182     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
5183   );
5184 
5185   /// Clone an identical IntToPtrInst.
5186   IntToPtrInst *cloneImpl() const;
5187 
5188   /// Returns the address space of this instruction's pointer type.
5189   unsigned getAddressSpace() const {
5190     return getType()->getPointerAddressSpace();
5191   }
5192 
5193   // Methods for support type inquiry through isa, cast, and dyn_cast:
5194   static bool classof(const Instruction *I) {
5195     return I->getOpcode() == IntToPtr;
5196   }
5197   static bool classof(const Value *V) {
5198     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5199   }
5200 };
5201 
5202 //===----------------------------------------------------------------------===//
5203 //                                 PtrToIntInst Class
5204 //===----------------------------------------------------------------------===//
5205 
5206 /// This class represents a cast from a pointer to an integer.
5207 class PtrToIntInst : public CastInst {
5208 protected:
5209   // Note: Instruction needs to be a friend here to call cloneImpl.
5210   friend class Instruction;
5211 
5212   /// Clone an identical PtrToIntInst.
5213   PtrToIntInst *cloneImpl() const;
5214 
5215 public:
5216   /// Constructor with insert-before-instruction semantics
5217   PtrToIntInst(
5218     Value *S,                           ///< The value to be converted
5219     Type *Ty,                           ///< The type to convert to
5220     const Twine &NameStr = "",          ///< A name for the new instruction
5221     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5222   );
5223 
5224   /// Constructor with insert-at-end-of-block semantics
5225   PtrToIntInst(
5226     Value *S,                     ///< The value to be converted
5227     Type *Ty,                     ///< The type to convert to
5228     const Twine &NameStr,         ///< A name for the new instruction
5229     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
5230   );
5231 
5232   /// Gets the pointer operand.
5233   Value *getPointerOperand() { return getOperand(0); }
5234   /// Gets the pointer operand.
5235   const Value *getPointerOperand() const { return getOperand(0); }
5236   /// Gets the operand index of the pointer operand.
5237   static unsigned getPointerOperandIndex() { return 0U; }
5238 
5239   /// Returns the address space of the pointer operand.
5240   unsigned getPointerAddressSpace() const {
5241     return getPointerOperand()->getType()->getPointerAddressSpace();
5242   }
5243 
5244   // Methods for support type inquiry through isa, cast, and dyn_cast:
5245   static bool classof(const Instruction *I) {
5246     return I->getOpcode() == PtrToInt;
5247   }
5248   static bool classof(const Value *V) {
5249     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5250   }
5251 };
5252 
5253 //===----------------------------------------------------------------------===//
5254 //                             BitCastInst Class
5255 //===----------------------------------------------------------------------===//
5256 
5257 /// This class represents a no-op cast from one type to another.
5258 class BitCastInst : public CastInst {
5259 protected:
5260   // Note: Instruction needs to be a friend here to call cloneImpl.
5261   friend class Instruction;
5262 
5263   /// Clone an identical BitCastInst.
5264   BitCastInst *cloneImpl() const;
5265 
5266 public:
5267   /// Constructor with insert-before-instruction semantics
5268   BitCastInst(
    Value *S,                           ///< The value to be cast
    Type *Ty,                           ///< The type to cast to
5271     const Twine &NameStr = "",          ///< A name for the new instruction
5272     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5273   );
5274 
5275   /// Constructor with insert-at-end-of-block semantics
5276   BitCastInst(
    Value *S,                     ///< The value to be cast
    Type *Ty,                     ///< The type to cast to
5279     const Twine &NameStr,         ///< A name for the new instruction
5280     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
5281   );
5282 
5283   // Methods for support type inquiry through isa, cast, and dyn_cast:
5284   static bool classof(const Instruction *I) {
5285     return I->getOpcode() == BitCast;
5286   }
5287   static bool classof(const Value *V) {
5288     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5289   }
5290 };
5291 
5292 //===----------------------------------------------------------------------===//
5293 //                          AddrSpaceCastInst Class
5294 //===----------------------------------------------------------------------===//
5295 
5296 /// This class represents a conversion between pointers from one address space
5297 /// to another.
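///
/// A minimal usage sketch (illustrative only; \c Ptr is assumed to be a
/// pointer in a non-default address space and \c InsertPt a valid insertion
/// point):
/// \code
///   Type *FlatPtrTy =
///       PointerType::get(InsertPt->getContext(), /*AddressSpace=*/0);
///   Value *Flat = new AddrSpaceCastInst(Ptr, FlatPtrTy, "flat", InsertPt);
/// \endcode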
5298 class AddrSpaceCastInst : public CastInst {
5299 protected:
5300   // Note: Instruction needs to be a friend here to call cloneImpl.
5301   friend class Instruction;
5302 
5303   /// Clone an identical AddrSpaceCastInst.
5304   AddrSpaceCastInst *cloneImpl() const;
5305 
5306 public:
5307   /// Constructor with insert-before-instruction semantics
5308   AddrSpaceCastInst(
    Value *S,                           ///< The value to be cast
    Type *Ty,                           ///< The type to cast to
5311     const Twine &NameStr = "",          ///< A name for the new instruction
5312     Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5313   );
5314 
5315   /// Constructor with insert-at-end-of-block semantics
5316   AddrSpaceCastInst(
    Value *S,                     ///< The value to be cast
    Type *Ty,                     ///< The type to cast to
5319     const Twine &NameStr,         ///< A name for the new instruction
5320     BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
5321   );
5322 
5323   // Methods for support type inquiry through isa, cast, and dyn_cast:
5324   static bool classof(const Instruction *I) {
5325     return I->getOpcode() == AddrSpaceCast;
5326   }
5327   static bool classof(const Value *V) {
5328     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5329   }
5330 
5331   /// Gets the pointer operand.
5332   Value *getPointerOperand() {
5333     return getOperand(0);
5334   }
5335 
5336   /// Gets the pointer operand.
5337   const Value *getPointerOperand() const {
5338     return getOperand(0);
5339   }
5340 
5341   /// Gets the operand index of the pointer operand.
5342   static unsigned getPointerOperandIndex() {
5343     return 0U;
5344   }
5345 
5346   /// Returns the address space of the pointer operand.
5347   unsigned getSrcAddressSpace() const {
5348     return getPointerOperand()->getType()->getPointerAddressSpace();
5349   }
5350 
5351   /// Returns the address space of the result.
5352   unsigned getDestAddressSpace() const {
5353     return getType()->getPointerAddressSpace();
5354   }
5355 };
5356 
5357 //===----------------------------------------------------------------------===//
5358 //                          Helper functions
5359 //===----------------------------------------------------------------------===//
5360 
/// A helper function that returns the pointer operand of a load or store
/// instruction. Returns nullptr if \p V is not a load or store.
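///
/// A minimal usage sketch (illustrative only; \c BB is assumed to be a valid
/// basic block):
/// \code
///   SmallVector<const Value *, 8> AccessedPtrs;
///   for (const Instruction &I : *BB)
///     if (const Value *Ptr = getLoadStorePointerOperand(&I))
///       AccessedPtrs.push_back(Ptr);
/// \endcode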
5363 inline const Value *getLoadStorePointerOperand(const Value *V) {
5364   if (auto *Load = dyn_cast<LoadInst>(V))
5365     return Load->getPointerOperand();
5366   if (auto *Store = dyn_cast<StoreInst>(V))
5367     return Store->getPointerOperand();
5368   return nullptr;
5369 }
5370 inline Value *getLoadStorePointerOperand(Value *V) {
5371   return const_cast<Value *>(
5372       getLoadStorePointerOperand(static_cast<const Value *>(V)));
5373 }
5374 
/// A helper function that returns the pointer operand of a load, store,
/// or GEP instruction. Returns nullptr if \p V is not a load, store, or GEP.
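///
/// A minimal usage sketch (illustrative only; \c GEP is assumed to be a
/// GetElementPtrInst *): unlike getLoadStorePointerOperand, this helper also
/// looks through GEPs.
/// \code
///   const Value *Base = getPointerOperand(GEP); // GEP's base pointer.
///   assert(getLoadStorePointerOperand(GEP) == nullptr);
/// \endcode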
5377 inline const Value *getPointerOperand(const Value *V) {
5378   if (auto *Ptr = getLoadStorePointerOperand(V))
5379     return Ptr;
5380   if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
5381     return Gep->getPointerOperand();
5382   return nullptr;
5383 }
5384 inline Value *getPointerOperand(Value *V) {
5385   return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
5386 }
5387 
/// A helper function that returns the alignment of a load or store
/// instruction.
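///
/// A minimal usage sketch (illustrative only; \c MemI is assumed to point to
/// an instruction already known to be a load or store, and \c RequiredBytes
/// to be a byte count):
/// \code
///   if (getLoadStoreAlignment(MemI).value() < RequiredBytes)
///     report_fatal_error("memory access is under-aligned");
/// \endcode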
5389 inline Align getLoadStoreAlignment(Value *I) {
5390   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5391          "Expected Load or Store instruction");
5392   if (auto *LI = dyn_cast<LoadInst>(I))
5393     return LI->getAlign();
5394   return cast<StoreInst>(I)->getAlign();
5395 }
5396 
/// A helper function that returns the address space of the pointer operand of
/// a load or store instruction.
5399 inline unsigned getLoadStoreAddressSpace(Value *I) {
5400   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5401          "Expected Load or Store instruction");
5402   if (auto *LI = dyn_cast<LoadInst>(I))
5403     return LI->getPointerAddressSpace();
5404   return cast<StoreInst>(I)->getPointerAddressSpace();
5405 }
5406 
5407 /// A helper function that returns the type of a load or store instruction.
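///
/// A minimal usage sketch (illustrative only; \c MemI is assumed to point to
/// an instruction already known to be a load or store):
/// \code
///   Type *AccessTy = getLoadStoreType(MemI);
///   bool IsVectorAccess = AccessTy->isVectorTy();
/// \endcode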
5408 inline Type *getLoadStoreType(Value *I) {
5409   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5410          "Expected Load or Store instruction");
5411   if (auto *LI = dyn_cast<LoadInst>(I))
5412     return LI->getType();
5413   return cast<StoreInst>(I)->getValueOperand()->getType();
5414 }
5415 
5416 /// A helper function that returns an atomic operation's sync scope; returns
5417 /// std::nullopt if it is not an atomic operation.
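///
/// A minimal usage sketch (illustrative only; \p I may be any instruction):
/// \code
///   std::optional<SyncScope::ID> SSID = getAtomicSyncScopeID(I);
///   bool IsSingleThreadAtomic = SSID && *SSID == SyncScope::SingleThread;
/// \endcode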
5418 inline std::optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) {
5419   if (!I->isAtomic())
5420     return std::nullopt;
5421   if (auto *AI = dyn_cast<LoadInst>(I))
5422     return AI->getSyncScopeID();
5423   if (auto *AI = dyn_cast<StoreInst>(I))
5424     return AI->getSyncScopeID();
5425   if (auto *AI = dyn_cast<FenceInst>(I))
5426     return AI->getSyncScopeID();
5427   if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
5428     return AI->getSyncScopeID();
5429   if (auto *AI = dyn_cast<AtomicRMWInst>(I))
5430     return AI->getSyncScopeID();
5431   llvm_unreachable("unhandled atomic operation");
5432 }
5433 
5434 //===----------------------------------------------------------------------===//
5435 //                              FreezeInst Class
5436 //===----------------------------------------------------------------------===//
5437 
/// This class represents a freeze instruction, which returns an arbitrary but
/// fixed concrete value if its operand is either a poison value or an undef
/// value.
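///
/// A minimal usage sketch (illustrative only; \c MaybePoison is assumed to be
/// a possibly-poison value and \c InsertPt a valid insertion point):
/// \code
///   Value *Frozen = new FreezeInst(MaybePoison, "frozen", InsertPt);
/// \endcode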
5440 class FreezeInst : public UnaryInstruction {
5441 protected:
5442   // Note: Instruction needs to be a friend here to call cloneImpl.
5443   friend class Instruction;
5444 
  /// Clone an identical FreezeInst.
5446   FreezeInst *cloneImpl() const;
5447 
5448 public:
5449   explicit FreezeInst(Value *S,
5450                       const Twine &NameStr = "",
5451                       Instruction *InsertBefore = nullptr);
5452   FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);
5453 
5454   // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
5456     return I->getOpcode() == Freeze;
5457   }
  static bool classof(const Value *V) {
5459     return isa<Instruction>(V) && classof(cast<Instruction>(V));
5460   }
5461 };
5462 
5463 } // end namespace llvm
5464 
5465 #endif // LLVM_IR_INSTRUCTIONS_H
5466