1 //===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements all of the non-inline methods for the LLVM instruction
10 // classes.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/IR/Instructions.h"
15 #include "LLVMContextImpl.h"
16 #include "llvm/ADT/SmallBitVector.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/IR/Attributes.h"
20 #include "llvm/IR/BasicBlock.h"
21 #include "llvm/IR/Constant.h"
22 #include "llvm/IR/Constants.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/DerivedTypes.h"
25 #include "llvm/IR/Function.h"
26 #include "llvm/IR/InstrTypes.h"
27 #include "llvm/IR/Instruction.h"
28 #include "llvm/IR/Intrinsics.h"
29 #include "llvm/IR/LLVMContext.h"
30 #include "llvm/IR/MDBuilder.h"
31 #include "llvm/IR/Metadata.h"
32 #include "llvm/IR/Module.h"
33 #include "llvm/IR/Operator.h"
34 #include "llvm/IR/ProfDataUtils.h"
35 #include "llvm/IR/Type.h"
36 #include "llvm/IR/Value.h"
37 #include "llvm/Support/AtomicOrdering.h"
38 #include "llvm/Support/Casting.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/MathExtras.h"
41 #include "llvm/Support/ModRef.h"
42 #include "llvm/Support/TypeSize.h"
43 #include <algorithm>
44 #include <cassert>
45 #include <cstdint>
46 #include <optional>
47 #include <vector>
48 
49 using namespace llvm;
50 
// Debugging/escape-hatch flag: when set, the inttoptr(ptrtoint X) round-trip
// folding is suppressed.  NOTE(review): the flag is presumably read by the
// cast-folding code later in this file — confirm against the full file.
static cl::opt<bool> DisableI2pP2iOpt(
    "disable-i2p-p2i-opt", cl::init(false),
    cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
54 
55 //===----------------------------------------------------------------------===//
56 //                            AllocaInst Class
57 //===----------------------------------------------------------------------===//
58 
59 std::optional<TypeSize>
60 AllocaInst::getAllocationSize(const DataLayout &DL) const {
61   TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
62   if (isArrayAllocation()) {
63     auto *C = dyn_cast<ConstantInt>(getArraySize());
64     if (!C)
65       return std::nullopt;
66     assert(!Size.isScalable() && "Array elements cannot have a scalable size");
67     Size *= C->getZExtValue();
68   }
69   return Size;
70 }
71 
72 std::optional<TypeSize>
73 AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
74   std::optional<TypeSize> Size = getAllocationSize(DL);
75   if (Size)
76     return *Size * 8;
77   return std::nullopt;
78 }
79 
80 //===----------------------------------------------------------------------===//
81 //                              SelectInst Class
82 //===----------------------------------------------------------------------===//
83 
84 /// areInvalidOperands - Return a string if the specified operands are invalid
85 /// for a select operation, otherwise return null.
86 const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
87   if (Op1->getType() != Op2->getType())
88     return "both values to select must have same type";
89 
90   if (Op1->getType()->isTokenTy())
91     return "select values cannot have token type";
92 
93   if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
94     // Vector select.
95     if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
96       return "vector select condition element type must be i1";
97     VectorType *ET = dyn_cast<VectorType>(Op1->getType());
98     if (!ET)
99       return "selected values for vector select must be vectors";
100     if (ET->getElementCount() != VT->getElementCount())
101       return "vector select requires selected vectors to have "
102                    "the same vector length as select condition";
103   } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
104     return "select condition must be i1 or <n x i1>";
105   }
106   return nullptr;
107 }
108 
109 //===----------------------------------------------------------------------===//
110 //                               PHINode Class
111 //===----------------------------------------------------------------------===//
112 
// Copy-construct a PHI node from \p PN, cloning every incoming value and the
// parallel list of incoming blocks.  PHI operands live in "hung off" storage,
// which must be allocated explicitly before the uses can be copied.
PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  allocHungoffUses(PN.getNumOperands());
  // Copy the incoming values, then the matching incoming-block pointers.
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
  SubclassOptionalData = PN.SubclassOptionalData;
}
121 
// removeIncomingValue - Remove an incoming value.  This is useful if a
// predecessor basic block is deleted.  Returns the value that was removed.
// If removing the entry empties the PHI and \p DeletePHIIfEmpty is set, the
// PHI is RAUW'd with poison and erased (so `this` may be dead on return).
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase.  However,
  // clients might not expect this to happen.  The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  copyIncomingBlocks(make_range(block_begin() + Idx + 1, block_end()), Idx);

  // Nuke the last value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(PoisonValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}
147 
148 /// growOperands - grow operands - This grows the operand list in response
149 /// to a push_back style of operation.  This grows the number of ops by 1.5
150 /// times.
151 ///
152 void PHINode::growOperands() {
153   unsigned e = getNumOperands();
154   unsigned NumOps = e + e / 2;
155   if (NumOps < 2) NumOps = 2;      // 2 op PHI nodes are VERY common.
156 
157   ReservedSpace = NumOps;
158   growHungoffUses(ReservedSpace, /* IsPhi */ true);
159 }
160 
/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  // Scan the remaining entries; self-references (an incoming value that is
  // this PHI itself) are ignored when checking for a unique merged value.
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI: adopt the first
      // non-self incoming value as the candidate.
      ConstantValue = getIncomingValue(i);
    }
  // Every incoming value was the PHI itself; any value works, so use undef.
  if (ConstantValue == this)
    return UndefValue::get(getType());
  return ConstantValue;
}
177 
178 /// hasConstantOrUndefValue - Whether the specified PHI node always merges
179 /// together the same value, assuming that undefs result in the same value as
180 /// non-undefs.
181 /// Unlike \ref hasConstantValue, this does not return a value because the
182 /// unique non-undef incoming value need not dominate the PHI node.
183 bool PHINode::hasConstantOrUndefValue() const {
184   Value *ConstantValue = nullptr;
185   for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
186     Value *Incoming = getIncomingValue(i);
187     if (Incoming != this && !isa<UndefValue>(Incoming)) {
188       if (ConstantValue && ConstantValue != Incoming)
189         return false;
190       ConstantValue = Incoming;
191     }
192   }
193   return true;
194 }
195 
196 //===----------------------------------------------------------------------===//
197 //                       LandingPadInst Implementation
198 //===----------------------------------------------------------------------===//
199 
// Construct a landingpad with room reserved for \p NumReservedValues clauses,
// inserted before \p InsertBefore.
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

// As above, but appended to the end of \p InsertAtEnd.
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}
211 
212 LandingPadInst::LandingPadInst(const LandingPadInst &LP)
213     : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
214                   LP.getNumOperands()),
215       ReservedSpace(LP.getNumOperands()) {
216   allocHungoffUses(LP.getNumOperands());
217   Use *OL = getOperandList();
218   const Use *InOL = LP.getOperandList();
219   for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
220     OL[I] = InOL[I];
221 
222   setCleanup(LP.isCleanup());
223 }
224 
// Factory: create a landingpad before \p InsertBefore.
LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

// Factory: create a landingpad at the end of \p InsertAtEnd.
LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}
236 
// Shared constructor body: reserve hung-off operand storage for the expected
// number of clauses, start with zero clauses, and default to non-cleanup.
void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}
244 
/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
/// \p Size is the number of additional operands the caller is about to append;
/// nothing happens if the reserved space already covers them.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}
253 
// Append one catch/filter clause operand, growing the hung-off operand
// storage if needed.
void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}
261 
262 //===----------------------------------------------------------------------===//
263 //                        CallBase Implementation
264 //===----------------------------------------------------------------------===//
265 
// Clone \p CB (call, invoke, or callbr) with a replacement set of operand
// bundles, dispatching to the concrete subclass's Create.
CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
                           Instruction *InsertPt) {
  switch (CB->getOpcode()) {
  case Instruction::Call:
    return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
  case Instruction::Invoke:
    return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
  case Instruction::CallBr:
    return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
  default:
    llvm_unreachable("Unknown CallBase sub-class!");
  }
}
279 
280 CallBase *CallBase::Create(CallBase *CI, OperandBundleDef OpB,
281                            Instruction *InsertPt) {
282   SmallVector<OperandBundleDef, 2> OpDefs;
283   for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
284     auto ChildOB = CI->getOperandBundleAt(i);
285     if (ChildOB.getTagName() != OpB.getTag())
286       OpDefs.emplace_back(ChildOB);
287   }
288   OpDefs.emplace_back(OpB);
289   return CallBase::Create(CI, OpDefs, InsertPt);
290 }
291 

/// Return the function containing this call site (the caller, not the callee).
Function *CallBase::getCaller() { return getParent()->getParent(); }
294 
// Only CallBr has a dynamic number of extra operands: its indirect
// destinations plus the single fallthrough destination.
unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}
299 
// A call is "indirect" when its callee operand is neither a Function nor any
// other Constant, and is not inline asm.
bool CallBase::isIndirectCall() const {
  const Value *V = getCalledOperand();
  if (isa<Function>(V) || isa<Constant>(V))
    return false;
  return !isInlineAsm();
}
306 
/// Tests if this call site must be tail call optimized. Only a CallInst can
/// be tail call optimized; invoke/callbr always answer false.
bool CallBase::isMustTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isMustTailCall();
  return false;
}

/// Tests if this call site is marked as a tail call. Only a CallInst can
/// carry the tail marker; invoke/callbr always answer false.
bool CallBase::isTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isTailCall();
  return false;
}
321 
322 Intrinsic::ID CallBase::getIntrinsicID() const {
323   if (auto *F = getCalledFunction())
324     return F->getIntrinsicID();
325   return Intrinsic::not_intrinsic;
326 }
327 
// Union of the call-site nofpclass return attribute with the callee's, when
// the callee is known.
FPClassTest CallBase::getRetNoFPClass() const {
  FPClassTest Mask = Attrs.getRetNoFPClass();

  if (const Function *F = getCalledFunction())
    Mask |= F->getAttributes().getRetNoFPClass();
  return Mask;
}

// Same as above, but for the nofpclass attribute on parameter \p i.
FPClassTest CallBase::getParamNoFPClass(unsigned i) const {
  FPClassTest Mask = Attrs.getParamNoFPClass(i);

  if (const Function *F = getCalledFunction())
    Mask |= F->getAttributes().getParamNoFPClass(i);
  return Mask;
}
343 
// The returned pointer is known non-null either via an explicit nonnull
// attribute, or via a dereferenceable attribute in an address space where
// null is not a defined (dereferenceable) address.
bool CallBase::isReturnNonNull() const {
  if (hasRetAttr(Attribute::NonNull))
    return true;

  if (getRetDereferenceableBytes() > 0 &&
      !NullPointerIsDefined(getCaller(), getType()->getPointerAddressSpace()))
    return true;

  return false;
}
354 
// Find the first argument operand carrying attribute \p Kind, checking the
// call-site attribute list first and then the callee's.  hasAttrSomewhere
// reports an AttributeList index, so FirstArgIndex is subtracted to convert
// it to an argument-operand index.  Returns null if no argument has it.
Value *CallBase::getArgOperandWithAttribute(Attribute::AttrKind Kind) const {
  unsigned Index;

  if (Attrs.hasAttrSomewhere(Kind, &Index))
    return getArgOperand(Index - AttributeList::FirstArgIndex);
  if (const Function *F = getCalledFunction())
    if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
      return getArgOperand(Index - AttributeList::FirstArgIndex);

  return nullptr;
}
366 
/// Determine whether the argument or parameter has the given attribute.
/// Call-site attributes win outright; attributes inherited from the callee
/// for memory-effect kinds are weakened by any operand bundles that may read
/// or clobber memory.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < arg_size() && "Param index out of bounds!");

  if (Attrs.hasParamAttr(ArgNo, Kind))
    return true;

  const Function *F = getCalledFunction();
  if (!F)
    return false;

  if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
    return false;

  // Take into account mod/ref by operand bundles.
  switch (Kind) {
  case Attribute::ReadNone:
    return !hasReadingOperandBundles() && !hasClobberingOperandBundles();
  case Attribute::ReadOnly:
    return !hasClobberingOperandBundles();
  case Attribute::WriteOnly:
    return !hasReadingOperandBundles();
  default:
    return true;
  }
}
393 
394 bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
395   Value *V = getCalledOperand();
396   if (auto *CE = dyn_cast<ConstantExpr>(V))
397     if (CE->getOpcode() == BitCast)
398       V = CE->getOperand(0);
399 
400   if (auto *F = dyn_cast<Function>(V))
401     return F->getAttributes().hasFnAttr(Kind);
402 
403   return false;
404 }
405 
406 bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
407   Value *V = getCalledOperand();
408   if (auto *CE = dyn_cast<ConstantExpr>(V))
409     if (CE->getOpcode() == BitCast)
410       V = CE->getOperand(0);
411 
412   if (auto *F = dyn_cast<Function>(V))
413     return F->getAttributes().hasFnAttr(Kind);
414 
415   return false;
416 }
417 
// Fetch function attribute \p Kind from the callee (looking through a
// bitcast), returning an empty Attribute for indirect calls.  Templated over
// Attribute::AttrKind and StringRef; explicitly instantiated below.
template <typename AK>
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
  if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
    // getMemoryEffects() correctly combines memory effects from the call-site,
    // operand bundles and function.
    assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
  }

  Value *V = getCalledOperand();
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == BitCast)
      V = CE->getOperand(0);

  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().getFnAttr(Kind);

  return Attribute();
}

// Explicit instantiations: these are the only two key types used by callers.
template Attribute
CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
440 
441 void CallBase::getOperandBundlesAsDefs(
442     SmallVectorImpl<OperandBundleDef> &Defs) const {
443   for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
444     Defs.emplace_back(getOperandBundleAt(i));
445 }
446 
// Copy all bundle input values into the operand list starting at
// \p BeginIndex, and fill in the per-bundle BundleOpInfo records (interned
// tag plus [Begin, End) operand range).  Returns the iterator one past the
// last bundle operand written.
CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  // Walk the pre-allocated BundleOpInfo slots in lockstep with Bundles,
  // recording each bundle's interned tag and its operand index range.
  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}
472 
// Find the BundleOpInfo whose [Begin, End) operand range contains \p OpIdx.
CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
  /// When there aren't many bundles, we do a simple linear search.
  /// Else fall back to an interpolating binary search that uses the fact that
  /// bundles usually have a similar number of arguments to get faster
  /// convergence.
  if (bundle_op_info_end() - bundle_op_info_begin() < 8) {
    for (auto &BOI : bundle_op_infos())
      if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
        return BOI;

    llvm_unreachable("Did not find operand bundle for operand!");
  }

  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
  assert(bundle_op_info_end() - bundle_op_info_begin() > 0 &&
         OpIdx < std::prev(bundle_op_info_end())->End &&
         "The Idx isn't in the operand bundle");

  /// We need a fractional value below, and to avoid floating point numbers
  /// we use an integral value multiplied by this scaling constant.
  constexpr unsigned NumberScaling = 1024;

  bundle_op_iterator Begin = bundle_op_info_begin();
  bundle_op_iterator End = bundle_op_info_end();
  bundle_op_iterator Current = Begin;

  while (Begin != End) {
    // Estimate the (scaled) average number of operands per bundle in the
    // remaining range, then jump to the bundle OpIdx would land in if the
    // operands were distributed evenly.
    unsigned ScaledOperandPerBundle =
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
                       ScaledOperandPerBundle);
    if (Current >= End)
      Current = std::prev(End);
    assert(Current < End && Current >= Begin &&
           "the operand bundle doesn't cover every value in the range");
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
      break;
    // Narrow the search range around the probe, as in binary search.
    if (OpIdx >= Current->End)
      Begin = Current + 1;
    else
      End = Current;
  }

  assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
         "the operand bundle doesn't cover every value in the range");
  return *Current;
}
519 
// Return a call equivalent to \p CB but with bundle \p OB attached.  If a
// bundle with tag \p ID is already present, \p CB is returned unchanged;
// otherwise a new call instruction is created (the original is not erased).
CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
                                     OperandBundleDef OB,
                                     Instruction *InsertPt) {
  if (CB->getOperandBundle(ID))
    return CB;

  SmallVector<OperandBundleDef, 1> Bundles;
  CB->getOperandBundlesAsDefs(Bundles);
  Bundles.push_back(OB);
  return Create(CB, Bundles, InsertPt);
}
531 
532 CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
533                                         Instruction *InsertPt) {
534   SmallVector<OperandBundleDef, 1> Bundles;
535   bool CreateNew = false;
536 
537   for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
538     auto Bundle = CB->getOperandBundleAt(I);
539     if (Bundle.getTagID() == ID) {
540       CreateNew = true;
541       continue;
542     }
543     Bundles.emplace_back(Bundle);
544   }
545 
546   return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
547 }
548 
// Conservatively: any bundle other than ptrauth/kcfi may read memory
// (assume intrinsics are exempt).
bool CallBase::hasReadingOperandBundles() const {
  // Implementation note: this is a conservative implementation of operand
  // bundle semantics, where *any* non-assume operand bundle (other than
  // ptrauth) forces a callsite to be at least readonly.
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}

// Conservatively: any bundle other than deopt/funclet/ptrauth/kcfi may write
// memory (assume intrinsics are exempt).
bool CallBase::hasClobberingOperandBundles() const {
  return hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
              LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi}) &&
         getIntrinsicID() != Intrinsic::assume;
}
564 
// Combine memory effects from the call-site attributes, the callee function
// (when known), and any attached operand bundles.  Bundle effects are OR'd
// into the function's effects, then intersected with the call-site's.
MemoryEffects CallBase::getMemoryEffects() const {
  MemoryEffects ME = getAttributes().getMemoryEffects();
  if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
    MemoryEffects FnME = Fn->getMemoryEffects();
    if (hasOperandBundles()) {
      // TODO: Add a method to get memory effects for operand bundles instead.
      if (hasReadingOperandBundles())
        FnME |= MemoryEffects::readOnly();
      if (hasClobberingOperandBundles())
        FnME |= MemoryEffects::writeOnly();
    }
    ME &= FnME;
  }
  return ME;
}
// Record the memory effects as a call-site function attribute.
void CallBase::setMemoryEffects(MemoryEffects ME) {
  addFnAttr(Attribute::getWithMemoryEffects(getContext(), ME));
}
583 
/// Determine if the function does not access memory.
bool CallBase::doesNotAccessMemory() const {
  return getMemoryEffects().doesNotAccessMemory();
}
void CallBase::setDoesNotAccessMemory() {
  setMemoryEffects(MemoryEffects::none());
}

/// Determine if the function does not access or only reads memory.
bool CallBase::onlyReadsMemory() const {
  return getMemoryEffects().onlyReadsMemory();
}
void CallBase::setOnlyReadsMemory() {
  // Intersect with readOnly rather than overwrite, preserving any tighter
  // existing effects (e.g. argmemonly).
  setMemoryEffects(getMemoryEffects() & MemoryEffects::readOnly());
}

/// Determine if the function does not access or only writes memory.
bool CallBase::onlyWritesMemory() const {
  return getMemoryEffects().onlyWritesMemory();
}
void CallBase::setOnlyWritesMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::writeOnly());
}

/// Determine if the call can access memory only using pointers based
/// on its arguments.
bool CallBase::onlyAccessesArgMemory() const {
  return getMemoryEffects().onlyAccessesArgPointees();
}
void CallBase::setOnlyAccessesArgMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::argMemOnly());
}

/// Determine if the function may only access memory that is
///  inaccessible from the IR.
bool CallBase::onlyAccessesInaccessibleMemory() const {
  return getMemoryEffects().onlyAccessesInaccessibleMem();
}
void CallBase::setOnlyAccessesInaccessibleMemory() {
  setMemoryEffects(getMemoryEffects() & MemoryEffects::inaccessibleMemOnly());
}

/// Determine if the function may only access memory that is
///  either inaccessible from the IR or pointed to by its arguments.
bool CallBase::onlyAccessesInaccessibleMemOrArgMem() const {
  return getMemoryEffects().onlyAccessesInaccessibleOrArgMem();
}
void CallBase::setOnlyAccessesInaccessibleMemOrArgMem() {
  setMemoryEffects(getMemoryEffects() &
                   MemoryEffects::inaccessibleOrArgMemOnly());
}
635 
636 //===----------------------------------------------------------------------===//
637 //                        CallInst Implementation
638 //===----------------------------------------------------------------------===//
639 
// Shared constructor body: wire up arguments, bundle operands, and the callee
// operand.  The operand layout is [args..., bundle operands..., callee].
void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setCalledOperand(Func);

  // Bundle operands sit between the arguments and the callee operand.
  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}
668 
// Constructor body for zero-argument calls: the only operand is the callee.
void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}
678 
// Zero-argument call inserted before \p InsertBefore.  The single operand
// slot (op_end() - 1) holds the callee.
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}

// As above, but appended to the end of \p InsertAtEnd.
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Ty, Func, Name);
}
692 
// Copy-construct a call: clones attributes, tail-call kind, calling
// convention, all operands, and the bundle bookkeeping records.
CallInst::CallInst(const CallInst &CI)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
               CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  // BundleOpInfo records are plain metadata about operand ranges; copy them
  // verbatim so bundle queries work on the clone.
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}
705 
// Clone \p CI with a replacement set of operand bundles \p OpB, copying over
// every call-site property (tail kind, CC, attributes, debug location).
CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           Instruction *InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
                                 Args, OpB, CI->getName(), InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}
719 
// Update profile weight for call instruction by scaling it using the ratio
// of S/T. The meaning of "branch_weights" meta data for call instruction is
// transferred to represent call count.  Only "branch_weights" and "VP"
// (value-profile) !prof metadata are scaled; anything else is left alone.
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
  auto *ProfileData = getMetadata(LLVMContext::MD_prof);
  if (ProfileData == nullptr)
    return;

  auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
  if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
                        !ProfDataName->getString().equals("VP")))
    return;

  // A zero denominator would divide by zero below; bail out (the caller's
  // entry count is likely zero while the call still carries prof info).
  if (T == 0) {
    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
    return;
  }

  MDBuilder MDB(getContext());
  SmallVector<Metadata *, 3> Vals;
  Vals.push_back(ProfileData->getOperand(0));
  // 128-bit arithmetic avoids overflow of Count * S for 64-bit inputs.
  APInt APS(128, S), APT(128, T);
  if (ProfDataName->getString().equals("branch_weights") &&
      ProfileData->getNumOperands() > 0) {
    // Using APInt::div may be expensive, but most cases should fit 64 bits.
    APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
                       ->getValue()
                       .getZExtValue());
    Val *= APS;
    Vals.push_back(MDB.createConstant(
        ConstantInt::get(Type::getInt32Ty(getContext()),
                         Val.udiv(APT).getLimitedValue(UINT32_MAX))));
  } else if (ProfDataName->getString().equals("VP"))
    // Value-profile data is (key, count) pairs starting at operand 1.
    for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
      // The first value is the key of the value profile, which will not change.
      Vals.push_back(ProfileData->getOperand(i));
      uint64_t Count =
          mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
              ->getValue()
              .getZExtValue();
      // Don't scale the magic number.
      if (Count == NOMORE_ICP_MAGICNUM) {
        Vals.push_back(ProfileData->getOperand(i + 1));
        continue;
      }
      // Using APInt::div may be expensive, but most cases should fit 64 bits.
      APInt Val(128, Count);
      Val *= APS;
      Vals.push_back(MDB.createConstant(
          ConstantInt::get(Type::getInt64Ty(getContext()),
                           Val.udiv(APT).getLimitedValue())));
    }
  setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
}
778 
779 /// IsConstantOne - Return true only if val is constant int 1
780 static bool IsConstantOne(Value *val) {
781   assert(val && "IsConstantOne does not work with nullptr val");
782   const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
783   return CVal && CVal->isOne();
784 }
785 
/// Shared worker for the CallInst::CreateMalloc overloads.
/// Emits (conceptually):
///   %mallocsize = mul IntPtrTy AllocSize, ArraySize    ; only if needed
///   %malloccall = tail call i8* @malloc(%mallocsize)
///   %result     = bitcast i8* %malloccall to AllocTy*  ; only if needed
/// Exactly one of \p InsertBefore / \p InsertAtEnd must be non-null. In the
/// InsertAtEnd form, the malloc call is inserted into the block only when a
/// bitcast is required, and the bitcast itself is never inserted — finishing
/// the insertion is the caller's responsibility (see the public wrappers).
static Instruction *createMalloc(Instruction *InsertBefore,
                                 BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                 Type *AllocTy, Value *AllocSize,
                                 Value *ArraySize,
                                 ArrayRef<OperandBundleDef> OpB,
                                 Function *MallocF, const Twine &Name) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createMalloc needs either InsertBefore or InsertAtEnd");

  // malloc(type) becomes:
  //       bitcast (i8* malloc(typeSize)) to type*
  // malloc(type, arraySize) becomes:
  //       bitcast (i8* malloc(typeSize*arraySize)) to type*
  // Normalize ArraySize to a value of type IntPtrTy (defaulting to 1).
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy) {
    if (InsertBefore)
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertBefore);
    else
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertAtEnd);
  }

  // Fold the size multiplication where possible; only emit a mul instruction
  // when ArraySize is a non-constant, non-one value.
  if (!IsConstantOne(ArraySize)) {
    if (IsConstantOne(AllocSize)) {
      AllocSize = ArraySize;         // Operand * 1 = Operand
    } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
      Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
                                                     false /*ZExt*/);
      // Malloc arg is constant product of type size and array size
      AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
    } else {
      // Multiply type size by the array size...
      if (InsertBefore)
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertBefore);
      else
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertAtEnd);
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();
  Type *BPTy = Type::getInt8PtrTy(BB->getContext());
  FunctionCallee MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
  CallInst *MCall = nullptr;
  Instruction *Result = nullptr;
  if (InsertBefore) {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
                             InsertBefore);
    Result = MCall;
    if (Result->getType() != AllocPtrType)
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
  } else {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
    Result = MCall;
    if (Result->getType() != AllocPtrType) {
      // Insert the call so the (uninserted) bitcast can reference it; the
      // bitcast is returned uninserted — see the function-level comment.
      MCall->insertInto(InsertAtEnd, InsertAtEnd->end());
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name);
    }
  }
  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
    MCall->setCallingConv(F->getCallingConv());
    if (!F->returnDoesNotAlias())
      F->setReturnDoesNotAlias();
  }
  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return Result;
}
867 
868 /// CreateMalloc - Generate the IR for a call to malloc:
869 /// 1. Compute the malloc call's argument as the specified type's size,
870 ///    possibly multiplied by the array size if the array size is not
871 ///    constant 1.
872 /// 2. Call malloc with that argument.
873 /// 3. Bitcast the result of the malloc call to the specified type.
874 Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
875                                     Type *IntPtrTy, Type *AllocTy,
876                                     Value *AllocSize, Value *ArraySize,
877                                     Function *MallocF,
878                                     const Twine &Name) {
879   return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
880                       ArraySize, std::nullopt, MallocF, Name);
881 }
882 Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
883                                     Type *IntPtrTy, Type *AllocTy,
884                                     Value *AllocSize, Value *ArraySize,
885                                     ArrayRef<OperandBundleDef> OpB,
886                                     Function *MallocF,
887                                     const Twine &Name) {
888   return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
889                       ArraySize, OpB, MallocF, Name);
890 }
891 
892 /// CreateMalloc - Generate the IR for a call to malloc:
893 /// 1. Compute the malloc call's argument as the specified type's size,
894 ///    possibly multiplied by the array size if the array size is not
895 ///    constant 1.
896 /// 2. Call malloc with that argument.
897 /// 3. Bitcast the result of the malloc call to the specified type.
898 /// Note: This function does not add the bitcast to the basic block, that is the
899 /// responsibility of the caller.
900 Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
901                                     Type *IntPtrTy, Type *AllocTy,
902                                     Value *AllocSize, Value *ArraySize,
903                                     Function *MallocF, const Twine &Name) {
904   return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
905                       ArraySize, std::nullopt, MallocF, Name);
906 }
907 Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
908                                     Type *IntPtrTy, Type *AllocTy,
909                                     Value *AllocSize, Value *ArraySize,
910                                     ArrayRef<OperandBundleDef> OpB,
911                                     Function *MallocF, const Twine &Name) {
912   return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
913                       ArraySize, OpB, MallocF, Name);
914 }
915 
916 static Instruction *createFree(Value *Source,
917                                ArrayRef<OperandBundleDef> Bundles,
918                                Instruction *InsertBefore,
919                                BasicBlock *InsertAtEnd) {
920   assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
921          "createFree needs either InsertBefore or InsertAtEnd");
922   assert(Source->getType()->isPointerTy() &&
923          "Can not free something of nonpointer type!");
924 
925   BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
926   Module *M = BB->getParent()->getParent();
927 
928   Type *VoidTy = Type::getVoidTy(M->getContext());
929   Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
930   // prototype free as "void free(void*)"
931   FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
932   CallInst *Result = nullptr;
933   Value *PtrCast = Source;
934   if (InsertBefore) {
935     if (Source->getType() != IntPtrTy)
936       PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
937     Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
938   } else {
939     if (Source->getType() != IntPtrTy)
940       PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
941     Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
942   }
943   Result->setTailCall();
944   if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
945     Result->setCallingConv(F->getCallingConv());
946 
947   return Result;
948 }
949 
950 /// CreateFree - Generate the IR for a call to the builtin free function.
951 Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) {
952   return createFree(Source, std::nullopt, InsertBefore, nullptr);
953 }
954 Instruction *CallInst::CreateFree(Value *Source,
955                                   ArrayRef<OperandBundleDef> Bundles,
956                                   Instruction *InsertBefore) {
957   return createFree(Source, Bundles, InsertBefore, nullptr);
958 }
959 
960 /// CreateFree - Generate the IR for a call to the builtin free function.
961 /// Note: This function does not add the call to the basic block, that is the
962 /// responsibility of the caller.
963 Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
964   Instruction *FreeCall =
965       createFree(Source, std::nullopt, nullptr, InsertAtEnd);
966   assert(FreeCall && "CreateFree did not create a CallInst");
967   return FreeCall;
968 }
969 Instruction *CallInst::CreateFree(Value *Source,
970                                   ArrayRef<OperandBundleDef> Bundles,
971                                   BasicBlock *InsertAtEnd) {
972   Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
973   assert(FreeCall && "CreateFree did not create a CallInst");
974   return FreeCall;
975 }
976 
977 //===----------------------------------------------------------------------===//
978 //                        InvokeInst Implementation
979 //===----------------------------------------------------------------------===//
980 
/// Fill in an invoke's operands: arguments first, then bundle operands, with
/// the normal dest, unwind dest, and callee stored as the trailing operands.
/// The caller must have allocated exactly ComputeNumOperands(...) operands.
void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  // The 3 trailing operands are the normal dest, unwind dest, and callee.
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}
1015 
// Copy constructor: allocates the same number of operands (placed so that
// op_end() lines up with the end of the object) and copies operands, bundle
// bookkeeping, calling convention, and optional flags verbatim from II.
InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
               II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}
1026 
1027 InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
1028                                Instruction *InsertPt) {
1029   std::vector<Value *> Args(II->arg_begin(), II->arg_end());
1030 
1031   auto *NewII = InvokeInst::Create(
1032       II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
1033       II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
1034   NewII->setCallingConv(II->getCallingConv());
1035   NewII->SubclassOptionalData = II->SubclassOptionalData;
1036   NewII->setAttributes(II->getAttributes());
1037   NewII->setDebugLoc(II->getDebugLoc());
1038   return NewII;
1039 }
1040 
1041 LandingPadInst *InvokeInst::getLandingPadInst() const {
1042   return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
1043 }
1044 
1045 //===----------------------------------------------------------------------===//
1046 //                        CallBrInst Implementation
1047 //===----------------------------------------------------------------------===//
1048 
/// Fill in a callbr's operands: arguments first, then bundle operands, with
/// the default dest, indirect dests, and callee stored as trailing operands.
void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), IndirectDests.size(),
                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  std::copy(Args.begin(), Args.end(), op_begin());
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, IndirectDests[i]);
  setCalledOperand(Fn);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  // Trailing operands: default dest + the indirect dests + the callee.
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}
1087 
// Copy constructor: mirrors the InvokeInst copy constructor, additionally
// preserving the indirect-destination count.
CallBrInst::CallBrInst(const CallBrInst &CBI)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
               CBI.getNumOperands()) {
  setCallingConv(CBI.getCallingConv());
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}
1099 
1100 CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
1101                                Instruction *InsertPt) {
1102   std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
1103 
1104   auto *NewCBI = CallBrInst::Create(
1105       CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
1106       CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
1107   NewCBI->setCallingConv(CBI->getCallingConv());
1108   NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
1109   NewCBI->setAttributes(CBI->getAttributes());
1110   NewCBI->setDebugLoc(CBI->getDebugLoc());
1111   NewCBI->NumIndirectDests = CBI->NumIndirectDests;
1112   return NewCBI;
1113 }
1114 
1115 //===----------------------------------------------------------------------===//
1116 //                        ReturnInst Implementation
1117 //===----------------------------------------------------------------------===//
1118 
// Copy constructor: a ret has 0 operands (ret void) or 1 (the return value).
ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}
1127 
// `!!retVal` encodes the operand count: 1 when a value is returned,
// 0 for `ret void`.
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}
1135 
// Insert-at-end twin of the constructor above; same `!!retVal` encoding.
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}
1143 
// Zero-operand form: builds a `ret void` at the end of the block.
ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}
1147 
1148 //===----------------------------------------------------------------------===//
1149 //                        ResumeInst Implementation
1150 //===----------------------------------------------------------------------===//
1151 
// Copy constructor: a resume always has exactly one operand (the exception).
ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}
1157 
// Build a `resume Exn`, inserted before \p InsertBefore.
ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}
1163 
// Build a `resume Exn`, appended to \p InsertAtEnd.
ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}
1169 
1170 //===----------------------------------------------------------------------===//
1171 //                        CleanupReturnInst Implementation
1172 //===----------------------------------------------------------------------===//
1173 
// Copy constructor: operand 0 is the cleanuppad; operand 1 (the unwind dest)
// exists only when CRI has one. The opaque subclass bits carry the
// unwind-dest flag, so they are copied before the operands are read.
CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  setSubclassData<Instruction::OpaqueField>(
      CRI.getSubclassData<Instruction::OpaqueField>());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}
1185 
1186 void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
1187   if (UnwindBB)
1188     setSubclassData<UnwindDestField>(true);
1189 
1190   Op<0>() = CleanupPad;
1191   if (UnwindBB)
1192     Op<1>() = UnwindBB;
1193 }
1194 
// \p Values is 1 (pad only) or 2 (pad + unwind dest); the caller computes it.
CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}
1203 
// Insert-at-end twin of the constructor above.
CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}
1212 
1213 //===----------------------------------------------------------------------===//
1214 //                        CatchReturnInst Implementation
1215 //===----------------------------------------------------------------------===//
// Operand layout: 0 = catchpad, 1 = successor block.
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}
1220 
// Copy constructor: a catchret always has exactly two operands.
CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}
1227 
// Build `catchret from CatchPad to BB`, inserted before \p InsertBefore.
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}
1235 
// Insert-at-end twin of the constructor above.
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}
1243 
1244 //===----------------------------------------------------------------------===//
1245 //                       CatchSwitchInst Implementation
1246 //===----------------------------------------------------------------------===//
1247 
// \p NumReservedValues counts anticipated handlers; the reservation passed to
// init() adds 1 for the parent-pad operand and 1 more when an unwind dest is
// present. Operands are hung off (allocated by init), hence nullptr/0 here.
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}
1259 
// Insert-at-end twin of the constructor above.
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}
1270 
// Copy constructor: init() sets operand 0 (the parent pad) and allocates the
// hung-off uses; the loop below copies the remaining operands (unwind dest,
// if any, and all handlers), starting at index 1.
CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}
1281 
/// Allocate the hung-off use list (sized \p NumReservedValues) and set the
/// parent pad (operand 0) plus the optional unwind destination (operand 1,
/// flagged in the subclass data).
void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  // Live operand count starts at 1 (pad) or 2 (pad + unwind dest); handlers
  // are appended later via addHandler().
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}
1296 
/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  // NOTE(review): '/' binds tighter than '+', so for Size == 1 this reserves
  // exactly 2 * NumOperands — enough to satisfy the guard above, but confirm
  // the precedence is intended (vs. ((NumOperands + Size) / 2) * 2).
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}
1307 
/// Append \p Handler as a new operand, growing the hung-off use list first
/// if the reserved space is exhausted.
void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}
1315 
/// Remove the handler at \p HI by shifting every later handler down one use
/// slot, then shrinking the operand count by one.
void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}
1326 
1327 //===----------------------------------------------------------------------===//
1328 //                        FuncletPadInst Implementation
1329 //===----------------------------------------------------------------------===//
/// Fill in a funclet pad's operands: the args first, with the parent pad
/// stored as the trailing operand (via setParentPad).
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}
1337 
// Copy constructor: copies all operands, then re-sets the parent pad.
FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}
1346 
// \p Values is the total operand count (Args.size() + 1 for the parent pad);
// the caller computes it.
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}
1355 
// Insert-at-end twin of the constructor above.
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}
1364 
1365 //===----------------------------------------------------------------------===//
1366 //                      UnreachableInst Implementation
1367 //===----------------------------------------------------------------------===//
1368 
// `unreachable` has no operands; inserted before \p InsertBefore.
UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}
// Insert-at-end twin of the constructor above.
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}
1376 
1377 //===----------------------------------------------------------------------===//
1378 //                        BranchInst Implementation
1379 //===----------------------------------------------------------------------===//
1380 
1381 void BranchInst::AssertOK() {
1382   if (isConditional())
1383     assert(getCondition()->getType()->isIntegerTy(1) &&
1384            "May only branch on boolean predicates!");
1385 }
1386 
// Unconditional branch: one operand, the destination, stored last (Op<-1>).
BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}
1394 
// Conditional branch: three operands laid out [Cond, IfFalse, IfTrue], i.e.
// the taken-successor is stored last (Op<-1>) as in the unconditional form.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}
1408 
// Insert-at-end twin of the unconditional constructor.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}
1415 
// Insert-at-end twin of the conditional constructor; layout [Cond, F, T].
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}
1428 
// Copy constructor: handles both the 1-operand (unconditional) and the
// 3-operand (conditional) forms.
BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  // Assign in order of operand index to make use-list order predictable.
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  Op<-1>() = BI.Op<-1>();
  SubclassOptionalData = BI.SubclassOptionalData;
}
1442 
/// Exchange the true and false destinations of a conditional branch, keeping
/// any branch-weight profile metadata in sync.
void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  // Swap the Use objects so use-lists stay consistent.
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}
1452 
1453 //===----------------------------------------------------------------------===//
1454 //                        AllocaInst Implementation
1455 //===----------------------------------------------------------------------===//
1456 
1457 static Value *getAISize(LLVMContext &Context, Value *Amt) {
1458   if (!Amt)
1459     Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1460   else {
1461     assert(!isa<BasicBlock>(Amt) &&
1462            "Passed basic block into allocation size parameter! Use other ctor");
1463     assert(Amt->getType()->isIntegerTy() &&
1464            "Allocation array size is not an integer!");
1465   }
1466   return Amt;
1467 }
1468 
1469 static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB) {
1470   assert(BB && "Insertion BB cannot be null when alignment not provided!");
1471   assert(BB->getParent() &&
1472          "BB must be in a Function when alignment not provided!");
1473   const DataLayout &DL = BB->getModule()->getDataLayout();
1474   return DL.getPrefTypeAlign(Ty);
1475 }
1476 
1477 static Align computeAllocaDefaultAlign(Type *Ty, Instruction *I) {
1478   assert(I && "Insertion position cannot be null when alignment not provided!");
1479   return computeAllocaDefaultAlign(Ty, I->getParent());
1480 }
1481 
// Delegates with a null array size (single element); see getAISize().
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
  : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1485 
// Insert-at-end twin of the constructor above.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
  : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}
1489 
// Delegates with the type's preferred alignment as the default.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}
1495 
// Insert-at-end twin of the constructor above.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertAtEnd), Name,
                 InsertAtEnd) {}
1501 
// Primary constructor (insert-before form): all other AllocaInst
// constructors funnel here. The result type is a pointer into AddrSpace;
// the single operand is the (normalized) array size.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       Instruction *InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}
1512 
// Primary constructor (insert-at-end form); mirrors the insert-before
// overload above.
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name, BasicBlock *InsertAtEnd)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}
1522 
1523 
1524 bool AllocaInst::isArrayAllocation() const {
1525   if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
1526     return !CI->isOne();
1527   return true;
1528 }
1529 
/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size.  If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block, and not used with the inalloca convention
  // (inalloca allocas are materialized dynamically at call sites).
  const BasicBlock *Parent = getParent();
  return Parent->isEntryBlock() && !isUsedWithInAlloca();
}
1541 
1542 //===----------------------------------------------------------------------===//
1543 //                           LoadInst Implementation
1544 //===----------------------------------------------------------------------===//
1545 
// Sanity-check the load's single operand (debug builds only).
void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
}
1550 
1551 static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
1552   assert(BB && "Insertion BB cannot be null when alignment not provided!");
1553   assert(BB->getParent() &&
1554          "BB must be in a Function when alignment not provided!");
1555   const DataLayout &DL = BB->getModule()->getDataLayout();
1556   return DL.getABITypeAlign(Ty);
1557 }
1558 
1559 static Align computeLoadStoreDefaultAlign(Type *Ty, Instruction *I) {
1560   assert(I && "Insertion position cannot be null when alignment not provided!");
1561   return computeLoadStoreDefaultAlign(Ty, I->getParent());
1562 }
1563 
// Simple non-volatile load; delegates down the constructor chain.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

// As above, but inserting at the end of a block.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}
1571 
// Load without explicit alignment: derive the ABI default from the
// insertion point's module DataLayout.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}

// As above, but inserting at the end of a block.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertAE), InsertAE) {}
1581 
// Aligned, non-atomic load: delegates with NotAtomic ordering in the
// default (System) sync scope.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

// As above, but inserting at the end of a block.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}
1591 
// Fully-specified load constructor (insert-before form): every other
// LoadInst constructor funnels into this or its insert-at-end twin.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}
1602 
// Fully-specified load constructor (insert-at-end form); mirrors the
// insert-before overload above.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}
1613 
1614 //===----------------------------------------------------------------------===//
1615 //                           StoreInst Implementation
1616 //===----------------------------------------------------------------------===//
1617 
// Sanity-check the store's operands (debug builds only): operand 0 is the
// stored value, operand 1 is the destination pointer.
void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
}
1623 
// Simple non-volatile store; delegates down the constructor chain.
StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

// As above, but inserting at the end of a block.
StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}
1629 
// Store without explicit alignment: derive the ABI default for the *stored
// value's* type from the insertion point's module DataLayout.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
                InsertBefore) {}

// As above, but inserting at the end of a block.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertAtEnd),
                InsertAtEnd) {}
1641 
// Aligned, non-atomic store: delegates with NotAtomic ordering in the
// default (System) sync scope.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

// As above, but inserting at the end of a block.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}
1651 
// Fully-specified store constructor (insert-before form). A store produces
// no value, so the instruction's type is void; operand 0 is the stored
// value and operand 1 the destination pointer.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertBefore) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}
1665 
// Fully-specified store constructor (insert-at-end form); mirrors the
// insert-before overload above.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     AtomicOrdering Order, SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(val->getContext()), Store,
                  OperandTraits<StoreInst>::op_begin(this),
                  OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
  Op<0>() = val;
  Op<1>() = addr;
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
}
1679 
1680 
1681 //===----------------------------------------------------------------------===//
1682 //                       AtomicCmpXchgInst Implementation
1683 //===----------------------------------------------------------------------===//
1684 
// Shared constructor logic for cmpxchg: install the three operands and
// record the orderings, sync scope, and alignment.
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             Align Alignment, AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  // Validate after the operands are installed, since the checks read them
  // back via getOperand().
  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() == getOperand(2)->getType() &&
         "Cmp type and NewVal type must be same!");
}
1704 
// cmpxchg constructor (insert-before form). The result is a two-element
// struct: { loaded value, i1 success flag }.
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     Instruction *InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}
1717 
// cmpxchg constructor (insert-at-end form); mirrors the insert-before
// overload above.
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}
1730 
1731 //===----------------------------------------------------------------------===//
1732 //                       AtomicRMWInst Implementation
1733 //===----------------------------------------------------------------------===//
1734 
1735 void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1736                          Align Alignment, AtomicOrdering Ordering,
1737                          SyncScope::ID SSID) {
1738   assert(Ordering != AtomicOrdering::NotAtomic &&
1739          "atomicrmw instructions can only be atomic.");
1740   assert(Ordering != AtomicOrdering::Unordered &&
1741          "atomicrmw instructions cannot be unordered.");
1742   Op<0>() = Ptr;
1743   Op<1>() = Val;
1744   setOperation(Operation);
1745   setOrdering(Ordering);
1746   setSyncScopeID(SSID);
1747   setAlignment(Alignment);
1748 
1749   assert(getOperand(0) && getOperand(1) &&
1750          "All operands must be non-null!");
1751   assert(getOperand(0)->getType()->isPointerTy() &&
1752          "Ptr must have pointer type!");
1753   assert(Ordering != AtomicOrdering::NotAtomic &&
1754          "AtomicRMW instructions must be atomic!");
1755 }
1756 
// atomicrmw constructor (insert-before form). The result type equals the
// value operand's type (the old memory contents are returned).
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, Instruction *InsertBefore)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}
1765 
// atomicrmw constructor (insert-at-end form); mirrors the insert-before
// overload above.
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, BasicBlock *InsertAtEnd)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}
1774 
1775 StringRef AtomicRMWInst::getOperationName(BinOp Op) {
1776   switch (Op) {
1777   case AtomicRMWInst::Xchg:
1778     return "xchg";
1779   case AtomicRMWInst::Add:
1780     return "add";
1781   case AtomicRMWInst::Sub:
1782     return "sub";
1783   case AtomicRMWInst::And:
1784     return "and";
1785   case AtomicRMWInst::Nand:
1786     return "nand";
1787   case AtomicRMWInst::Or:
1788     return "or";
1789   case AtomicRMWInst::Xor:
1790     return "xor";
1791   case AtomicRMWInst::Max:
1792     return "max";
1793   case AtomicRMWInst::Min:
1794     return "min";
1795   case AtomicRMWInst::UMax:
1796     return "umax";
1797   case AtomicRMWInst::UMin:
1798     return "umin";
1799   case AtomicRMWInst::FAdd:
1800     return "fadd";
1801   case AtomicRMWInst::FSub:
1802     return "fsub";
1803   case AtomicRMWInst::FMax:
1804     return "fmax";
1805   case AtomicRMWInst::FMin:
1806     return "fmin";
1807   case AtomicRMWInst::UIncWrap:
1808     return "uinc_wrap";
1809   case AtomicRMWInst::UDecWrap:
1810     return "udec_wrap";
1811   case AtomicRMWInst::BAD_BINOP:
1812     return "<invalid operation>";
1813   }
1814 
1815   llvm_unreachable("invalid atomicrmw operation");
1816 }
1817 
1818 //===----------------------------------------------------------------------===//
1819 //                       FenceInst Implementation
1820 //===----------------------------------------------------------------------===//
1821 
// Fence constructor (insert-before form). A fence has no operands and
// produces no value; only the ordering and sync scope are recorded.
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     Instruction *InsertBefore)
  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

// Fence constructor (insert-at-end form); mirrors the overload above.
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
  : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}
1837 
1838 //===----------------------------------------------------------------------===//
1839 //                       GetElementPtrInst Implementation
1840 //===----------------------------------------------------------------------===//
1841 
// Shared constructor logic: operand 0 is the base pointer, followed by the
// index list in order. The caller must have sized the operand array already.
void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
                             const Twine &Name) {
  assert(getNumOperands() == 1 + IdxList.size() &&
         "NumOperands not initialized?");
  Op<0>() = Ptr;
  llvm::copy(IdxList, op_begin() + 1);
  setName(Name);
}
1850 
// Copy constructor: clones the operand list, the cached source/result
// element types, and the optional flags (e.g. inbounds).
GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
    : Instruction(GEPI.getType(), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) -
                      GEPI.getNumOperands(),
                  GEPI.getNumOperands()),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
  SubclassOptionalData = GEPI.SubclassOptionalData;
}
1861 
1862 Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) {
1863   if (auto *Struct = dyn_cast<StructType>(Ty)) {
1864     if (!Struct->indexValid(Idx))
1865       return nullptr;
1866     return Struct->getTypeAtIndex(Idx);
1867   }
1868   if (!Idx->getType()->isIntOrIntVectorTy())
1869     return nullptr;
1870   if (auto *Array = dyn_cast<ArrayType>(Ty))
1871     return Array->getElementType();
1872   if (auto *Vector = dyn_cast<VectorType>(Ty))
1873     return Vector->getElementType();
1874   return nullptr;
1875 }
1876 
1877 Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) {
1878   if (auto *Struct = dyn_cast<StructType>(Ty)) {
1879     if (Idx >= Struct->getNumElements())
1880       return nullptr;
1881     return Struct->getElementType(Idx);
1882   }
1883   if (auto *Array = dyn_cast<ArrayType>(Ty))
1884     return Array->getElementType();
1885   if (auto *Vector = dyn_cast<VectorType>(Ty))
1886     return Vector->getElementType();
1887   return nullptr;
1888 }
1889 
// Walk a GEP index list and return the type ultimately addressed, or null
// if any step is invalid. The first index merely offsets the base pointer
// and does not change the element type, so it is skipped via slice(1).
template <typename IndexTy>
static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
  if (IdxList.empty())
    return Ty;
  for (IndexTy V : IdxList.slice(1)) {
    Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
    if (!Ty)
      return Ty;
  }
  return Ty;
}
1901 
// Public entry points for index-list type resolution; each instantiates the
// shared template above for one index representation.
Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty,
                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}
1914 
1915 /// hasAllZeroIndices - Return true if all of the indices of this GEP are
1916 /// zeros.  If so, the result pointer and the first operand have the same
1917 /// value, just potentially different types.
1918 bool GetElementPtrInst::hasAllZeroIndices() const {
1919   for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1920     if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
1921       if (!CI->isZero()) return false;
1922     } else {
1923       return false;
1924     }
1925   }
1926   return true;
1927 }
1928 
1929 /// hasAllConstantIndices - Return true if all of the indices of this GEP are
1930 /// constant integers.  If so, the result pointer and the first operand have
1931 /// a constant offset between them.
1932 bool GetElementPtrInst::hasAllConstantIndices() const {
1933   for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1934     if (!isa<ConstantInt>(getOperand(i)))
1935       return false;
1936   }
1937   return true;
1938 }
1939 
// The following accessors all delegate to the shared GEPOperator view of
// this instruction, so GEP instructions and GEP constant expressions share
// one implementation.
void GetElementPtrInst::setIsInBounds(bool B) {
  cast<GEPOperator>(this)->setIsInBounds(B);
}

bool GetElementPtrInst::isInBounds() const {
  return cast<GEPOperator>(this)->isInBounds();
}

bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
                                                 APInt &Offset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
}

bool GetElementPtrInst::collectOffset(
    const DataLayout &DL, unsigned BitWidth,
    MapVector<Value *, APInt> &VariableOffsets,
    APInt &ConstantOffset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
                                                ConstantOffset);
}
1962 
1963 //===----------------------------------------------------------------------===//
1964 //                           ExtractElementInst Implementation
1965 //===----------------------------------------------------------------------===//
1966 
// extractelement constructor (insert-before form). The result type is the
// element type of the source vector.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       Instruction *InsertBef)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}
1980 
// extractelement constructor (insert-at-end form); mirrors the
// insert-before overload above.
ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       BasicBlock *InsertAE)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertAE) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");

  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}
1995 
1996 bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1997   if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1998     return false;
1999   return true;
2000 }
2001 
2002 //===----------------------------------------------------------------------===//
2003 //                           InsertElementInst Implementation
2004 //===----------------------------------------------------------------------===//
2005 
// insertelement constructor (insert-before form). The result type is the
// same vector type as the first operand.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     Instruction *InsertBef)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}
2019 
// insertelement constructor (insert-at-end form); mirrors the insert-before
// overload above.
InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     BasicBlock *InsertAE)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertAE) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");

  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}
2034 
2035 bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
2036                                         const Value *Index) {
2037   if (!Vec->getType()->isVectorTy())
2038     return false;   // First operand of insertelement must be vector type.
2039 
2040   if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
2041     return false;// Second operand of insertelement must be vector element type.
2042 
2043   if (!Index->getType()->isIntegerTy())
2044     return false;  // Third operand of insertelement must be i32.
2045   return true;
2046 }
2047 
2048 //===----------------------------------------------------------------------===//
2049 //                      ShuffleVectorInst Implementation
2050 //===----------------------------------------------------------------------===//
2051 
// Build a poison stand-in of V's type for the single-input shufflevector
// constructors, whose second vector operand is unused.
static Value *createPlaceholderForShuffleVector(Value *V) {
  assert(V && "Cannot create placeholder of nullptr V");
  return PoisonValue::get(V->getType());
}
2056 
// Single-input shuffles (constant Mask): the second vector is a poison
// placeholder of V1's type.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
                                     Instruction *InsertBefore)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertBefore) {}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
                                     BasicBlock *InsertAtEnd)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertAtEnd) {}
2066 
// Single-input shuffles (integer Mask): as above, the second vector is a
// poison placeholder of V1's type.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertBefore) {}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
                        InsertAtEnd) {}
2077 
// Two-input shuffle with a constant Mask (insert-before form). The result
// vector takes its element type from V1 and its length from the mask; the
// constant mask is decoded into the integer ShuffleMask representation.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}
2096 
// Two-input shuffle with a constant Mask (insert-at-end form); mirrors the
// insert-before overload above.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}
2114 
// Two-input shuffle with an integer Mask (insert-before form). The result
// length is the mask length; scalability follows V1's vector type.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}
2130 
// Two-input shuffle with an integer Mask (insert-at-end form); mirrors the
// insert-before overload above.
ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}
2146 
// Swap the two input vectors and rewrite the mask so the shuffle's result
// is unchanged: every mask element that selected from the first operand is
// shifted to select the same lane of the second, and vice versa.
void ShuffleVectorInst::commute() {
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = ShuffleMask.size();
  SmallVector<int, 16> NewMask(NumMaskElts);
  for (int i = 0; i != NumMaskElts; ++i) {
    int MaskElt = getMaskValue(i);
    // Poison lanes stay poison regardless of operand order.
    if (MaskElt == PoisonMaskElem) {
      NewMask[i] = PoisonMaskElem;
      continue;
    }
    assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
    // Indices [0, NumOpElts) refer to op0, [NumOpElts, 2*NumOpElts) to op1;
    // adding/subtracting NumOpElts flips which operand a lane reads.
    MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
    NewMask[i] = MaskElt;
  }
  setShuffleMask(NewMask);
  Op<0>().swap(Op<1>());
}
2164 
// Validate a shuffle with an integer mask: both inputs must be vectors of
// the same type and every mask element must be in range (or poison).
bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        ArrayRef<int> Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
    return false;

  // Make sure the mask elements make sense.
  int V1Size =
      cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
  for (int Elem : Mask)
    if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
      return false;

  // Scalable vectors only support an all-equal mask selecting lane 0 (a
  // splat) or an all-poison mask, since per-lane masks need a fixed length.
  if (isa<ScalableVectorType>(V1->getType()))
    if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
      return false;

  return true;
}
2184 
// Validate a shuffle with a constant-value mask: inputs must be matching
// vectors, and the mask must be a vector of i32 constants (or undef/zero)
// whose elements index into the concatenation of V1 and V2.
bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        const Value *Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
    return false;

  // Mask must be vector of i32, and must be the same kind of vector as the
  // input vectors
  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
      isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
    return false;

  // Check to see if Mask is valid.
  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
    return true;

  // A ConstantVector mask: every element must be an in-range i32 constant
  // or undef.
  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
    for (Value *Op : MV->operands()) {
      if (auto *CI = dyn_cast<ConstantInt>(Op)) {
        if (CI->uge(V1Size*2))
          return false;
      } else if (!isa<UndefValue>(Op)) {
        return false;
      }
    }
    return true;
  }

  // A ConstantDataSequential mask: bounds-check each packed element.
  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
    for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
         i != e; ++i)
      if (CDS->getElementAsInteger(i) >= V1Size*2)
        return false;
    return true;
  }

  // Any other constant form is not a valid shuffle mask.
  return false;
}
2226 
// Decode a constant shuffle mask into a vector of ints, using -1 for
// undef/unknown lanes.
void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
                                       SmallVectorImpl<int> &Result) {
  ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();

  // zeroinitializer decodes to all-zero lanes for both fixed and scalable
  // vectors.
  if (isa<ConstantAggregateZero>(Mask)) {
    Result.resize(EC.getKnownMinValue(), 0);
    return;
  }

  Result.reserve(EC.getKnownMinValue());

  // Scalable masks can only be undef or zeroinitializer; represent them as
  // a known-min-length run of -1 or 0 respectively.
  if (EC.isScalable()) {
    assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
           "Scalable vector shuffle mask must be undef or zeroinitializer");
    int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
    for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
      Result.emplace_back(MaskVal);
    return;
  }

  unsigned NumElts = EC.getKnownMinValue();

  // Fast path: packed constant data can be read element-by-element.
  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0; i != NumElts; ++i)
      Result.push_back(CDS->getElementAsInteger(i));
    return;
  }
  // General path: pull each aggregate element; undef lanes become -1.
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = Mask->getAggregateElement(i);
    Result.push_back(isa<UndefValue>(C) ? -1 :
                     cast<ConstantInt>(C)->getZExtValue());
  }
}
2260 
2261 void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {
2262   ShuffleMask.assign(Mask.begin(), Mask.end());
2263   ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
2264 }
2265 
2266 Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2267                                                           Type *ResultTy) {
2268   Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
2269   if (isa<ScalableVectorType>(ResultTy)) {
2270     assert(all_equal(Mask) && "Unexpected shuffle");
2271     Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
2272     if (Mask[0] == 0)
2273       return Constant::getNullValue(VecTy);
2274     return UndefValue::get(VecTy);
2275   }
2276   SmallVector<Constant *, 16> MaskConst;
2277   for (int Elem : Mask) {
2278     if (Elem == PoisonMaskElem)
2279       MaskConst.push_back(PoisonValue::get(Int32Ty));
2280     else
2281       MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
2282   }
2283   return ConstantVector::get(MaskConst);
2284 }
2285 
2286 static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2287   assert(!Mask.empty() && "Shuffle mask must contain elements");
2288   bool UsesLHS = false;
2289   bool UsesRHS = false;
2290   for (int I : Mask) {
2291     if (I == -1)
2292       continue;
2293     assert(I >= 0 && I < (NumOpElts * 2) &&
2294            "Out-of-bounds shuffle mask element");
2295     UsesLHS |= (I < NumOpElts);
2296     UsesRHS |= (I >= NumOpElts);
2297     if (UsesLHS && UsesRHS)
2298       return false;
2299   }
2300   // Allow for degenerate case: completely undef mask means neither source is used.
2301   return UsesLHS || UsesRHS;
2302 }
2303 
2304 bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) {
2305   // We don't have vector operand size information, so assume operands are the
2306   // same size as the mask.
2307   return isSingleSourceMaskImpl(Mask, Mask.size());
2308 }
2309 
2310 static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2311   if (!isSingleSourceMaskImpl(Mask, NumOpElts))
2312     return false;
2313   for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
2314     if (Mask[i] == -1)
2315       continue;
2316     if (Mask[i] != i && Mask[i] != (NumOpElts + i))
2317       return false;
2318   }
2319   return true;
2320 }
2321 
2322 bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) {
2323   // We don't have vector operand size information, so assume operands are the
2324   // same size as the mask.
2325   return isIdentityMaskImpl(Mask, Mask.size());
2326 }
2327 
2328 bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) {
2329   if (!isSingleSourceMask(Mask))
2330     return false;
2331 
2332   // The number of elements in the mask must be at least 2.
2333   int NumElts = Mask.size();
2334   if (NumElts < 2)
2335     return false;
2336 
2337   for (int i = 0; i < NumElts; ++i) {
2338     if (Mask[i] == -1)
2339       continue;
2340     if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i))
2341       return false;
2342   }
2343   return true;
2344 }
2345 
2346 bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask) {
2347   if (!isSingleSourceMask(Mask))
2348     return false;
2349   for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
2350     if (Mask[i] == -1)
2351       continue;
2352     if (Mask[i] != 0 && Mask[i] != NumElts)
2353       return false;
2354   }
2355   return true;
2356 }
2357 
2358 bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask) {
2359   // Select is differentiated from identity. It requires using both sources.
2360   if (isSingleSourceMask(Mask))
2361     return false;
2362   for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
2363     if (Mask[i] == -1)
2364       continue;
2365     if (Mask[i] != i && Mask[i] != (NumElts + i))
2366       return false;
2367   }
2368   return true;
2369 }
2370 
// True if the mask performs one half of a vector transpose (the trn1/trn2
// pattern): lanes alternate between the two operands at matching even or odd
// positions. Note that undef lanes are rejected below (step 4).
bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) {
  // Example masks that will return true:
  // v1 = <a, b, c, d>
  // v2 = <e, f, g, h>
  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>

  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
  int NumElts = Mask.size();
  if (NumElts < 2 || !isPowerOf2_32(NumElts))
    return false;

  // 2. The first element of the mask must be either a 0 or a 1.
  if (Mask[0] != 0 && Mask[0] != 1)
    return false;

  // 3. The difference between the first 2 elements must be equal to the
  // number of elements in the mask.
  if ((Mask[1] - Mask[0]) != NumElts)
    return false;

  // 4. The difference between consecutive even-numbered and odd-numbered
  // elements must be equal to 2.
  for (int i = 2; i < NumElts; ++i) {
    int MaskEltVal = Mask[i];
    if (MaskEltVal == -1)
      return false;
    int MaskEltPrevVal = Mask[i - 2];
    if (MaskEltVal - MaskEltPrevVal != 2)
      return false;
  }
  return true;
}
2404 
// True if the mask selects a contiguous run of lanes starting at some offset
// into the concatenation of the two operands (a "splice"). On success, Index
// receives that starting offset. Undef lanes may appear anywhere in the run.
bool ShuffleVectorInst::isSpliceMask(ArrayRef<int> Mask, int &Index) {
  // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
  int StartIndex = -1;
  for (int I = 0, E = Mask.size(); I != E; ++I) {
    int MaskEltVal = Mask[I];
    if (MaskEltVal == -1)
      continue;

    if (StartIndex == -1) {
      // Don't support a StartIndex that begins in the second input, or if the
      // first non-undef index would access below the StartIndex.
      if (MaskEltVal < I || E <= (MaskEltVal - I))
        return false;

      // Infer the offset from the first defined lane's position.
      StartIndex = MaskEltVal - I;
      continue;
    }

    // Splice is sequential starting from StartIndex.
    if (MaskEltVal != (StartIndex + I))
      return false;
  }

  // An all-undef mask fixes no offset, so it is not a splice.
  if (StartIndex == -1)
    return false;

  // NOTE: This accepts StartIndex == 0 (COPY).
  Index = StartIndex;
  return true;
}
2435 
// True if the mask extracts a contiguous subvector (of Mask.size() lanes)
// from one NumSrcElts-wide source. On success, Index receives the starting
// lane of the extraction.
bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
                                               int NumSrcElts, int &Index) {
  // Must extract from a single source.
  if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Must be smaller (else this is an Identity shuffle).
  if (NumSrcElts <= (int)Mask.size())
    return false;

  // Find start of extraction, accounting that we may start with an UNDEF.
  int SubIndex = -1;
  for (int i = 0, e = Mask.size(); i != e; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    // M % NumSrcElts maps second-operand indices back into [0, NumSrcElts);
    // every defined lane must agree on the same offset.
    int Offset = (M % NumSrcElts) - i;
    if (0 <= SubIndex && SubIndex != Offset)
      return false;
    SubIndex = Offset;
  }

  // Reject all-undef masks (no offset found) and extractions that would read
  // past the end of the source.
  if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
    Index = SubIndex;
    return true;
  }
  return false;
}
2464 
// True if the mask inserts a contiguous subvector of one source into the
// other source (which otherwise stays in place). On success, NumSubElts and
// Index receive the inserted subvector's length and starting lane.
bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,
                                              int NumSrcElts, int &NumSubElts,
                                              int &Index) {
  int NumMaskElts = Mask.size();

  // Don't try to match if we're shuffling to a smaller size.
  if (NumMaskElts < NumSrcElts)
    return false;

  // TODO: We don't recognize self-insertion/widening.
  if (isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Determine which mask elements are attributed to which source.
  APInt UndefElts = APInt::getZero(NumMaskElts);
  APInt Src0Elts = APInt::getZero(NumMaskElts);
  APInt Src1Elts = APInt::getZero(NumMaskElts);
  bool Src0Identity = true;
  bool Src1Identity = true;

  for (int i = 0; i != NumMaskElts; ++i) {
    int M = Mask[i];
    if (M < 0) {
      UndefElts.setBit(i);
      continue;
    }
    if (M < NumSrcElts) {
      Src0Elts.setBit(i);
      Src0Identity &= (M == i);
      continue;
    }
    Src1Elts.setBit(i);
    Src1Identity &= (M == (i + NumSrcElts));
  }
  // Every lane is undef or attributed to exactly one source; the
  // single-source check above guarantees both sources are used.
  assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
         "unknown shuffle elements");
  assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
         "2-source shuffle not found");

  // Determine lo/hi span ranges.
  // TODO: How should we handle undefs at the start of subvector insertions?
  int Src0Lo = Src0Elts.countr_zero();
  int Src1Lo = Src1Elts.countr_zero();
  int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
  int Src1Hi = NumMaskElts - Src1Elts.countl_zero();

  // If src0 is in place, see if the src1 elements is inplace within its own
  // span.
  if (Src0Identity) {
    int NumSub1Elts = Src1Hi - Src1Lo;
    ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
    if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
      NumSubElts = NumSub1Elts;
      Index = Src1Lo;
      return true;
    }
  }

  // If src1 is in place, see if the src0 elements is inplace within its own
  // span.
  if (Src1Identity) {
    int NumSub0Elts = Src0Hi - Src0Lo;
    ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
    if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
      NumSubElts = NumSub0Elts;
      Index = Src0Lo;
      return true;
    }
  }

  return false;
}
2537 
2538 bool ShuffleVectorInst::isIdentityWithPadding() const {
2539   if (isa<UndefValue>(Op<2>()))
2540     return false;
2541 
2542   // FIXME: Not currently possible to express a shuffle mask for a scalable
2543   // vector for this case.
2544   if (isa<ScalableVectorType>(getType()))
2545     return false;
2546 
2547   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2548   int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2549   if (NumMaskElts <= NumOpElts)
2550     return false;
2551 
2552   // The first part of the mask must choose elements from exactly 1 source op.
2553   ArrayRef<int> Mask = getShuffleMask();
2554   if (!isIdentityMaskImpl(Mask, NumOpElts))
2555     return false;
2556 
2557   // All extending must be with undef elements.
2558   for (int i = NumOpElts; i < NumMaskElts; ++i)
2559     if (Mask[i] != -1)
2560       return false;
2561 
2562   return true;
2563 }
2564 
2565 bool ShuffleVectorInst::isIdentityWithExtract() const {
2566   if (isa<UndefValue>(Op<2>()))
2567     return false;
2568 
2569   // FIXME: Not currently possible to express a shuffle mask for a scalable
2570   // vector for this case.
2571   if (isa<ScalableVectorType>(getType()))
2572     return false;
2573 
2574   int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2575   int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2576   if (NumMaskElts >= NumOpElts)
2577     return false;
2578 
2579   return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2580 }
2581 
// True if this shuffle concatenates its two operands end-to-end: the result
// is twice the operand width and the mask is <0, 1, ..., 2*NumOpElts-1>.
bool ShuffleVectorInst::isConcat() const {
  // Vector concatenation is differentiated from identity with padding.
  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()) ||
      isa<UndefValue>(Op<2>()))
    return false;

  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts != NumOpElts * 2)
    return false;

  // Use the mask length rather than the operands' vector lengths here. We
  // already know that the shuffle returns a vector twice as long as the inputs,
  // and neither of the inputs are undef vectors. If the mask picks consecutive
  // elements from both inputs, then this is a concatenation of the inputs.
  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
}
2604 
// Check whether Mask replicates each of VF source lanes ReplicationFactor
// times in order, i.e. <0,0,...,1,1,...,VF-1,VF-1,...>. Poison lanes may
// stand in for any expected value. Mask.size() must equal
// ReplicationFactor * VF. Note: Mask is consumed in-place via drop_front.
static bool isReplicationMaskWithParams(ArrayRef<int> Mask,
                                        int ReplicationFactor, int VF) {
  assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
         "Unexpected mask size.");

  // Each source lane owns the next ReplicationFactor-wide run of the mask.
  for (int CurrElt : seq(0, VF)) {
    ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
    assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
           "Run out of mask?");
    Mask = Mask.drop_front(ReplicationFactor);
    if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
          return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
        }))
      return false;
  }
  assert(Mask.empty() && "Did not consume the whole mask?");

  return true;
}
2624 
// True if Mask replicates each lane of a VF-wide source some fixed number of
// times. On success, ReplicationFactor and VF receive the decomposition.
bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,
                                          int &ReplicationFactor, int &VF) {
  // undef-less case is trivial.
  if (!llvm::is_contained(Mask, PoisonMaskElem)) {
    // The leading run of zeros fixes the replication factor exactly.
    ReplicationFactor =
        Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
    if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
      return false;
    VF = Mask.size() / ReplicationFactor;
    return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
  }

  // However, if the mask contains undef's, we have to enumerate possible tuples
  // and pick one. There are bounds on replication factor: [1, mask size]
  // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
  // Additionally, mask size is a replication factor multiplied by vector size,
  // which further significantly reduces the search space.

  // Before doing that, let's perform basic correctness checking first.
  int Largest = -1;
  for (int MaskElt : Mask) {
    if (MaskElt == PoisonMaskElem)
      continue;
    // Elements must be in non-decreasing order.
    if (MaskElt < Largest)
      return false;
    Largest = std::max(Largest, MaskElt);
  }

  // Prefer larger replication factor if all else equal.
  for (int PossibleReplicationFactor :
       reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
    if (Mask.size() % PossibleReplicationFactor != 0)
      continue;
    int PossibleVF = Mask.size() / PossibleReplicationFactor;
    if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
                                     PossibleVF))
      continue;
    ReplicationFactor = PossibleReplicationFactor;
    VF = PossibleVF;
    return true;
  }

  return false;
}
2670 
// Member form: VF is fixed by the width of the first operand, so only the
// derived ReplicationFactor needs verifying against the stored mask.
bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
                                          int &VF) const {
  // Not possible to express a shuffle mask for a scalable vector for this
  // case.
  if (isa<ScalableVectorType>(getType()))
    return false;

  VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  if (ShuffleMask.size() % VF != 0)
    return false;
  ReplicationFactor = ShuffleMask.size() / VF;

  return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
}
2685 
2686 bool ShuffleVectorInst::isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF) {
2687   if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2688       Mask.size() % VF != 0)
2689     return false;
2690   for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2691     ArrayRef<int> SubMask = Mask.slice(K, VF);
2692     if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
2693       continue;
2694     SmallBitVector Used(VF, false);
2695     for_each(SubMask, [&Used, VF](int Idx) {
2696       if (Idx != PoisonMaskElem && Idx < VF)
2697         Used.set(Idx);
2698     });
2699     if (!Used.all())
2700       return false;
2701   }
2702   return true;
2703 }
2704 
/// Return true if this shuffle uses a single source and, per VF-wide chunk of
/// its mask, references every source lane (see the static overload).
/// (The previous comment here described a replication mask, which is a
/// different predicate.)
bool ShuffleVectorInst::isOneUseSingleSourceMask(int VF) const {
  // Not possible to express a shuffle mask for a scalable vector for this
  // case.
  if (isa<ScalableVectorType>(getType()))
    return false;
  if (!isSingleSourceMask(ShuffleMask))
    return false;

  return isOneUseSingleSourceMask(ShuffleMask, VF);
}
2716 
2717 bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2718   FixedVectorType *OpTy = dyn_cast<FixedVectorType>(getOperand(0)->getType());
2719   // shuffle_vector can only interleave fixed length vectors - for scalable
2720   // vectors, see the @llvm.experimental.vector.interleave2 intrinsic
2721   if (!OpTy)
2722     return false;
2723   unsigned OpNumElts = OpTy->getNumElements();
2724 
2725   return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2726 }
2727 
// Check whether Mask interleaves Factor strided sequences (e.g. for Factor=3,
// <x, y, z, x+1, y+1, z+1, ...>), tolerating undef lanes as long as defined
// lanes remain consecutive within each sequence. On success, StartIndexes[I]
// receives the starting source index of sequence I.
bool ShuffleVectorInst::isInterleaveMask(
    ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
    SmallVectorImpl<unsigned> &StartIndexes) {
  unsigned NumElts = Mask.size();
  // The mask must split evenly into Factor lanes.
  if (NumElts % Factor)
    return false;

  unsigned LaneLen = NumElts / Factor;
  if (!isPowerOf2_32(LaneLen))
    return false;

  StartIndexes.resize(Factor);

  // Check whether each element matches the general interleaved rule.
  // Ignore undef elements, as long as the defined elements match the rule.
  // Outer loop processes all factors (x, y, z in the above example)
  unsigned I = 0, J;
  for (; I < Factor; I++) {
    // SavedLaneValue is only read when SavedNoUndefs > 0, which implies it
    // has been assigned (last defined value followed by undef(s)).
    unsigned SavedLaneValue;
    unsigned SavedNoUndefs = 0;

    // Inner loop processes consecutive accesses (x, x+1... in the example)
    for (J = 0; J < LaneLen - 1; J++) {
      // Lane computes x's position in the Mask
      unsigned Lane = J * Factor + I;
      unsigned NextLane = Lane + Factor;
      int LaneValue = Mask[Lane];
      int NextLaneValue = Mask[NextLane];

      // If both are defined, values must be sequential
      if (LaneValue >= 0 && NextLaneValue >= 0 &&
          LaneValue + 1 != NextLaneValue)
        break;

      // If the next value is undef, save the current one as reference
      if (LaneValue >= 0 && NextLaneValue < 0) {
        SavedLaneValue = LaneValue;
        SavedNoUndefs = 1;
      }

      // Undefs are allowed, but defined elements must still be consecutive:
      // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
      // Verify this by storing the last non-undef followed by an undef
      // Check that following non-undef masks are incremented with the
      // corresponding distance.
      if (SavedNoUndefs > 0 && LaneValue < 0) {
        SavedNoUndefs++;
        if (NextLaneValue >= 0 &&
            SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
          break;
      }
    }

    // An early break above means this sequence violated the rule.
    if (J < LaneLen - 1)
      return false;

    int StartMask = 0;
    if (Mask[I] >= 0) {
      // Check that the start of the I range (J=0) is greater than 0
      StartMask = Mask[I];
    } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
      // StartMask defined by the last value in lane
      StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
    } else if (SavedNoUndefs > 0) {
      // StartMask defined by some non-zero value in the j loop
      StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
    }
    // else StartMask remains set to 0, i.e. all elements are undefs

    if (StartMask < 0)
      return false;
    // We must stay within the vectors; This case can happen with undefs.
    if (StartMask + LaneLen > NumInputElts)
      return false;

    StartIndexes[I] = StartMask;
  }

  return true;
}
2808 
2809 //===----------------------------------------------------------------------===//
2810 //                             InsertValueInst Class
2811 //===----------------------------------------------------------------------===//
2812 
// Shared constructor body: record the aggregate and inserted value operands,
// copy the index list, and name the instruction. The inserted value's type
// must match the type reached by following Idxs into Agg.
void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}
2831 
// Copy constructor: clones both operands, the index list, and the
// SubclassOptionalData flags of IVI. The copy is created unattached to any
// basic block.
InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
  : Instruction(IVI.getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this), 2),
    Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
  SubclassOptionalData = IVI.SubclassOptionalData;
}
2840 
2841 //===----------------------------------------------------------------------===//
2842 //                             ExtractValueInst Class
2843 //===----------------------------------------------------------------------===//
2844 
// Shared constructor body: copy the index list and name the instruction.
// (The aggregate operand is set by the constructor before calling this.)
void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index.
  // But there's no present need to support it.
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}
2855 
// Copy constructor: clones the aggregate operand, the index list, and the
// SubclassOptionalData flags of EVI; the copy is created unattached.
ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
  : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
    Indices(EVI.Indices) {
  SubclassOptionalData = EVI.SubclassOptionalData;
}
2861 
2862 // getIndexedType - Returns the type of the element that would be extracted
2863 // with an extractvalue instruction with the specified parameters.
2864 //
2865 // A null type is returned if the indices are invalid for the specified
2866 // pointer type.
2867 //
2868 Type *ExtractValueInst::getIndexedType(Type *Agg,
2869                                        ArrayRef<unsigned> Idxs) {
2870   for (unsigned Index : Idxs) {
2871     // We can't use CompositeType::indexValid(Index) here.
2872     // indexValid() always returns true for arrays because getelementptr allows
2873     // out-of-bounds indices. Since we don't allow those for extractvalue and
2874     // insertvalue we need to check array indexing manually.
2875     // Since the only other types we can index into are struct types it's just
2876     // as easy to check those manually as well.
2877     if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2878       if (Index >= AT->getNumElements())
2879         return nullptr;
2880       Agg = AT->getElementType();
2881     } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
2882       if (Index >= ST->getNumElements())
2883         return nullptr;
2884       Agg = ST->getElementType(Index);
2885     } else {
2886       // Not a valid type to index into.
2887       return nullptr;
2888     }
2889   }
2890   return const_cast<Type*>(Agg);
2891 }
2892 
2893 //===----------------------------------------------------------------------===//
2894 //                             UnaryOperator Class
2895 //===----------------------------------------------------------------------===//
2896 
// Construct a unary operator with the given opcode and result type, inserted
// before InsertBefore (if non-null). AssertOK checks opcode/type consistency
// in debug builds.
UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             Instruction *InsertBefore)
  : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}
2905 
// Construct a unary operator with the given opcode and result type, appended
// at the end of InsertAtEnd. AssertOK checks opcode/type consistency in debug
// builds.
UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             BasicBlock *InsertAtEnd)
  : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}
2914 
2915 UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
2916                                      const Twine &Name,
2917                                      Instruction *InsertBefore) {
2918   return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
2919 }
2920 
2921 UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
2922                                      const Twine &Name,
2923                                      BasicBlock *InsertAtEnd) {
2924   UnaryOperator *Res = Create(Op, S, Name);
2925   Res->insertInto(InsertAtEnd, InsertAtEnd->end());
2926   return Res;
2927 }
2928 
// Debug-build sanity checks: the opcode must be a known unary op and the
// operand/result types must be consistent with it. No-op in release builds.
void UnaryOperator::AssertOK() {
  Value *LHS = getOperand(0);
  (void)LHS; // Silence warnings.
#ifndef NDEBUG
  switch (getOpcode()) {
  case FNeg:
    assert(getType() == LHS->getType() &&
           "Unary operation should return same type as operand!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
2945 
2946 //===----------------------------------------------------------------------===//
2947 //                             BinaryOperator Class
2948 //===----------------------------------------------------------------------===//
2949 
// Construct a binary operator with the given opcode and result type, inserted
// before InsertBefore (if non-null). AssertOK checks opcode/type consistency
// in debug builds.
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               Instruction *InsertBefore)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}
2962 
// Construct a binary operator with the given opcode and result type, appended
// at the end of InsertAtEnd. AssertOK checks opcode/type consistency in debug
// builds.
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               BasicBlock *InsertAtEnd)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertAtEnd) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}
2975 
// Debug-build sanity checks: operand types must match each other and the
// result type, and the element type (integer vs. floating point) must be
// appropriate for the opcode. Only the operand-match assert runs in release
// builds' NDEBUG-stripped form; the switch is debug-only.
void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  switch (getOpcode()) {
  // Integer arithmetic.
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  // Floating-point arithmetic.
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  // Shifts.
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  // Bitwise logic.
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
3043 
3044 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
3045                                        const Twine &Name,
3046                                        Instruction *InsertBefore) {
3047   assert(S1->getType() == S2->getType() &&
3048          "Cannot create binary operator with two operands of differing type!");
3049   return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
3050 }
3051 
3052 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
3053                                        const Twine &Name,
3054                                        BasicBlock *InsertAtEnd) {
3055   BinaryOperator *Res = Create(Op, S1, S2, Name);
3056   Res->insertInto(InsertAtEnd, InsertAtEnd->end());
3057   return Res;
3058 }
3059 
3060 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
3061                                           Instruction *InsertBefore) {
3062   Value *Zero = ConstantInt::get(Op->getType(), 0);
3063   return new BinaryOperator(Instruction::Sub,
3064                             Zero, Op,
3065                             Op->getType(), Name, InsertBefore);
3066 }
3067 
3068 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
3069                                           BasicBlock *InsertAtEnd) {
3070   Value *Zero = ConstantInt::get(Op->getType(), 0);
3071   return new BinaryOperator(Instruction::Sub,
3072                             Zero, Op,
3073                             Op->getType(), Name, InsertAtEnd);
3074 }
3075 
3076 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
3077                                              Instruction *InsertBefore) {
3078   Value *Zero = ConstantInt::get(Op->getType(), 0);
3079   return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
3080 }
3081 
3082 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
3083                                              BasicBlock *InsertAtEnd) {
3084   Value *Zero = ConstantInt::get(Op->getType(), 0);
3085   return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertAtEnd);
3086 }
3087 
3088 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
3089                                              Instruction *InsertBefore) {
3090   Value *Zero = ConstantInt::get(Op->getType(), 0);
3091   return BinaryOperator::CreateNUWSub(Zero, Op, Name, InsertBefore);
3092 }
3093 
3094 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
3095                                              BasicBlock *InsertAtEnd) {
3096   Value *Zero = ConstantInt::get(Op->getType(), 0);
3097   return BinaryOperator::CreateNUWSub(Zero, Op, Name, InsertAtEnd);
3098 }
3099 
3100 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
3101                                           Instruction *InsertBefore) {
3102   Constant *C = Constant::getAllOnesValue(Op->getType());
3103   return new BinaryOperator(Instruction::Xor, Op, C,
3104                             Op->getType(), Name, InsertBefore);
3105 }
3106 
3107 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
3108                                           BasicBlock *InsertAtEnd) {
3109   Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
3110   return new BinaryOperator(Instruction::Xor, Op, AllOnes,
3111                             Op->getType(), Name, InsertAtEnd);
3112 }
3113 
3114 // Exchange the two operands to this instruction. This instruction is safe to
3115 // use on any binary instruction and does not modify the semantics of the
3116 // instruction. If the instruction is order-dependent (SetLT f.e.), the opcode
3117 // is changed.
3118 bool BinaryOperator::swapOperands() {
3119   if (!isCommutative())
3120     return true; // Can't commute operands
3121   Op<0>().swap(Op<1>());
3122   return false;
3123 }
3124 
3125 //===----------------------------------------------------------------------===//
3126 //                             FPMathOperator Class
3127 //===----------------------------------------------------------------------===//
3128 
3129 float FPMathOperator::getFPAccuracy() const {
3130   const MDNode *MD =
3131       cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
3132   if (!MD)
3133     return 0.0;
3134   ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
3135   return Accuracy->getValueAPF().convertToFloat();
3136 }
3137 
3138 //===----------------------------------------------------------------------===//
3139 //                                CastInst Class
3140 //===----------------------------------------------------------------------===//
3141 
3142 // Just determine if this cast only deals with integral->integral conversion.
3143 bool CastInst::isIntegerCast() const {
3144   switch (getOpcode()) {
3145     default: return false;
3146     case Instruction::ZExt:
3147     case Instruction::SExt:
3148     case Instruction::Trunc:
3149       return true;
3150     case Instruction::BitCast:
3151       return getOperand(0)->getType()->isIntegerTy() &&
3152         getType()->isIntegerTy();
3153   }
3154 }
3155 
3156 /// This function determines if the CastInst does not require any bits to be
3157 /// changed in order to effect the cast. Essentially, it identifies cases where
3158 /// no code gen is necessary for the cast, hence the name no-op cast.  For
3159 /// example, the following are all no-op casts:
3160 /// # bitcast i32* %x to i8*
3161 /// # bitcast <2 x i32> %x to <4 x i16>
3162 /// # ptrtoint i32* %x to i32     ; on 32-bit plaforms only
3163 /// Determine if the described cast is a no-op.
3164 bool CastInst::isNoopCast(Instruction::CastOps Opcode,
3165                           Type *SrcTy,
3166                           Type *DestTy,
3167                           const DataLayout &DL) {
3168   assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
3169   switch (Opcode) {
3170     default: llvm_unreachable("Invalid CastOp");
3171     case Instruction::Trunc:
3172     case Instruction::ZExt:
3173     case Instruction::SExt:
3174     case Instruction::FPTrunc:
3175     case Instruction::FPExt:
3176     case Instruction::UIToFP:
3177     case Instruction::SIToFP:
3178     case Instruction::FPToUI:
3179     case Instruction::FPToSI:
3180     case Instruction::AddrSpaceCast:
3181       // TODO: Target informations may give a more accurate answer here.
3182       return false;
3183     case Instruction::BitCast:
3184       return true;  // BitCast never modifies bits.
3185     case Instruction::PtrToInt:
3186       return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
3187              DestTy->getScalarSizeInBits();
3188     case Instruction::IntToPtr:
3189       return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
3190              SrcTy->getScalarSizeInBits();
3191   }
3192 }
3193 
3194 bool CastInst::isNoopCast(const DataLayout &DL) const {
3195   return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
3196 }
3197 
/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// *  %F = firstOpcode SrcTy %x to MidTy
/// *  %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// *  %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
///
/// The *IntPtrTy parameters are the pointer-sized integer types corresponding
/// to the source/mid/destination types; they may be null when the pointer
/// width is not known (see cases 7 and 11 below).
unsigned CastInst::isEliminableCastPair(
  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
  Type *DstIntPtrTy) {
  // Define the 169 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below.  The rows correspond to firstOp, the columns
  // correspond to secondOp.  In looking at the table below, keep in mind
  // the following cast properties:
  //
  //          Size Compare       Source               Destination
  // Operator  Src ? Size   Type       Sign         Type       Sign
  // -------- ------------ -------------------   ---------------------
  // TRUNC         >       Integer      Any        Integral     Any
  // ZEXT          <       Integral   Unsigned     Integer      Any
  // SEXT          <       Integral    Signed      Integer      Any
  // FPTOUI       n/a      FloatPt      n/a        Integral   Unsigned
  // FPTOSI       n/a      FloatPt      n/a        Integral    Signed
  // UITOFP       n/a      Integral   Unsigned     FloatPt      n/a
  // SITOFP       n/a      Integral    Signed      FloatPt      n/a
  // FPTRUNC       >       FloatPt      n/a        FloatPt      n/a
  // FPEXT         <       FloatPt      n/a        FloatPt      n/a
  // PTRTOINT     n/a      Pointer      n/a        Integral   Unsigned
  // INTTOPTR     n/a      Integral   Unsigned     Pointer      n/a
  // BITCAST       =       FirstClass   n/a       FirstClass    n/a
  // ADDRSPCST    n/a      Pointer      n/a        Pointer      n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc.  We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    // T        F  F  U  S  F  F  P  I  B  A  -+
    // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
    // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
    // N  X  X  U  S  F  F  N  X  N  2  V  V   |
    // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
    {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc         -+
    {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt           |
    {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt           |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI         |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP         +- firstOp
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc        |
    { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt          |
    {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt       |
    { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr       |
    {  5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast        |
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
  };

  // TODO: This logic could be encoded into the table above and handled in the
  // switch below.
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)
      return 0;

  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
  switch (ElimCase) {
    case 0:
      // Categorically disallowed.
      return 0;
    case 1:
      // Allowed, use first cast's opcode.
      return firstOp;
    case 2:
      // Allowed, use second cast's opcode.
      return secondOp;
    case 3:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // non-vector type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
        return firstOp;
      return 0;
    case 4:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is floating point.
      if (DstTy->isFloatingPointTy())
        return firstOp;
      return 0;
    case 5:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
        return secondOp;
      return 0;
    case 6:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is a floating point.
      if (SrcTy->isFloatingPointTy())
        return secondOp;
      return 0;
    case 7: {
      // ptrtoint followed by inttoptr. Bail out when the roundtrip
      // optimization has been turned off via -disable-i2p-p2i-opt.
      if (DisableI2pP2iOpt)
        return 0;

      // Cannot simplify if address spaces are different!
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return 0;

      unsigned MidSize = MidTy->getScalarSizeInBits();
      // We can still fold this without knowing the actual sizes as long we
      // know that the intermediate pointer is the largest possible
      // pointer size.
      // FIXME: Is this always true?
      if (MidSize == 64)
        return Instruction::BitCast;

      // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
        return 0;
      unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
      if (MidSize >= PtrSize)
        return Instruction::BitCast;
      return 0;
    }
    case 8: {
      // ext, trunc -> bitcast,    if the SrcTy and DstTy are the same
      // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcTy == DstTy)
        return Instruction::BitCast;
      if (SrcSize < DstSize)
        return firstOp;
      if (SrcSize > DstSize)
        return secondOp;
      return 0;
    }
    case 9:
      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;
    case 11: {
      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
      if (!MidIntPtrTy)
        return 0;
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;
      return 0;
    }
    case 12:
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 14:
      // bitcast, addrspacecast -> addrspacecast
      return Instruction::AddrSpaceCast;
    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}
3424 
3425 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
3426   const Twine &Name, Instruction *InsertBefore) {
3427   assert(castIsValid(op, S, Ty) && "Invalid cast!");
3428   // Construct and return the appropriate CastInst subclass
3429   switch (op) {
3430   case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
3431   case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
3432   case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
3433   case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertBefore);
3434   case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);
3435   case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertBefore);
3436   case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertBefore);
3437   case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertBefore);
3438   case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertBefore);
3439   case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertBefore);
3440   case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertBefore);
3441   case BitCast:       return new BitCastInst       (S, Ty, Name, InsertBefore);
3442   case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
3443   default: llvm_unreachable("Invalid opcode provided");
3444   }
3445 }
3446 
3447 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
3448   const Twine &Name, BasicBlock *InsertAtEnd) {
3449   assert(castIsValid(op, S, Ty) && "Invalid cast!");
3450   // Construct and return the appropriate CastInst subclass
3451   switch (op) {
3452   case Trunc:         return new TruncInst         (S, Ty, Name, InsertAtEnd);
3453   case ZExt:          return new ZExtInst          (S, Ty, Name, InsertAtEnd);
3454   case SExt:          return new SExtInst          (S, Ty, Name, InsertAtEnd);
3455   case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertAtEnd);
3456   case FPExt:         return new FPExtInst         (S, Ty, Name, InsertAtEnd);
3457   case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertAtEnd);
3458   case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertAtEnd);
3459   case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertAtEnd);
3460   case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertAtEnd);
3461   case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertAtEnd);
3462   case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertAtEnd);
3463   case BitCast:       return new BitCastInst       (S, Ty, Name, InsertAtEnd);
3464   case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
3465   default: llvm_unreachable("Invalid opcode provided");
3466   }
3467 }
3468 
3469 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
3470                                         const Twine &Name,
3471                                         Instruction *InsertBefore) {
3472   if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3473     return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3474   return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
3475 }
3476 
3477 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
3478                                         const Twine &Name,
3479                                         BasicBlock *InsertAtEnd) {
3480   if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3481     return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3482   return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
3483 }
3484 
3485 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
3486                                         const Twine &Name,
3487                                         Instruction *InsertBefore) {
3488   if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3489     return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3490   return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
3491 }
3492 
3493 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
3494                                         const Twine &Name,
3495                                         BasicBlock *InsertAtEnd) {
3496   if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3497     return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3498   return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
3499 }
3500 
3501 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
3502                                          const Twine &Name,
3503                                          Instruction *InsertBefore) {
3504   if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3505     return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3506   return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
3507 }
3508 
3509 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
3510                                          const Twine &Name,
3511                                          BasicBlock *InsertAtEnd) {
3512   if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
3513     return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3514   return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
3515 }
3516 
3517 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
3518                                       const Twine &Name,
3519                                       BasicBlock *InsertAtEnd) {
3520   assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3521   assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3522          "Invalid cast");
3523   assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3524   assert((!Ty->isVectorTy() ||
3525           cast<VectorType>(Ty)->getElementCount() ==
3526               cast<VectorType>(S->getType())->getElementCount()) &&
3527          "Invalid cast");
3528 
3529   if (Ty->isIntOrIntVectorTy())
3530     return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
3531 
3532   return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
3533 }
3534 
3535 /// Create a BitCast or a PtrToInt cast instruction
3536 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
3537                                       const Twine &Name,
3538                                       Instruction *InsertBefore) {
3539   assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3540   assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
3541          "Invalid cast");
3542   assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
3543   assert((!Ty->isVectorTy() ||
3544           cast<VectorType>(Ty)->getElementCount() ==
3545               cast<VectorType>(S->getType())->getElementCount()) &&
3546          "Invalid cast");
3547 
3548   if (Ty->isIntOrIntVectorTy())
3549     return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3550 
3551   return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
3552 }
3553 
3554 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
3555   Value *S, Type *Ty,
3556   const Twine &Name,
3557   BasicBlock *InsertAtEnd) {
3558   assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3559   assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3560 
3561   if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3562     return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
3563 
3564   return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
3565 }
3566 
3567 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
3568   Value *S, Type *Ty,
3569   const Twine &Name,
3570   Instruction *InsertBefore) {
3571   assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
3572   assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
3573 
3574   if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
3575     return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
3576 
3577   return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3578 }
3579 
3580 CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
3581                                            const Twine &Name,
3582                                            Instruction *InsertBefore) {
3583   if (S->getType()->isPointerTy() && Ty->isIntegerTy())
3584     return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
3585   if (S->getType()->isIntegerTy() && Ty->isPointerTy())
3586     return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
3587 
3588   return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
3589 }
3590 
3591 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
3592                                       bool isSigned, const Twine &Name,
3593                                       Instruction *InsertBefore) {
3594   assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3595          "Invalid integer cast");
3596   unsigned SrcBits = C->getType()->getScalarSizeInBits();
3597   unsigned DstBits = Ty->getScalarSizeInBits();
3598   Instruction::CastOps opcode =
3599     (SrcBits == DstBits ? Instruction::BitCast :
3600      (SrcBits > DstBits ? Instruction::Trunc :
3601       (isSigned ? Instruction::SExt : Instruction::ZExt)));
3602   return Create(opcode, C, Ty, Name, InsertBefore);
3603 }
3604 
3605 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
3606                                       bool isSigned, const Twine &Name,
3607                                       BasicBlock *InsertAtEnd) {
3608   assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
3609          "Invalid cast");
3610   unsigned SrcBits = C->getType()->getScalarSizeInBits();
3611   unsigned DstBits = Ty->getScalarSizeInBits();
3612   Instruction::CastOps opcode =
3613     (SrcBits == DstBits ? Instruction::BitCast :
3614      (SrcBits > DstBits ? Instruction::Trunc :
3615       (isSigned ? Instruction::SExt : Instruction::ZExt)));
3616   return Create(opcode, C, Ty, Name, InsertAtEnd);
3617 }
3618 
3619 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
3620                                  const Twine &Name,
3621                                  Instruction *InsertBefore) {
3622   assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3623          "Invalid cast");
3624   unsigned SrcBits = C->getType()->getScalarSizeInBits();
3625   unsigned DstBits = Ty->getScalarSizeInBits();
3626   Instruction::CastOps opcode =
3627     (SrcBits == DstBits ? Instruction::BitCast :
3628      (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3629   return Create(opcode, C, Ty, Name, InsertBefore);
3630 }
3631 
3632 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
3633                                  const Twine &Name,
3634                                  BasicBlock *InsertAtEnd) {
3635   assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
3636          "Invalid cast");
3637   unsigned SrcBits = C->getType()->getScalarSizeInBits();
3638   unsigned DstBits = Ty->getScalarSizeInBits();
3639   Instruction::CastOps opcode =
3640     (SrcBits == DstBits ? Instruction::BitCast :
3641      (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3642   return Create(opcode, C, Ty, Name, InsertAtEnd);
3643 }
3644 
3645 bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
3646   if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
3647     return false;
3648 
3649   if (SrcTy == DestTy)
3650     return true;
3651 
3652   if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3653     if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3654       if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3655         // An element by element cast. Valid if casting the elements is valid.
3656         SrcTy = SrcVecTy->getElementType();
3657         DestTy = DestVecTy->getElementType();
3658       }
3659     }
3660   }
3661 
3662   if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3663     if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3664       return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3665     }
3666   }
3667 
3668   TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
3669   TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3670 
3671   // Could still have vectors of pointers if the number of elements doesn't
3672   // match
3673   if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
3674     return false;
3675 
3676   if (SrcBits != DestBits)
3677     return false;
3678 
3679   if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
3680     return false;
3681 
3682   return true;
3683 }
3684 
3685 bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
3686                                           const DataLayout &DL) {
3687   // ptrtoint and inttoptr are not allowed on non-integral pointers
3688   if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3689     if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
3690       return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3691               !DL.isNonIntegralPointerType(PtrTy));
3692   if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3693     if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3694       return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
3695               !DL.isNonIntegralPointerType(PtrTy));
3696 
3697   return isBitCastable(SrcTy, DestTy);
3698 }
3699 
3700 // Provide a way to get a "cast" where the cast opcode is inferred from the
3701 // types and size of the operand. This, basically, is a parallel of the
3702 // logic in the castIsValid function below.  This axiom should hold:
3703 //   castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3704 // should not assert in castIsValid. In other words, this produces a "correct"
3705 // casting opcode for the arguments passed to it.
Instruction::CastOps
CastInst::getCastOpcode(
  const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
  Type *SrcTy = Src->getType();

  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
         "Only first class types are castable!");

  // Identical types: a bitcast is the canonical no-op.
  if (SrcTy == DestTy)
    return BitCast;

  // FIXME: Check address space sizes here
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast.  Find the appropriate opcode based on the
        // element types (mismatched element counts fall through and are
        // classified on the full vector types below).
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }

  // Get the bit sizes, we'll need these
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Run through the possibilities, keyed first on the destination type ...
  if (DestTy->isIntegerTy()) {                      // Casting to integral
    if (SrcTy->isIntegerTy()) {                     // Casting from integral
      if (DestBits < SrcBits)
        return Trunc;                               // int -> smaller int
      else if (DestBits > SrcBits) {                // it's an extension
        if (SrcIsSigned)
          return SExt;                              // signed -> SEXT
        else
          return ZExt;                              // unsigned -> ZEXT
      } else {
        return BitCast;                             // Same size, No-op cast
      }
    } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
      if (DestIsSigned)
        return FPToSI;                              // FP -> sint
      else
        return FPToUI;                              // FP -> uint
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to integer of different width");
      return BitCast;                             // Same size, no-op cast
    } else {
      assert(SrcTy->isPointerTy() &&
             "Casting from a value that is not first-class type");
      return PtrToInt;                              // ptr -> int
    }
  } else if (DestTy->isFloatingPointTy()) {         // Casting to floating pt
    if (SrcTy->isIntegerTy()) {                     // Casting from integral
      if (SrcIsSigned)
        return SIToFP;                              // sint -> FP
      else
        return UIToFP;                              // uint -> FP
    } else if (SrcTy->isFloatingPointTy()) {        // Casting from floating pt
      if (DestBits < SrcBits) {
        return FPTrunc;                             // FP -> smaller FP
      } else if (DestBits > SrcBits) {
        return FPExt;                               // FP -> larger FP
      } else  {
        return BitCast;                             // same size, no-op cast
      }
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to floating point of different width");
      return BitCast;                             // same size, no-op cast
    }
    llvm_unreachable("Casting pointer or non-first class to float");
  } else if (DestTy->isVectorTy()) {
    // Only reached for vector destinations whose element count differs from
    // the source (same-count pairs were reduced to scalars above).
    assert(DestBits == SrcBits &&
           "Illegal cast to vector (wrong type or size)");
    return BitCast;
  } else if (DestTy->isPointerTy()) {
    if (SrcTy->isPointerTy()) {
      if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
        return AddrSpaceCast;
      return BitCast;                               // ptr -> ptr
    } else if (SrcTy->isIntegerTy()) {
      return IntToPtr;                              // int -> ptr
    }
    llvm_unreachable("Casting pointer to other than pointer or int");
  } else if (DestTy->isX86_MMXTy()) {
    if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
      return BitCast;                               // 64-bit vector to MMX
    }
    llvm_unreachable("Illegal cast to X86_MMX");
  }
  llvm_unreachable("Casting to type that is not first-class");
}
3800 
3801 //===----------------------------------------------------------------------===//
3802 //                    CastInst SubClass Constructors
3803 //===----------------------------------------------------------------------===//
3804 
3805 /// Check that the construction parameters for a CastInst are correct. This
3806 /// could be broken out into the separate constructors but it is useful to have
3807 /// it in one place and to eliminate the redundant code for getting the sizes
3808 /// of the types involved.
bool
CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {
  // Casts only operate on first-class, non-aggregate types.
  if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
      SrcTy->isAggregateType() || DstTy->isAggregateType())
    return false;

  // Get the size of the types in bits, and whether we are dealing
  // with vector types, we'll need this later.
  bool SrcIsVec = isa<VectorType>(SrcTy);
  bool DstIsVec = isa<VectorType>(DstTy);
  unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();

  // If these are vector types, get the lengths of the vectors (using zero for
  // scalar types means that checking that vector lengths match also checks that
  // scalars are not being converted to vectors or vectors to scalars).
  ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
                                : ElementCount::getFixed(0);
  ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
                                : ElementCount::getFixed(0);

  // Switch on the opcode provided
  switch (op) {
  default: return false; // This is an input error
  case Instruction::Trunc:
    // Integer-only, same vector shape, strictly narrower scalars.
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::ZExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::SExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::FPTrunc:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::FPExt:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::UIToFP:
  case Instruction::SIToFP:
    // int <-> FP conversions only require matching vector shape; any bit
    // widths are allowed.
    return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC;
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC;
  case Instruction::PtrToInt:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
  case Instruction::IntToPtr:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
  case Instruction::BitCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());

    // BitCast implies a no-op cast of type only. No bits change.
    // However, you can't cast pointers to anything but pointers; reject when
    // exactly one side is (a vector of) pointer.
    if (!SrcPtrTy != !DstPtrTy)
      return false;

    // For non-pointer cases, the cast is okay if the source and destination bit
    // widths are identical.
    if (!SrcPtrTy)
      return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();

    // If both are pointers then the address spaces must match.
    if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
      return false;

    // A vector of pointers must have the same number of elements.
    // A scalar <-> vector-of-pointer bitcast is only valid for a fixed
    // single-element vector.
    if (SrcIsVec && DstIsVec)
      return SrcEC == DstEC;
    if (SrcIsVec)
      return SrcEC == ElementCount::getFixed(1);
    if (DstIsVec)
      return DstEC == ElementCount::getFixed(1);

    return true;
  }
  case Instruction::AddrSpaceCast: {
    // Both sides must be (vectors of) pointers, in *different* address
    // spaces, with matching element counts.
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    if (!SrcPtrTy)
      return false;

    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
    if (!DstPtrTy)
      return false;

    if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
      return false;

    return SrcEC == DstEC;
  }
  }
}
3908 
3909 TruncInst::TruncInst(
3910   Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3911 ) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3912   assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3913 }
3914 
3915 TruncInst::TruncInst(
3916   Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3917 ) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
3918   assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3919 }
3920 
3921 ZExtInst::ZExtInst(
3922   Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3923 )  : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3924   assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3925 }
3926 
3927 ZExtInst::ZExtInst(
3928   Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3929 )  : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
3930   assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3931 }
3932 SExtInst::SExtInst(
3933   Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3934 ) : CastInst(Ty, SExt, S, Name, InsertBefore) {
3935   assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3936 }
3937 
3938 SExtInst::SExtInst(
3939   Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3940 )  : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
3941   assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3942 }
3943 
3944 FPTruncInst::FPTruncInst(
3945   Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3946 ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3947   assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3948 }
3949 
3950 FPTruncInst::FPTruncInst(
3951   Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3952 ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
3953   assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3954 }
3955 
3956 FPExtInst::FPExtInst(
3957   Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3958 ) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3959   assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3960 }
3961 
3962 FPExtInst::FPExtInst(
3963   Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3964 ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
3965   assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3966 }
3967 
3968 UIToFPInst::UIToFPInst(
3969   Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3970 ) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3971   assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3972 }
3973 
3974 UIToFPInst::UIToFPInst(
3975   Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3976 ) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
3977   assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3978 }
3979 
3980 SIToFPInst::SIToFPInst(
3981   Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3982 ) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3983   assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3984 }
3985 
3986 SIToFPInst::SIToFPInst(
3987   Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3988 ) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
3989   assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3990 }
3991 
3992 FPToUIInst::FPToUIInst(
3993   Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3994 ) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3995   assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3996 }
3997 
3998 FPToUIInst::FPToUIInst(
3999   Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4000 ) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
4001   assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
4002 }
4003 
4004 FPToSIInst::FPToSIInst(
4005   Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4006 ) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
4007   assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
4008 }
4009 
4010 FPToSIInst::FPToSIInst(
4011   Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4012 ) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
4013   assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
4014 }
4015 
4016 PtrToIntInst::PtrToIntInst(
4017   Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4018 ) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
4019   assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
4020 }
4021 
4022 PtrToIntInst::PtrToIntInst(
4023   Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4024 ) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
4025   assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
4026 }
4027 
4028 IntToPtrInst::IntToPtrInst(
4029   Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4030 ) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
4031   assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
4032 }
4033 
4034 IntToPtrInst::IntToPtrInst(
4035   Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4036 ) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
4037   assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
4038 }
4039 
4040 BitCastInst::BitCastInst(
4041   Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4042 ) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
4043   assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
4044 }
4045 
4046 BitCastInst::BitCastInst(
4047   Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4048 ) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
4049   assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
4050 }
4051 
4052 AddrSpaceCastInst::AddrSpaceCastInst(
4053   Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
4054 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
4055   assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
4056 }
4057 
4058 AddrSpaceCastInst::AddrSpaceCastInst(
4059   Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
4060 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
4061   assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
4062 }
4063 
4064 //===----------------------------------------------------------------------===//
4065 //                               CmpInst Classes
4066 //===----------------------------------------------------------------------===//
4067 
// Construct a compare instruction, inserting before InsertBefore if non-null.
// The two compared values occupy the instruction's two fixed operand slots.
// If FlagsSource is provided, its IR-level flags are copied onto the new
// compare via copyIRFlags.
CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
                 Value *RHS, const Twine &Name, Instruction *InsertBefore,
                 Instruction *FlagsSource)
  : Instruction(ty, op,
                OperandTraits<CmpInst>::op_begin(this),
                OperandTraits<CmpInst>::operands(this),
                InsertBefore) {
  Op<0>() = LHS;
  Op<1>() = RHS;
  setPredicate((Predicate)predicate);
  setName(Name);
  if (FlagsSource)
    copyIRFlags(FlagsSource);
}

// Construct a compare instruction appended to the end of InsertAtEnd.
// Identical to the variant above except for the insertion position, and it
// offers no FlagsSource to copy flags from.
CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
                 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
  : Instruction(ty, op,
                OperandTraits<CmpInst>::op_begin(this),
                OperandTraits<CmpInst>::operands(this),
                InsertAtEnd) {
  Op<0>() = LHS;
  Op<1>() = RHS;
  setPredicate((Predicate)predicate);
  setName(Name);
}
4094 
4095 CmpInst *
4096 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
4097                 const Twine &Name, Instruction *InsertBefore) {
4098   if (Op == Instruction::ICmp) {
4099     if (InsertBefore)
4100       return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
4101                           S1, S2, Name);
4102     else
4103       return new ICmpInst(CmpInst::Predicate(predicate),
4104                           S1, S2, Name);
4105   }
4106 
4107   if (InsertBefore)
4108     return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
4109                         S1, S2, Name);
4110   else
4111     return new FCmpInst(CmpInst::Predicate(predicate),
4112                         S1, S2, Name);
4113 }
4114 
4115 CmpInst *
4116 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
4117                 const Twine &Name, BasicBlock *InsertAtEnd) {
4118   if (Op == Instruction::ICmp) {
4119     return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
4120                         S1, S2, Name);
4121   }
4122   return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
4123                       S1, S2, Name);
4124 }
4125 
4126 void CmpInst::swapOperands() {
4127   if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
4128     IC->swapOperands();
4129   else
4130     cast<FCmpInst>(this)->swapOperands();
4131 }
4132 
4133 bool CmpInst::isCommutative() const {
4134   if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
4135     return IC->isCommutative();
4136   return cast<FCmpInst>(this)->isCommutative();
4137 }
4138 
4139 bool CmpInst::isEquality(Predicate P) {
4140   if (ICmpInst::isIntPredicate(P))
4141     return ICmpInst::isEquality(P);
4142   if (FCmpInst::isFPPredicate(P))
4143     return FCmpInst::isEquality(P);
4144   llvm_unreachable("Unsupported predicate kind");
4145 }
4146 
// Return the logical complement of pred: for any operand pair, exactly one of
// pred and its inverse holds.
CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown cmp predicate!");
    case ICMP_EQ: return ICMP_NE;
    case ICMP_NE: return ICMP_EQ;
    case ICMP_UGT: return ICMP_ULE;
    case ICMP_ULT: return ICMP_UGE;
    case ICMP_UGE: return ICMP_ULT;
    case ICMP_ULE: return ICMP_UGT;
    case ICMP_SGT: return ICMP_SLE;
    case ICMP_SLT: return ICMP_SGE;
    case ICMP_SGE: return ICMP_SLT;
    case ICMP_SLE: return ICMP_SGT;

    // Inverting an FP predicate also swaps ordered <-> unordered, since the
    // complement of an ordered comparison must be true on NaN inputs.
    case FCMP_OEQ: return FCMP_UNE;
    case FCMP_ONE: return FCMP_UEQ;
    case FCMP_OGT: return FCMP_ULE;
    case FCMP_OLT: return FCMP_UGE;
    case FCMP_OGE: return FCMP_ULT;
    case FCMP_OLE: return FCMP_UGT;
    case FCMP_UEQ: return FCMP_ONE;
    case FCMP_UNE: return FCMP_OEQ;
    case FCMP_UGT: return FCMP_OLE;
    case FCMP_ULT: return FCMP_OGE;
    case FCMP_UGE: return FCMP_OLT;
    case FCMP_ULE: return FCMP_OGT;
    case FCMP_ORD: return FCMP_UNO;
    case FCMP_UNO: return FCMP_ORD;
    case FCMP_TRUE: return FCMP_FALSE;
    case FCMP_FALSE: return FCMP_TRUE;
  }
}
4179 
// Return the short mnemonic string for Pred (e.g. "slt", "oge"), or
// "unknown" for a value outside the predicate range.
StringRef CmpInst::getPredicateName(Predicate Pred) {
  switch (Pred) {
  default:                   return "unknown";
  case FCmpInst::FCMP_FALSE: return "false";
  case FCmpInst::FCMP_OEQ:   return "oeq";
  case FCmpInst::FCMP_OGT:   return "ogt";
  case FCmpInst::FCMP_OGE:   return "oge";
  case FCmpInst::FCMP_OLT:   return "olt";
  case FCmpInst::FCMP_OLE:   return "ole";
  case FCmpInst::FCMP_ONE:   return "one";
  case FCmpInst::FCMP_ORD:   return "ord";
  case FCmpInst::FCMP_UNO:   return "uno";
  case FCmpInst::FCMP_UEQ:   return "ueq";
  case FCmpInst::FCMP_UGT:   return "ugt";
  case FCmpInst::FCMP_UGE:   return "uge";
  case FCmpInst::FCMP_ULT:   return "ult";
  case FCmpInst::FCMP_ULE:   return "ule";
  case FCmpInst::FCMP_UNE:   return "une";
  case FCmpInst::FCMP_TRUE:  return "true";
  case ICmpInst::ICMP_EQ:    return "eq";
  case ICmpInst::ICMP_NE:    return "ne";
  case ICmpInst::ICMP_SGT:   return "sgt";
  case ICmpInst::ICMP_SGE:   return "sge";
  case ICmpInst::ICMP_SLT:   return "slt";
  case ICmpInst::ICMP_SLE:   return "sle";
  case ICmpInst::ICMP_UGT:   return "ugt";
  case ICmpInst::ICMP_UGE:   return "uge";
  case ICmpInst::ICMP_ULT:   return "ult";
  case ICmpInst::ICMP_ULE:   return "ule";
  }
}
4211 
4212 raw_ostream &llvm::operator<<(raw_ostream &OS, CmpInst::Predicate Pred) {
4213   OS << CmpInst::getPredicateName(Pred);
4214   return OS;
4215 }
4216 
// Map an unsigned relational predicate to its signed counterpart; equality
// and already-signed predicates are returned unchanged.
ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown icmp predicate!");
    case ICMP_EQ: case ICMP_NE:
    case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
       return pred;
    case ICMP_UGT: return ICMP_SGT;
    case ICMP_ULT: return ICMP_SLT;
    case ICMP_UGE: return ICMP_SGE;
    case ICMP_ULE: return ICMP_SLE;
  }
}
4229 
// Map a signed relational predicate to its unsigned counterpart; equality
// and already-unsigned predicates are returned unchanged.
ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown icmp predicate!");
    case ICMP_EQ: case ICMP_NE:
    case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
       return pred;
    case ICMP_SGT: return ICMP_UGT;
    case ICMP_SLT: return ICMP_ULT;
    case ICMP_SGE: return ICMP_UGE;
    case ICMP_SLE: return ICMP_ULE;
  }
}
4242 
// Return the predicate that gives the same result when the two compare
// operands are exchanged (e.g. A < B  <=>  B > A).  Symmetric predicates
// (equality and ordered/unordered checks) map to themselves.
CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
  switch (pred) {
    default: llvm_unreachable("Unknown cmp predicate!");
    case ICMP_EQ: case ICMP_NE:
      return pred;
    case ICMP_SGT: return ICMP_SLT;
    case ICMP_SLT: return ICMP_SGT;
    case ICMP_SGE: return ICMP_SLE;
    case ICMP_SLE: return ICMP_SGE;
    case ICMP_UGT: return ICMP_ULT;
    case ICMP_ULT: return ICMP_UGT;
    case ICMP_UGE: return ICMP_ULE;
    case ICMP_ULE: return ICMP_UGE;

    case FCMP_FALSE: case FCMP_TRUE:
    case FCMP_OEQ: case FCMP_ONE:
    case FCMP_UEQ: case FCMP_UNE:
    case FCMP_ORD: case FCMP_UNO:
      return pred;
    case FCMP_OGT: return FCMP_OLT;
    case FCMP_OLT: return FCMP_OGT;
    case FCMP_OGE: return FCMP_OLE;
    case FCMP_OLE: return FCMP_OGE;
    case FCMP_UGT: return FCMP_ULT;
    case FCMP_ULT: return FCMP_UGT;
    case FCMP_UGE: return FCMP_ULE;
    case FCMP_ULE: return FCMP_UGE;
  }
}
4272 
4273 bool CmpInst::isNonStrictPredicate(Predicate pred) {
4274   switch (pred) {
4275   case ICMP_SGE:
4276   case ICMP_SLE:
4277   case ICMP_UGE:
4278   case ICMP_ULE:
4279   case FCMP_OGE:
4280   case FCMP_OLE:
4281   case FCMP_UGE:
4282   case FCMP_ULE:
4283     return true;
4284   default:
4285     return false;
4286   }
4287 }
4288 
4289 bool CmpInst::isStrictPredicate(Predicate pred) {
4290   switch (pred) {
4291   case ICMP_SGT:
4292   case ICMP_SLT:
4293   case ICMP_UGT:
4294   case ICMP_ULT:
4295   case FCMP_OGT:
4296   case FCMP_OLT:
4297   case FCMP_UGT:
4298   case FCMP_ULT:
4299     return true;
4300   default:
4301     return false;
4302   }
4303 }
4304 
// Map a non-strict ("or equal") relational predicate to its strict form,
// e.g. ICMP_SGE -> ICMP_SGT.  Any other predicate is returned unchanged.
CmpInst::Predicate CmpInst::getStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGE:
    return ICMP_SGT;
  case ICMP_SLE:
    return ICMP_SLT;
  case ICMP_UGE:
    return ICMP_UGT;
  case ICMP_ULE:
    return ICMP_ULT;
  case FCMP_OGE:
    return FCMP_OGT;
  case FCMP_OLE:
    return FCMP_OLT;
  case FCMP_UGE:
    return FCMP_UGT;
  case FCMP_ULE:
    return FCMP_ULT;
  default:
    return pred;
  }
}
4327 
// Map a strict relational predicate to its non-strict ("or equal") form,
// e.g. ICMP_SGT -> ICMP_SGE.  Any other predicate is returned unchanged.
CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
  switch (pred) {
  case ICMP_SGT:
    return ICMP_SGE;
  case ICMP_SLT:
    return ICMP_SLE;
  case ICMP_UGT:
    return ICMP_UGE;
  case ICMP_ULT:
    return ICMP_ULE;
  case FCMP_OGT:
    return FCMP_OGE;
  case FCMP_OLT:
    return FCMP_OLE;
  case FCMP_UGT:
    return FCMP_UGE;
  case FCMP_ULT:
    return FCMP_ULE;
  default:
    return pred;
  }
}
4350 
4351 CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
4352   assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
4353 
4354   if (isStrictPredicate(pred))
4355     return getNonStrictPredicate(pred);
4356   if (isNonStrictPredicate(pred))
4357     return getStrictPredicate(pred);
4358 
4359   llvm_unreachable("Unknown predicate!");
4360 }
4361 
// Translate an unsigned relational icmp predicate to its signed equivalent.
// Unlike ICmpInst::getSignedPredicate, this asserts that the input really is
// one of the four unsigned relational predicates.
CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
  assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");

  switch (pred) {
  default:
    llvm_unreachable("Unknown predicate!");
  case CmpInst::ICMP_ULT:
    return CmpInst::ICMP_SLT;
  case CmpInst::ICMP_ULE:
    return CmpInst::ICMP_SLE;
  case CmpInst::ICMP_UGT:
    return CmpInst::ICMP_SGT;
  case CmpInst::ICMP_UGE:
    return CmpInst::ICMP_SGE;
  }
}
4378 
// Translate a signed relational icmp predicate to its unsigned equivalent.
// Unlike ICmpInst::getUnsignedPredicate, this asserts that the input really
// is one of the four signed relational predicates.
CmpInst::Predicate CmpInst::getUnsignedPredicate(Predicate pred) {
  assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");

  switch (pred) {
  default:
    llvm_unreachable("Unknown predicate!");
  case CmpInst::ICMP_SLT:
    return CmpInst::ICMP_ULT;
  case CmpInst::ICMP_SLE:
    return CmpInst::ICMP_ULE;
  case CmpInst::ICMP_SGT:
    return CmpInst::ICMP_UGT;
  case CmpInst::ICMP_SGE:
    return CmpInst::ICMP_UGE;
  }
}
4395 
4396 bool CmpInst::isUnsigned(Predicate predicate) {
4397   switch (predicate) {
4398     default: return false;
4399     case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
4400     case ICmpInst::ICMP_UGE: return true;
4401   }
4402 }
4403 
4404 bool CmpInst::isSigned(Predicate predicate) {
4405   switch (predicate) {
4406     default: return false;
4407     case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
4408     case ICmpInst::ICMP_SGE: return true;
4409   }
4410 }
4411 
// Evaluate integer predicate Pred on two constant operands by delegating to
// the matching APInt comparison; the operands are interpreted as signed or
// unsigned according to the predicate.
bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
                       ICmpInst::Predicate Pred) {
  assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
  switch (Pred) {
  case ICmpInst::Predicate::ICMP_EQ:
    return LHS.eq(RHS);
  case ICmpInst::Predicate::ICMP_NE:
    return LHS.ne(RHS);
  case ICmpInst::Predicate::ICMP_UGT:
    return LHS.ugt(RHS);
  case ICmpInst::Predicate::ICMP_UGE:
    return LHS.uge(RHS);
  case ICmpInst::Predicate::ICMP_ULT:
    return LHS.ult(RHS);
  case ICmpInst::Predicate::ICMP_ULE:
    return LHS.ule(RHS);
  case ICmpInst::Predicate::ICMP_SGT:
    return LHS.sgt(RHS);
  case ICmpInst::Predicate::ICMP_SGE:
    return LHS.sge(RHS);
  case ICmpInst::Predicate::ICMP_SLT:
    return LHS.slt(RHS);
  case ICmpInst::Predicate::ICMP_SLE:
    return LHS.sle(RHS);
  default:
    llvm_unreachable("Unexpected non-integer predicate.");
  };
}
4440 
// Evaluate FP predicate Pred on two constant APFloats.  A single
// APFloat::compare yields one of {cmpLessThan, cmpEqual, cmpGreaterThan,
// cmpUnordered}; each predicate is then a membership test on that result.
// Unordered ("u*") predicates accept cmpUnordered (a NaN operand), ordered
// ("o*") predicates reject it.
bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
                       FCmpInst::Predicate Pred) {
  APFloat::cmpResult R = LHS.compare(RHS);
  switch (Pred) {
  default:
    llvm_unreachable("Invalid FCmp Predicate");
  case FCmpInst::FCMP_FALSE:
    return false;
  case FCmpInst::FCMP_TRUE:
    return true;
  case FCmpInst::FCMP_UNO:
    return R == APFloat::cmpUnordered;
  case FCmpInst::FCMP_ORD:
    return R != APFloat::cmpUnordered;
  case FCmpInst::FCMP_UEQ:
    return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
  case FCmpInst::FCMP_OEQ:
    return R == APFloat::cmpEqual;
  case FCmpInst::FCMP_UNE:
    // "not equal" here includes the unordered result.
    return R != APFloat::cmpEqual;
  case FCmpInst::FCMP_ONE:
    return R == APFloat::cmpLessThan || R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_ULT:
    return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
  case FCmpInst::FCMP_OLT:
    return R == APFloat::cmpLessThan;
  case FCmpInst::FCMP_UGT:
    return R == APFloat::cmpUnordered || R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_OGT:
    return R == APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_ULE:
    // "not greater than": covers less, equal, and unordered.
    return R != APFloat::cmpGreaterThan;
  case FCmpInst::FCMP_OLE:
    return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
  case FCmpInst::FCMP_UGE:
    // "not less than": covers greater, equal, and unordered.
    return R != APFloat::cmpLessThan;
  case FCmpInst::FCMP_OGE:
    return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
  }
}
4481 
4482 CmpInst::Predicate CmpInst::getFlippedSignednessPredicate(Predicate pred) {
4483   assert(CmpInst::isRelational(pred) &&
4484          "Call only with non-equality predicates!");
4485 
4486   if (isSigned(pred))
4487     return getUnsignedPredicate(pred);
4488   if (isUnsigned(pred))
4489     return getSignedPredicate(pred);
4490 
4491   llvm_unreachable("Unknown predicate!");
4492 }
4493 
4494 bool CmpInst::isOrdered(Predicate predicate) {
4495   switch (predicate) {
4496     default: return false;
4497     case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
4498     case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
4499     case FCmpInst::FCMP_ORD: return true;
4500   }
4501 }
4502 
4503 bool CmpInst::isUnordered(Predicate predicate) {
4504   switch (predicate) {
4505     default: return false;
4506     case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
4507     case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
4508     case FCmpInst::FCMP_UNO: return true;
4509   }
4510 }
4511 
4512 bool CmpInst::isTrueWhenEqual(Predicate predicate) {
4513   switch(predicate) {
4514     default: return false;
4515     case ICMP_EQ:   case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
4516     case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
4517   }
4518 }
4519 
4520 bool CmpInst::isFalseWhenEqual(Predicate predicate) {
4521   switch(predicate) {
4522   case ICMP_NE:    case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
4523   case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
4524   default: return false;
4525   }
4526 }
4527 
4528 bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
4529   // If the predicates match, then we know the first condition implies the
4530   // second is true.
4531   if (Pred1 == Pred2)
4532     return true;
4533 
4534   switch (Pred1) {
4535   default:
4536     break;
4537   case ICMP_EQ:
4538     // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
4539     return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
4540            Pred2 == ICMP_SLE;
4541   case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
4542     return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
4543   case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
4544     return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
4545   case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
4546     return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
4547   case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
4548     return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
4549   }
4550   return false;
4551 }
4552 
/// Given two compares of the same operands, return true if the truth of the
/// first (Pred1) guarantees the second (Pred2) is false.  Pred1 implies
/// !Pred2 exactly when Pred1 implies the inverse predicate of Pred2.
bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
  return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
}
4556 
4557 //===----------------------------------------------------------------------===//
4558 //                        SwitchInst Implementation
4559 //===----------------------------------------------------------------------===//
4560 
/// Set up the switch: operand 0 is the condition value, operand 1 the
/// default destination.  NumReserved hung-off use slots are allocated up
/// front so cases can be added later without immediately reallocating.
void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
  assert(Value && Default && NumReserved);
  ReservedSpace = NumReserved;
  // Only two operands are live initially; the remaining reserved slots are
  // populated by addCase().
  setNumHungOffUseOperands(2);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Value;
  Op<1>() = Default;
}
4570 
/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination.  The number of additional cases can
/// be specified here to make memory allocation more efficient.  This
/// constructor can also autoinsert before another instruction.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertBefore) {
  // Reserve 2 slots (condition + default) plus 2 per case (value + dest).
  init(Value, Default, 2+NumCases*2);
}
4581 
/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination.  The number of additional cases can
/// be specified here to make memory allocation more efficient.  This
/// constructor also autoinserts at the end of the specified BasicBlock.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  nullptr, 0, InsertAtEnd) {
  // Reserve 2 slots (condition + default) plus 2 per case (value + dest).
  init(Value, Default, 2+NumCases*2);
}
4592 
/// Copy constructor: init() installs the condition and default destination,
/// then the (value, successor) operand pairs of all cases are copied over.
SwitchInst::SwitchInst(const SwitchInst &SI)
    : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
  init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
  setNumHungOffUseOperands(SI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = SI.getOperandList();
  // Cases start at operand 2 and occupy two slots each.
  for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
    OL[i] = InOL[i];         // case value
    OL[i+1] = InOL[i+1];     // case destination
  }
  SubclassOptionalData = SI.SubclassOptionalData;
}
4605 
/// addCase - Add an entry to the switch instruction...
/// Appends a (OnVal -> Dest) case; each case occupies two operand slots
/// (case value followed by destination block).
void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
  unsigned NewCaseIdx = getNumCases();
  unsigned OpNo = getNumOperands();
  if (OpNo+2 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+2);
  // Write the new case through a CaseHandle so value/successor land in the
  // right slots.
  CaseHandle Case(this, NewCaseIdx);
  Case.setValue(OnVal);
  Case.setSuccessor(Dest);
}
4620 
/// removeCase - This method removes the specified case and its successor
/// from the switch instruction.  The vacated slot is filled by moving the
/// last case into it, so the relative order of the remaining cases changes;
/// the returned iterator points at the slot that now holds the moved case.
SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
  unsigned idx = I->getCaseIndex();

  assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Overwrite this case with the end of the list.
  if (2 + (idx + 1) * 2 != NumOps) {
    OL[2 + idx * 2] = OL[NumOps - 2];
    OL[2 + idx * 2 + 1] = OL[NumOps - 1];
  }

  // Nuke the last value.
  OL[NumOps-2].set(nullptr);
  OL[NumOps-2+1].set(nullptr);
  setNumHungOffUseOperands(NumOps-2);

  return CaseIt(this, idx);
}
4644 
4645 /// growOperands - grow operands - This grows the operand list in response
4646 /// to a push_back style of operation.  This grows the number of ops by 3 times.
4647 ///
4648 void SwitchInst::growOperands() {
4649   unsigned e = getNumOperands();
4650   unsigned NumOps = e*3;
4651 
4652   ReservedSpace = NumOps;
4653   growHungoffUses(ReservedSpace);
4654 }
4655 
4656 MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
4657   assert(Changed && "called only if metadata has changed");
4658 
4659   if (!Weights)
4660     return nullptr;
4661 
4662   assert(SI.getNumSuccessors() == Weights->size() &&
4663          "num of prof branch_weights must accord with num of successors");
4664 
4665   bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });
4666 
4667   if (AllZeroes || Weights->size() < 2)
4668     return nullptr;
4669 
4670   return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
4671 }
4672 
4673 void SwitchInstProfUpdateWrapper::init() {
4674   MDNode *ProfileData = getBranchWeightMDNode(SI);
4675   if (!ProfileData)
4676     return;
4677 
4678   if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
4679     llvm_unreachable("number of prof branch_weights metadata operands does "
4680                      "not correspond to number of succesors");
4681   }
4682 
4683   SmallVector<uint32_t, 8> Weights;
4684   if (!extractBranchWeights(ProfileData, Weights))
4685     return;
4686   this->Weights = std::move(Weights);
4687 }
4688 
/// Remove a case while keeping the cached weights in sync with the operand
/// shuffling performed by SwitchInst::removeCase().
SwitchInst::CaseIt
SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
  if (Weights) {
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
    Changed = true;
    // Copy the last case to the place of the removed one and shrink.
    // This is tightly coupled with the way SwitchInst::removeCase() removes
    // the cases in SwitchInst::removeCase(CaseIt).
    // The weight index is case index + 1; slot 0 appears to belong to the
    // default successor (see getSuccessorWeight's idx + 1 offset).
    (*Weights)[I->getCaseIndex() + 1] = Weights->back();
    Weights->pop_back();
  }
  return SI.removeCase(I);
}
4703 
4704 void SwitchInstProfUpdateWrapper::addCase(
4705     ConstantInt *OnVal, BasicBlock *Dest,
4706     SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
4707   SI.addCase(OnVal, Dest);
4708 
4709   if (!Weights && W && *W) {
4710     Changed = true;
4711     Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4712     (*Weights)[SI.getNumSuccessors() - 1] = *W;
4713   } else if (Weights) {
4714     Changed = true;
4715     Weights->push_back(W.value_or(0));
4716   }
4717   if (Weights)
4718     assert(SI.getNumSuccessors() == Weights->size() &&
4719            "num of prof branch_weights must accord with num of successors");
4720 }
4721 
4722 SymbolTableList<Instruction>::iterator
4723 SwitchInstProfUpdateWrapper::eraseFromParent() {
4724   // Instruction is erased. Mark as unchanged to not touch it in the destructor.
4725   Changed = false;
4726   if (Weights)
4727     Weights->resize(0);
4728   return SI.eraseFromParent();
4729 }
4730 
4731 SwitchInstProfUpdateWrapper::CaseWeightOpt
4732 SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {
4733   if (!Weights)
4734     return std::nullopt;
4735   return (*Weights)[idx];
4736 }
4737 
4738 void SwitchInstProfUpdateWrapper::setSuccessorWeight(
4739     unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
4740   if (!W)
4741     return;
4742 
4743   if (!Weights && *W)
4744     Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
4745 
4746   if (Weights) {
4747     auto &OldW = (*Weights)[idx];
4748     if (*W != OldW) {
4749       Changed = true;
4750       OldW = *W;
4751     }
4752   }
4753 }
4754 
4755 SwitchInstProfUpdateWrapper::CaseWeightOpt
4756 SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI,
4757                                                 unsigned idx) {
4758   if (MDNode *ProfileData = getBranchWeightMDNode(SI))
4759     if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
4760       return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
4761           ->getValue()
4762           .getZExtValue();
4763 
4764   return std::nullopt;
4765 }
4766 
4767 //===----------------------------------------------------------------------===//
4768 //                        IndirectBrInst Implementation
4769 //===----------------------------------------------------------------------===//
4770 
/// Set up the indirectbr: operand 0 is always the address; NumDests hung-off
/// use slots are reserved for destinations added later via addDestination().
void IndirectBrInst::init(Value *Address, unsigned NumDests) {
  assert(Address && Address->getType()->isPointerTy() &&
         "Address of indirectbr must be a pointer");
  ReservedSpace = 1+NumDests;
  // Only the address operand is live initially.
  setNumHungOffUseOperands(1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Address;
}
4780 
4781 
4782 /// growOperands - grow operands - This grows the operand list in response
4783 /// to a push_back style of operation.  This grows the number of ops by 2 times.
4784 ///
4785 void IndirectBrInst::growOperands() {
4786   unsigned e = getNumOperands();
4787   unsigned NumOps = e*2;
4788 
4789   ReservedSpace = NumOps;
4790   growHungoffUses(ReservedSpace);
4791 }
4792 
/// Create an indirectbr on Address, inserted before another instruction.
/// NumCases is a capacity hint for the expected number of destinations.
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertBefore) {
  init(Address, NumCases);
}
4799 
/// Create an indirectbr on Address, appended to the given block.
/// NumCases is a capacity hint for the expected number of destinations.
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
  init(Address, NumCases);
}
4806 
/// Copy constructor: allocates matching hung-off use storage and copies the
/// address operand and every destination from the original.
IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
    : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
                  nullptr, IBI.getNumOperands()) {
  allocHungoffUses(IBI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = IBI.getOperandList();
  for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
    OL[i] = InOL[i];
  SubclassOptionalData = IBI.SubclassOptionalData;
}
4817 
4818 /// addDestination - Add a destination.
4819 ///
4820 void IndirectBrInst::addDestination(BasicBlock *DestBB) {
4821   unsigned OpNo = getNumOperands();
4822   if (OpNo+1 > ReservedSpace)
4823     growOperands();  // Get more space!
4824   // Initialize some new operands.
4825   assert(OpNo < ReservedSpace && "Growing didn't work!");
4826   setNumHungOffUseOperands(OpNo+1);
4827   getOperandList()[OpNo] = DestBB;
4828 }
4829 
/// removeDestination - This method removes the specified successor from the
/// indirectbr instruction.  The vacated slot is filled with the last
/// destination, so the order of the remaining destinations changes.
void IndirectBrInst::removeDestination(unsigned idx) {
  assert(idx < getNumOperands()-1 && "Successor index out of range!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Replace this value with the last one.  The +1 skips over the address
  // operand in slot 0.
  OL[idx+1] = OL[NumOps-1];

  // Nuke the last value.
  OL[NumOps-1].set(nullptr);
  setNumHungOffUseOperands(NumOps-1);
}
4845 
4846 //===----------------------------------------------------------------------===//
4847 //                            FreezeInst Implementation
4848 //===----------------------------------------------------------------------===//
4849 
4850 FreezeInst::FreezeInst(Value *S,
4851                        const Twine &Name, Instruction *InsertBefore)
4852     : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
4853   setName(Name);
4854 }
4855 
4856 FreezeInst::FreezeInst(Value *S,
4857                        const Twine &Name, BasicBlock *InsertAtEnd)
4858     : UnaryInstruction(S->getType(), Freeze, S, InsertAtEnd) {
4859   setName(Name);
4860 }
4861 
4862 //===----------------------------------------------------------------------===//
4863 //                           cloneImpl() implementations
4864 //===----------------------------------------------------------------------===//
4865 
4866 // Define these methods here so vtables don't get emitted into every translation
4867 // unit that uses these classes.
4868 
// GEP has a variable-length operand list, so the operand count is passed to
// placement new to size the allocation.
GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
  return new (getNumOperands()) GetElementPtrInst(*this);
}

UnaryOperator *UnaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>());
}

BinaryOperator *BinaryOperator::cloneImpl() const {
  return Create(getOpcode(), Op<0>(), Op<1>());
}

// Compares are rebuilt from predicate plus both operands.
FCmpInst *FCmpInst::cloneImpl() const {
  return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
}

ICmpInst *ICmpInst::cloneImpl() const {
  return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
}

ExtractValueInst *ExtractValueInst::cloneImpl() const {
  return new ExtractValueInst(*this);
}

InsertValueInst *InsertValueInst::cloneImpl() const {
  return new InsertValueInst(*this);
}
4896 
AllocaInst *AllocaInst::cloneImpl() const {
  // The inalloca and swifterror bits are not constructor parameters, so they
  // are copied onto the clone explicitly.
  AllocaInst *Result = new AllocaInst(getAllocatedType(), getAddressSpace(),
                                      getOperand(0), getAlign());
  Result->setUsedWithInAlloca(isUsedWithInAlloca());
  Result->setSwiftError(isSwiftError());
  return Result;
}

// The clone is created unnamed (empty Twine); names are not preserved here.
LoadInst *LoadInst::cloneImpl() const {
  return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
                      getAlign(), getOrdering(), getSyncScopeID());
}

StoreInst *StoreInst::cloneImpl() const {
  return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
                       getOrdering(), getSyncScopeID());
}

AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
  // Volatile and weak flags are not constructor parameters; copy them after
  // construction.
  AtomicCmpXchgInst *Result = new AtomicCmpXchgInst(
      getOperand(0), getOperand(1), getOperand(2), getAlign(),
      getSuccessOrdering(), getFailureOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  Result->setWeak(isWeak());
  return Result;
}

AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
  AtomicRMWInst *Result =
      new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
                        getAlign(), getOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  return Result;
}

FenceInst *FenceInst::cloneImpl() const {
  return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}
4935 
// Cast-instruction clones: each is rebuilt from its single source operand
// and the cast's destination type.
TruncInst *TruncInst::cloneImpl() const {
  return new TruncInst(getOperand(0), getType());
}

ZExtInst *ZExtInst::cloneImpl() const {
  return new ZExtInst(getOperand(0), getType());
}

SExtInst *SExtInst::cloneImpl() const {
  return new SExtInst(getOperand(0), getType());
}

FPTruncInst *FPTruncInst::cloneImpl() const {
  return new FPTruncInst(getOperand(0), getType());
}

FPExtInst *FPExtInst::cloneImpl() const {
  return new FPExtInst(getOperand(0), getType());
}

UIToFPInst *UIToFPInst::cloneImpl() const {
  return new UIToFPInst(getOperand(0), getType());
}

SIToFPInst *SIToFPInst::cloneImpl() const {
  return new SIToFPInst(getOperand(0), getType());
}

FPToUIInst *FPToUIInst::cloneImpl() const {
  return new FPToUIInst(getOperand(0), getType());
}

FPToSIInst *FPToSIInst::cloneImpl() const {
  return new FPToSIInst(getOperand(0), getType());
}

PtrToIntInst *PtrToIntInst::cloneImpl() const {
  return new PtrToIntInst(getOperand(0), getType());
}

IntToPtrInst *IntToPtrInst::cloneImpl() const {
  return new IntToPtrInst(getOperand(0), getType());
}

BitCastInst *BitCastInst::cloneImpl() const {
  return new BitCastInst(getOperand(0), getType());
}

AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
  return new AddrSpaceCastInst(getOperand(0), getType());
}
4987 
CallInst *CallInst::cloneImpl() const {
  // Operand bundles live in a trailing descriptor; when present, extra bytes
  // are reserved for it in the placement allocation.
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) CallInst(*this);
  }
  return  new(getNumOperands()) CallInst(*this);
}

SelectInst *SelectInst::cloneImpl() const {
  return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

VAArgInst *VAArgInst::cloneImpl() const {
  return new VAArgInst(getOperand(0), getType());
}

ExtractElementInst *ExtractElementInst::cloneImpl() const {
  return ExtractElementInst::Create(getOperand(0), getOperand(1));
}

InsertElementInst *InsertElementInst::cloneImpl() const {
  return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
  return new ShuffleVectorInst(getOperand(0), getOperand(1), getShuffleMask());
}

PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }

LandingPadInst *LandingPadInst::cloneImpl() const {
  return new LandingPadInst(*this);
}

// Variable-operand-count instructions pass the operand count to placement
// new so the clone's allocation matches the original's.
ReturnInst *ReturnInst::cloneImpl() const {
  return new(getNumOperands()) ReturnInst(*this);
}

BranchInst *BranchInst::cloneImpl() const {
  return new(getNumOperands()) BranchInst(*this);
}

SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }

IndirectBrInst *IndirectBrInst::cloneImpl() const {
  return new IndirectBrInst(*this);
}
5035 
InvokeInst *InvokeInst::cloneImpl() const {
  // Same bundle-descriptor handling as CallInst::cloneImpl().
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
  }
  return new(getNumOperands()) InvokeInst(*this);
}

CallBrInst *CallBrInst::cloneImpl() const {
  // Same bundle-descriptor handling as CallInst::cloneImpl().
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);
  }
  return new (getNumOperands()) CallBrInst(*this);
}

// Resume always has exactly one operand.
ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }

CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
  return new (getNumOperands()) CleanupReturnInst(*this);
}

CatchReturnInst *CatchReturnInst::cloneImpl() const {
  return new (getNumOperands()) CatchReturnInst(*this);
}

CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
  return new CatchSwitchInst(*this);
}

FuncletPadInst *FuncletPadInst::cloneImpl() const {
  return new (getNumOperands()) FuncletPadInst(*this);
}

UnreachableInst *UnreachableInst::cloneImpl() const {
  LLVMContext &Context = getContext();
  return new UnreachableInst(Context);
}

FreezeInst *FreezeInst::cloneImpl() const {
  return new FreezeInst(getOperand(0));
}
5078