1 //===-- Instruction.cpp - Implement the Instruction class -----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the Instruction class for the IR library.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/IR/Instruction.h"
14 #include "llvm/ADT/DenseSet.h"
15 #include "llvm/IR/Constants.h"
16 #include "llvm/IR/Instructions.h"
17 #include "llvm/IR/IntrinsicInst.h"
18 #include "llvm/IR/Intrinsics.h"
19 #include "llvm/IR/MDBuilder.h"
20 #include "llvm/IR/Operator.h"
21 #include "llvm/IR/Type.h"
22 using namespace llvm;
23 
24 Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
25                          Instruction *InsertBefore)
26   : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
27 
28   // If requested, insert this instruction into a basic block...
29   if (InsertBefore) {
InlineAsm(FunctionType * FTy,const std::string & asmString,const std::string & constraints,bool hasSideEffects,bool isAlignStack,AsmDialect asmDialect,bool canThrow)30     BasicBlock *BB = InsertBefore->getParent();
31     assert(BB && "Instruction to insert before is not in a basic block!");
32     BB->getInstList().insert(InsertBefore->getIterator(), this);
33   }
34 }
35 
36 Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
37                          BasicBlock *InsertAtEnd)
38   : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
39 
40   // append this instruction into the basic block
41   assert(InsertAtEnd && "Basic block to append to may not be NULL!");
get(FunctionType * FTy,StringRef AsmString,StringRef Constraints,bool hasSideEffects,bool isAlignStack,AsmDialect asmDialect,bool canThrow)42   InsertAtEnd->getInstList().push_back(this);
43 }
44 
45 Instruction::~Instruction() {
46   assert(!Parent && "Instruction still linked in the program!");
47 
48   // Replace any extant metadata uses of this instruction with undef to
49   // preserve debug info accuracy. Some alternatives include:
50   // - Treat Instruction like any other Value, and point its extant metadata
51   //   uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
destroyConstant()52   //   trivially dead (i.e. fair game for deletion in many passes), leading to
53   //   stale dbg.values being in effect for too long.
54   // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
55   //   correct. OTOH results in wasted work in some common cases (e.g. when all
56   //   instructions in a BasicBlock are deleted).
57   if (isUsedByMetadata())
58     ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));
59 }
60 
61 
62 void Instruction::setParent(BasicBlock *P) {
63   Parent = P;
Parse(StringRef Str,InlineAsm::ConstraintInfoVector & ConstraintsSoFar)64 }
65 
/// Return the module this instruction belongs to, via its parent block.
/// Requires the instruction to be linked into a basic block.
const Module *Instruction::getModule() const {
  return getParent()->getModule();
}

/// Return the function this instruction belongs to, via its parent block.
/// Requires the instruction to be linked into a basic block.
const Function *Instruction::getFunction() const {
  return getParent()->getParent();
}

/// Unlink this instruction from its parent block's list without deleting it.
/// The caller takes ownership of the now-floating instruction.
void Instruction::removeFromParent() {
  getParent()->getInstList().remove(getIterator());
}

/// Unlink this instruction from its parent block's list AND delete it.
/// Returns an iterator to the instruction that followed it.
iplist<Instruction>::iterator Instruction::eraseFromParent() {
  return getParent()->getInstList().erase(getIterator());
}
81 
/// Insert an unlinked instruction into a basic block immediately before the
/// specified instruction.
void Instruction::insertBefore(Instruction *InsertPos) {
  InsertPos->getParent()->getInstList().insert(InsertPos->getIterator(), this);
}

/// Insert an unlinked instruction into a basic block immediately after the
/// specified instruction.
void Instruction::insertAfter(Instruction *InsertPos) {
  InsertPos->getParent()->getInstList().insertAfter(InsertPos->getIterator(),
                                                    this);
}

/// Unlink this instruction from its current basic block and insert it into the
/// basic block that MovePos lives in, right before MovePos.
void Instruction::moveBefore(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), MovePos->getIterator());
}

/// Unlink this instruction and re-insert it immediately after MovePos, in
/// MovePos's basic block.
void Instruction::moveAfter(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), ++MovePos->getIterator());
}

/// Unlink this instruction and re-insert it into block BB at position I.
/// I may be BB.end() to move to the end of the block. Implemented with
/// splice so no ownership transfer or re-allocation happens.
void Instruction::moveBefore(BasicBlock &BB,
                             SymbolTableList<Instruction>::iterator I) {
  assert(I == BB.end() || I->getParent() == &BB);
  BB.getInstList().splice(I, getParent()->getInstList(), getIterator());
}
110 
/// Return true if this instruction appears strictly before \p Other in their
/// (shared) basic block. Uses cached per-block instruction order numbers,
/// lazily renumbering the block when the cache is invalid.
bool Instruction::comesBefore(const Instruction *Other) const {
  assert(Parent && Other->Parent &&
         "instructions without BB parents have no order");
  assert(Parent == Other->Parent && "cross-BB instruction order comparison");
  if (!Parent->isInstrOrderValid())
    Parent->renumberInstructions();
  return Order < Other->Order;
}
119 
120 bool Instruction::isOnlyUserOfAnyOperand() {
121   return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
122 }
123 
/// Set/clear the nuw flag. Only valid on overflowing binary operators
/// (add/sub/mul/shl); the cast asserts otherwise.
void Instruction::setHasNoUnsignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
}

/// Set/clear the nsw flag. Only valid on overflowing binary operators.
void Instruction::setHasNoSignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
}

/// Set/clear the exact flag. Only valid on udiv/sdiv/lshr/ashr.
void Instruction::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
}

/// Query the nuw flag. Only valid on overflowing binary operators.
bool Instruction::hasNoUnsignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
}

/// Query the nsw flag. Only valid on overflowing binary operators.
bool Instruction::hasNoSignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
}
143 
/// Clear any flag on this instruction whose violation would produce poison
/// (nuw/nsw, exact, inbounds), making the instruction unconditionally defined
/// for all inputs. Opcodes with no such flags are left untouched.
void Instruction::dropPoisonGeneratingFlags() {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    cast<PossiblyExactOperator>(this)->setIsExact(false);
    break;

  case Instruction::GetElementPtr:
    cast<GetElementPtrInst>(this)->setIsInBounds(false);
    break;
  }
  // TODO: FastMathFlags!
}
167 
/// Drop metadata not in \p KnownIDs, and for calls additionally strip
/// parameter/return attributes whose violation is UB, so the instruction can
/// safely be hoisted or sunk to a context where they may not hold.
void Instruction::dropUndefImplyingAttrsAndUnknownMetadata(
    ArrayRef<unsigned> KnownIDs) {
  dropUnknownNonDebugMetadata(KnownIDs);
  auto *CB = dyn_cast<CallBase>(this);
  if (!CB)
    return;
  // For call instructions, we also need to drop parameter and return attributes
  // that can cause UB if the call is moved to a location where the
  // attribute is not valid.
  AttributeList AL = CB->getAttributes();
  if (AL.isEmpty())
    return;
  AttrBuilder UBImplyingAttributes = AttributeFuncs::getUBImplyingAttributes();
  for (unsigned ArgNo = 0; ArgNo < CB->getNumArgOperands(); ArgNo++)
    CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
  CB->removeAttributes(AttributeList::ReturnIndex, UBImplyingAttributes);
}
185 
/// Query the exact flag. Only valid on udiv/sdiv/lshr/ashr.
bool Instruction::isExact() const {
  return cast<PossiblyExactOperator>(this)->isExact();
}

/// Set/clear all fast-math flags at once. Only valid on FP math operators.
void Instruction::setFast(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFast(B);
}

/// Set/clear the reassoc fast-math flag. Only valid on FP math operators.
void Instruction::setHasAllowReassoc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReassoc(B);
}

/// Set/clear the nnan fast-math flag. Only valid on FP math operators.
void Instruction::setHasNoNaNs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoNaNs(B);
}
204 
205 void Instruction::setHasNoInfs(bool B) {
206   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
selectAlternative(unsigned index)207   cast<FPMathOperator>(this)->setHasNoInfs(B);
208 }
209 
/// Set/clear the nsz fast-math flag. Only valid on FP math operators.
void Instruction::setHasNoSignedZeros(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}
214 
215 void Instruction::setHasAllowReciprocal(bool B) {
216   assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
217   cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
ParseConstraints(StringRef Constraints)218 }
219 
/// Set/clear the contract fast-math flag. Only valid on FP math operators.
void Instruction::setHasAllowContract(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowContract(B);
}

/// Set/clear the afn fast-math flag. Only valid on FP math operators.
void Instruction::setHasApproxFunc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasApproxFunc(B);
}

/// Overwrite all fast-math flags with \p FMF. Only valid on FP math operators.
void Instruction::setFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}

/// Copy the fast-math flags in \p FMF onto this instruction. Only valid on
/// FP math operators.
void Instruction::copyFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
  cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}
239 
/// True if all fast-math flags are set. Only valid on FP math operators.
bool Instruction::isFast() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->isFast();
}

/// Query the reassoc fast-math flag. Only valid on FP math operators.
bool Instruction::hasAllowReassoc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReassoc();
}

/// Query the nnan fast-math flag. Only valid on FP math operators.
bool Instruction::hasNoNaNs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoNaNs();
}
/// Query the ninf fast-math flag. Only valid on FP math operators.
bool Instruction::hasNoInfs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoInfs();
}

/// Query the nsz fast-math flag. Only valid on FP math operators.
bool Instruction::hasNoSignedZeros() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoSignedZeros();
}

/// Query the arcp fast-math flag. Only valid on FP math operators.
bool Instruction::hasAllowReciprocal() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReciprocal();
}

/// Query the contract fast-math flag. Only valid on FP math operators.
bool Instruction::hasAllowContract() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowContract();
}

/// Query the afn fast-math flag. Only valid on FP math operators.
bool Instruction::hasApproxFunc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasApproxFunc();
}

/// Return the full fast-math flag set. Only valid on FP math operators.
FastMathFlags Instruction::getFastMathFlags() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->getFastMathFlags();
}

/// Copy all fast-math flags from instruction \p I onto this one.
void Instruction::copyFastMathFlags(const Instruction *I) {
  copyFastMathFlags(I->getFastMathFlags());
}
288 
/// Copy optional IR flags (nuw/nsw, exact, fast-math, inbounds) from \p V
/// onto this instruction where the flag kind applies to both. Wrap flags are
/// copied only when \p IncludeWrapFlags is true.
void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
  // Copy the wrapping flags.
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  // Note: `|` (not `||`) — inbounds ends up set if either GEP had it.
  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() | DestGEP->isInBounds());
}
312 
/// Intersect (logical AND) the optional IR flags of this instruction with
/// those of \p V — keep a flag only if both sides have it. Used when merging
/// two equivalent instructions into one.
void Instruction::andIRFlags(const Value *V) {
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (isa<OverflowingBinaryOperator>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() & OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() & OB->hasNoUnsignedWrap());
    }
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(isExact() & PE->isExact());

  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    if (isa<FPMathOperator>(this)) {
      FastMathFlags FM = getFastMathFlags();
      FM &= FP->getFastMathFlags();
      copyFastMathFlags(FM);
    }
  }

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() & DestGEP->isInBounds());
}
337 
/// Return the textual (assembly) name for opcode \p OpCode, e.g. "add" for
/// Instruction::Add. Unknown opcodes yield "<Invalid operator> ".
const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret:    return "ret";
  case Br:     return "br";
  case Switch: return "switch";
  case IndirectBr: return "indirectbr";
  case Invoke: return "invoke";
  case Resume: return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet: return "cleanupret";
  case CatchRet: return "catchret";
  case CatchPad: return "catchpad";
  case CatchSwitch: return "catchswitch";
  case CallBr: return "callbr";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add: return "add";
  case FAdd: return "fadd";
  case Sub: return "sub";
  case FSub: return "fsub";
  case Mul: return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca:        return "alloca";
  case Load:          return "load";
  case Store:         return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW:     return "atomicrmw";
  case Fence:         return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc:         return "trunc";
  case ZExt:          return "zext";
  case SExt:          return "sext";
  case FPTrunc:       return "fptrunc";
  case FPExt:         return "fpext";
  case FPToUI:        return "fptoui";
  case FPToSI:        return "fptosi";
  case UIToFP:        return "uitofp";
  case SIToFP:        return "sitofp";
  case IntToPtr:      return "inttoptr";
  case PtrToInt:      return "ptrtoint";
  case BitCast:       return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp:           return "icmp";
  case FCmp:           return "fcmp";
  case PHI:            return "phi";
  case Select:         return "select";
  case Call:           return "call";
  case Shl:            return "shl";
  case LShr:           return "lshr";
  case AShr:           return "ashr";
  case VAArg:          return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement:  return "insertelement";
  case ShuffleVector:  return "shufflevector";
  case ExtractValue:   return "extractvalue";
  case InsertValue:    return "insertvalue";
  case LandingPad:     return "landingpad";
  case CleanupPad:     return "cleanuppad";
  case Freeze:         return "freeze";

  default: return "<Invalid operator> ";
  }
}
422 
/// Return true if both instructions have the same special state. This must be
/// kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/IPO/MergeFunctions.cpp.
/// "Special state" is the per-opcode payload beyond operands and type:
/// alignment, volatility, orderings, attributes, predicates, indices, etc.
/// I1 and I2 must have the same opcode. When \p IgnoreAlignment is set,
/// alignment differences on alloca/load/store are tolerated.
static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
                                 bool IgnoreAlignment = false) {
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlignment() == cast<AllocaInst>(I2)->getAlignment() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();

  // Opcodes with no special state compare equal by construction.
  return true;
}
489 
/// Strict identity: identical-when-defined plus equal optional flag bits
/// (nuw/nsw/exact/fast-math stored in SubclassOptionalData).
bool Instruction::isIdenticalTo(const Instruction *I) const {
  return isIdenticalToWhenDefined(I) &&
         SubclassOptionalData == I->SubclassOptionalData;
}
494 
/// Return true if this instruction and \p I compute the same value whenever
/// both are defined: same opcode, type, operands (compared by pointer), and
/// per-opcode special state. Optional poison-generating flags are ignored.
bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      getType() != I->getType())
    return false;

  // If both instructions have no operands, they are identical.
  if (getNumOperands() == 0 && I->getNumOperands() == 0)
    return haveSameSpecialState(this, I);

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same.
  if (!std::equal(op_begin(), op_end(), I->op_begin()))
    return false;

  // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
  // PHIs must also agree on incoming-block order, not just incoming values.
  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
    return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
  }

  return haveSameSpecialState(this, I);
}
519 
// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/IPO/MergeFunctions.cpp.
/// Return true if this and \p I perform the same operation: same opcode,
/// operand count, matching types (scalar-element types only if
/// CompareUsingScalarTypes is in \p flags), and same special state
/// (alignment ignored if CompareIgnoringAlignment is in \p flags).
/// Unlike isIdenticalTo, operand *values* are not compared.
bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes  = flags & CompareUsingScalarTypes;

  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
       getType()->getScalarType() != I->getType()->getScalarType() :
       getType() != I->getType()))
    return false;

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same type
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
        getOperand(i)->getType()->getScalarType() !=
          I->getOperand(i)->getType()->getScalarType() :
        getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  return haveSameSpecialState(this, I, IgnoreAlignment);
}
545 
/// Return true if any use of this instruction lives outside block \p BB.
/// A PHI use counts as residing in the corresponding incoming block, not in
/// the PHI's own block.
bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
  for (const Use &U : uses()) {
    // PHI nodes use values in the corresponding predecessor block.  For other
    // instructions, just check to see whether the parent of the use matches up.
    const Instruction *I = cast<Instruction>(U.getUser());
    const PHINode *PN = dyn_cast<PHINode>(I);
    if (!PN) {
      if (I->getParent() != BB)
        return true;
      continue;
    }

    if (PN->getIncomingBlock(U) != BB)
      return true;
  }
  return false;
}
563 
/// Conservatively return true if this instruction may read from memory.
bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    // Calls read unless explicitly marked as not reading memory.
    return !cast<CallBase>(this)->doesNotReadMemory();
  case Instruction::Store:
    // Volatile/ordered-atomic stores are conservatively treated as reads too.
    return !cast<StoreInst>(this)->isUnordered();
  }
}
583 
/// Conservatively return true if this instruction may write to memory.
bool Instruction::mayWriteToMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
  case Instruction::Store:
  case Instruction::VAArg:
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    // Calls write unless marked read-only (or read-none).
    return !cast<CallBase>(this)->onlyReadsMemory();
  case Instruction::Load:
    // Volatile/ordered-atomic loads are conservatively treated as writes too.
    return !cast<LoadInst>(this)->isUnordered();
  }
}
603 
/// Return true if this instruction has atomic semantics: cmpxchg, atomicrmw,
/// fence, or a load/store with an atomic ordering.
bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}
618 
619 bool Instruction::hasAtomicLoad() const {
620   assert(isAtomic());
621   switch (getOpcode()) {
622   default:
623     return false;
624   case Instruction::AtomicCmpXchg:
625   case Instruction::AtomicRMW:
626   case Instruction::Load:
627     return true;
628   }
629 }
630 
631 bool Instruction::hasAtomicStore() const {
632   assert(isAtomic());
633   switch (getOpcode()) {
634   default:
635     return false;
636   case Instruction::AtomicCmpXchg:
637   case Instruction::AtomicRMW:
638   case Instruction::Store:
639     return true;
640   }
641 }
642 
/// Return true if this instruction has a volatile memory access: volatile
/// load/store/cmpxchg/atomicrmw, or one of the few intrinsics that carry a
/// volatile flag.
bool Instruction::isVolatile() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(this)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(this)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(this)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->isVolatile();
  case Instruction::Call:
  case Instruction::Invoke:
    // There are a very limited number of intrinsics with volatile flags.
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      if (auto *MI = dyn_cast<MemIntrinsic>(II))
        return MI->isVolatile();
      switch (II->getIntrinsicID()) {
      default: break;
      // Matrix load/store pass "volatile" as an i1 constant argument.
      case Intrinsic::matrix_column_major_load:
        return cast<ConstantInt>(II->getArgOperand(2))->isOne();
      case Intrinsic::matrix_column_major_store:
        return cast<ConstantInt>(II->getArgOperand(3))->isOne();
      }
    }
    return false;
  }
}
672 
/// Return true if this instruction may throw an exception: a call not marked
/// nounwind, a cleanupret/catchswitch that unwinds to the caller, or resume.
bool Instruction::mayThrow() const {
  if (const CallInst *CI = dyn_cast<CallInst>(this))
    return !CI->doesNotThrow();
  if (const auto *CRI = dyn_cast<CleanupReturnInst>(this))
    return CRI->unwindsToCaller();
  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(this))
    return CatchSwitch->unwindsToCaller();
  return isa<ResumeInst>(this);
}
682 
/// Return true if this instruction may have observable effects beyond its
/// result: writing memory, throwing, or possibly not returning.
bool Instruction::mayHaveSideEffects() const {
  return mayWriteToMemory() || mayThrow() || !willReturn();
}
686 
687 bool Instruction::isSafeToRemove() const {
688   return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
689          !this->isTerminator();
690 }
691 
/// Return true if execution is guaranteed to transfer past this instruction
/// (it neither diverges nor aborts).
bool Instruction::willReturn() const {
  // Volatile store isn't guaranteed to return; see LangRef.
  if (auto *SI = dyn_cast<StoreInst>(this))
    return !SI->isVolatile();

  if (const auto *CB = dyn_cast<CallBase>(this))
    // FIXME: Temporarily assume that all side-effect free intrinsics will
    // return. Remove this workaround once all intrinsics are appropriately
    // annotated.
    return CB->hasFnAttr(Attribute::WillReturn) ||
           (isa<IntrinsicInst>(CB) && CB->onlyReadsMemory());
  // Everything else always makes forward progress past itself.
  return true;
}
705 
706 bool Instruction::isLifetimeStartOrEnd() const {
707   auto *II = dyn_cast<IntrinsicInst>(this);
708   if (!II)
709     return false;
710   Intrinsic::ID ID = II->getIntrinsicID();
711   return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
712 }
713 
714 bool Instruction::isLaunderOrStripInvariantGroup() const {
715   auto *II = dyn_cast<IntrinsicInst>(this);
716   if (!II)
717     return false;
718   Intrinsic::ID ID = II->getIntrinsicID();
719   return ID == Intrinsic::launder_invariant_group ||
720          ID == Intrinsic::strip_invariant_group;
721 }
722 
/// Return true if this is a debug-info intrinsic or a pseudo-probe — the
/// instructions that should be skipped when scanning "real" IR.
bool Instruction::isDebugOrPseudoInst() const {
  return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
}
726 
/// Return the next instruction in this block that is not a debug intrinsic
/// (and, if \p SkipPseudoOp, not a pseudo-probe), or null if none remains.
const Instruction *
Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
  for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
      return I;
  return nullptr;
}
734 
/// Return the previous instruction in this block that is not a debug
/// intrinsic (and, if \p SkipPseudoOp, not a pseudo-probe), or null if none.
const Instruction *
Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
  for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
      return I;
  return nullptr;
}
742 
/// Return true if this operation is associative: either its opcode is
/// unconditionally associative, or it is an FP mul/add carrying the
/// reassoc and nsz fast-math flags.
bool Instruction::isAssociative() const {
  unsigned Opcode = getOpcode();
  if (isAssociative(Opcode))
    return true;

  switch (Opcode) {
  case FMul:
  case FAdd:
    return cast<FPMathOperator>(this)->hasAllowReassoc() &&
           cast<FPMathOperator>(this)->hasNoSignedZeros();
  default:
    return false;
  }
}
757 
/// Return true if the operation is commutative. Intrinsic calls defer to the
/// intrinsic's own commutativity; everything else is decided by opcode.
bool Instruction::isCommutative() const {
  if (auto *II = dyn_cast<IntrinsicInst>(this))
    return II->isCommutative();
  // TODO: Should allow icmp/fcmp?
  return isCommutative(getOpcode());
}
764 
/// Return the number of successor blocks of this terminator. Dispatches to
/// the concrete terminator class via the Instruction.def X-macro; aborts if
/// this is not a terminator.
unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
776 
/// Return successor \p idx of this terminator. Dispatches to the concrete
/// terminator class via the Instruction.def X-macro; aborts if this is not a
/// terminator.
BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
788 
/// Replace successor \p idx of this terminator with block \p B. Dispatches to
/// the concrete terminator class via the Instruction.def X-macro; aborts if
/// this is not a terminator.
void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}
800 
801 void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
802   for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
803        Idx != NumSuccessors; ++Idx)
804     if (getSuccessor(Idx) == OldBB)
805       setSuccessor(Idx, NewBB);
806 }
807 
/// Fallback cloneImpl — every concrete subclass must override this; reaching
/// it indicates a subclass forgot to.
Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}
811 
/// Swap the two weights of "branch_weights" !prof metadata, for use when the
/// branch's successors are swapped. Metadata of any other shape or name is
/// left untouched.
void Instruction::swapProfMetadata() {
  MDNode *ProfileData = getMetadata(LLVMContext::MD_prof);
  // Expect exactly: !{"branch_weights", weight0, weight1}.
  if (!ProfileData || ProfileData->getNumOperands() != 3 ||
      !isa<MDString>(ProfileData->getOperand(0)))
    return;

  MDString *MDName = cast<MDString>(ProfileData->getOperand(0));
  if (MDName->getString() != "branch_weights")
    return;

  // The first operand is the name. Fetch them backwards and build a new one.
  Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
                     ProfileData->getOperand(1)};
  setMetadata(LLVMContext::MD_prof,
              MDNode::get(ProfileData->getContext(), Ops));
}
828 
/// Copy metadata (including the debug location) from \p SrcInst onto this
/// instruction. If \p WL is non-empty, only metadata kinds listed in it are
/// copied; an empty \p WL copies everything.
void Instruction::copyMetadata(const Instruction &SrcInst,
                               ArrayRef<unsigned> WL) {
  if (!SrcInst.hasMetadata())
    return;

  // Build a set for O(1) whitelist membership tests.
  DenseSet<unsigned> WLS;
  for (unsigned M : WL)
    WLS.insert(M);

  // Otherwise, enumerate and copy over metadata from the old instruction to the
  // new one.
  SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
  SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
  for (const auto &MD : TheMDs) {
    if (WL.empty() || WLS.count(MD.first))
      setMetadata(MD.first, MD.second);
  }
  // The debug location is stored separately from other metadata.
  if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
    setDebugLoc(SrcInst.getDebugLoc());
}
849 
/// Create an unlinked copy of this instruction: same operands, optional
/// flags, and metadata, but no parent block. Dispatches to the concrete
/// subclass's cloneImpl via the Instruction.def X-macro.
Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  // cloneImpl copies operands only; carry over flags and metadata here.
  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}
867