1 //===-- Instruction.cpp - Implement the Instruction class -----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the Instruction class for the IR library.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/IR/Instruction.h"
14 #include "llvm/ADT/DenseSet.h"
15 #include "llvm/IR/AttributeMask.h"
16 #include "llvm/IR/Constants.h"
17 #include "llvm/IR/Instructions.h"
18 #include "llvm/IR/IntrinsicInst.h"
19 #include "llvm/IR/Intrinsics.h"
20 #include "llvm/IR/Operator.h"
21 #include "llvm/IR/ProfDataUtils.h"
22 #include "llvm/IR/Type.h"
23 using namespace llvm;
24 
25 Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
26                          Instruction *InsertBefore)
27   : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
28 
29   // If requested, insert this instruction into a basic block...
30   if (InsertBefore) {
31     BasicBlock *BB = InsertBefore->getParent();
32     assert(BB && "Instruction to insert before is not in a basic block!");
33     insertInto(BB, InsertBefore->getIterator());
34   }
35 }
36 
/// Construct an instruction and append it to the end of \p InsertAtEnd.
/// Unlike the insert-before constructor, the block here is mandatory.
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         BasicBlock *InsertAtEnd)
  : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // append this instruction into the basic block
  assert(InsertAtEnd && "Basic block to append to may not be NULL!");
  insertInto(InsertAtEnd, InsertAtEnd->end());
}
45 
Instruction::~Instruction() {
  // Callers must unlink the instruction from its block before destroying it.
  assert(!Parent && "Instruction still linked in the program!");

  // Replace any extant metadata uses of this instruction with undef to
  // preserve debug info accuracy. Some alternatives include:
  // - Treat Instruction like any other Value, and point its extant metadata
  //   uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
  //   trivially dead (i.e. fair game for deletion in many passes), leading to
  //   stale dbg.values being in effect for too long.
  // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
  //   correct. OTOH results in wasted work in some common cases (e.g. when all
  //   instructions in a BasicBlock are deleted).
  if (isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));

  // Explicitly remove DIAssignID metadata to clear up ID -> Instruction(s)
  // mapping in LLVMContext.
  setMetadata(LLVMContext::MD_DIAssignID, nullptr);
}
65 
66 
/// Update the cached parent pointer. This only records the containing block;
/// it does not splice the instruction into P's instruction list.
void Instruction::setParent(BasicBlock *P) {
  Parent = P;
}
70 
/// Return the module this instruction belongs to, via its parent block.
/// Requires the instruction to be linked into a block.
const Module *Instruction::getModule() const {
  return getParent()->getModule();
}

/// Return the function this instruction belongs to, via its parent block.
/// Requires the instruction to be linked into a block.
const Function *Instruction::getFunction() const {
  return getParent()->getParent();
}
78 
/// Unlink this instruction from its basic block without deleting it; the
/// caller takes ownership of the now-detached instruction.
void Instruction::removeFromParent() {
  getParent()->getInstList().remove(getIterator());
}

/// Unlink this instruction from its basic block and delete it. Returns an
/// iterator to the element that followed it in the block.
iplist<Instruction>::iterator Instruction::eraseFromParent() {
  return getParent()->getInstList().erase(getIterator());
}
86 
/// Insert an unlinked instruction into a basic block immediately before the
/// specified instruction.
void Instruction::insertBefore(Instruction *InsertPos) {
  insertInto(InsertPos->getParent(), InsertPos->getIterator());
}

/// Insert an unlinked instruction into a basic block immediately after the
/// specified instruction.
void Instruction::insertAfter(Instruction *InsertPos) {
  // std::next yields the position just past InsertPos, i.e. "after" it.
  insertInto(InsertPos->getParent(), std::next(InsertPos->getIterator()));
}
98 
/// Insert this (currently detached) instruction into \p ParentBB at position
/// \p It, which may be ParentBB->end(). Returns an iterator to the inserted
/// instruction.
BasicBlock::iterator Instruction::insertInto(BasicBlock *ParentBB,
                                             BasicBlock::iterator It) {
  assert(getParent() == nullptr && "Expected detached instruction");
  assert((It == ParentBB->end() || It->getParent() == ParentBB) &&
         "It not in ParentBB");
  return ParentBB->getInstList().insert(It, this);
}
106 
107 /// Unlink this instruction from its current basic block and insert it into the
108 /// basic block that MovePos lives in, right before MovePos.
109 void Instruction::moveBefore(Instruction *MovePos) {
110   moveBefore(*MovePos->getParent(), MovePos->getIterator());
111 }
112 
113 void Instruction::moveAfter(Instruction *MovePos) {
114   moveBefore(*MovePos->getParent(), ++MovePos->getIterator());
115 }
116 
/// Unlink this instruction from its current block and splice it into \p BB
/// just before position \p I (which may be BB.end()).
void Instruction::moveBefore(BasicBlock &BB,
                             SymbolTableList<Instruction>::iterator I) {
  assert(I == BB.end() || I->getParent() == &BB);
  BB.splice(I, getParent(), getIterator());
}
122 
/// Return true if this instruction appears before \p Other in their (shared)
/// basic block. Instruction ordering numbers are computed lazily: if the
/// block's numbering has been invalidated by insertions/removals, renumber
/// first, then compare the cached Order fields.
bool Instruction::comesBefore(const Instruction *Other) const {
  assert(Parent && Other->Parent &&
         "instructions without BB parents have no order");
  assert(Parent == Other->Parent && "cross-BB instruction order comparison");
  if (!Parent->isInstrOrderValid())
    Parent->renumberInstructions();
  return Order < Other->Order;
}
131 
/// Find the first position at which this instruction's result is guaranteed
/// to be defined and where new code may legally be inserted. Returns nullptr
/// when no single legal insertion point exists.
Instruction *Instruction::getInsertionPointAfterDef() {
  assert(!getType()->isVoidTy() && "Instruction must define result");
  BasicBlock *InsertBB;
  BasicBlock::iterator InsertPt;
  if (auto *PN = dyn_cast<PHINode>(this)) {
    // PHIs must stay grouped at the block top; insert after all of them.
    InsertBB = PN->getParent();
    InsertPt = InsertBB->getFirstInsertionPt();
  } else if (auto *II = dyn_cast<InvokeInst>(this)) {
    // An invoke's result is only defined on the normal path.
    InsertBB = II->getNormalDest();
    InsertPt = InsertBB->getFirstInsertionPt();
  } else if (isa<CallBrInst>(this)) {
    // Def is available in multiple successors, there's no single dominating
    // insertion point.
    return nullptr;
  } else {
    assert(!isTerminator() && "Only invoke/callbr terminators return value");
    InsertBB = getParent();
    InsertPt = std::next(getIterator());
  }

  // catchswitch blocks don't have any legal insertion point (because they
  // are both an exception pad and a terminator).
  if (InsertPt == InsertBB->end())
    return nullptr;
  return &*InsertPt;
}
158 
159 bool Instruction::isOnlyUserOfAnyOperand() {
160   return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
161 }
162 
// The following accessors forward to the Operator subclasses that actually
// store the flag bits. Each cast<> asserts that the opcode supports the flag
// (e.g. nuw/nsw only on overflowing binary operators).

/// Set the nuw (no unsigned wrap) flag.
void Instruction::setHasNoUnsignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
}

/// Set the nsw (no signed wrap) flag.
void Instruction::setHasNoSignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
}

/// Set the exact flag (udiv/sdiv/lshr/ashr).
void Instruction::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
}

/// Query the nuw flag.
bool Instruction::hasNoUnsignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
}

/// Query the nsw flag.
bool Instruction::hasNoSignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
}
182 
/// Return true if this instruction carries any flag (nuw/nsw, exact,
/// inbounds, nnan/ninf, ...) that can make its result poison.
bool Instruction::hasPoisonGeneratingFlags() const {
  return cast<Operator>(this)->hasPoisonGeneratingFlags();
}
186 
/// Clear every flag on this instruction that could cause it to produce
/// poison. The opcode list here must mirror hasPoisonGeneratingFlags()
/// (see the assert at the end).
void Instruction::dropPoisonGeneratingFlags() {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
    // Overflowing binary ops: drop nuw and nsw.
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    // Division/shift-right ops: drop the exact flag.
    cast<PossiblyExactOperator>(this)->setIsExact(false);
    break;

  case Instruction::GetElementPtr:
    // GEP: drop inbounds.
    cast<GetElementPtrInst>(this)->setIsInBounds(false);
    break;
  }
  // FP ops: nnan and ninf make the result poison on NaN/Inf inputs.
  if (isa<FPMathOperator>(this)) {
    setHasNoNaNs(false);
    setHasNoInfs(false);
  }

  assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
}
215 
/// Return true if this instruction has metadata (!range, !nonnull, !align)
/// whose violation yields poison. Keep this ID list in sync with
/// dropPoisonGeneratingMetadata() below.
bool Instruction::hasPoisonGeneratingMetadata() const {
  return hasMetadata(LLVMContext::MD_range) ||
         hasMetadata(LLVMContext::MD_nonnull) ||
         hasMetadata(LLVMContext::MD_align);
}

/// Remove all poison-generating metadata kinds from this instruction.
void Instruction::dropPoisonGeneratingMetadata() {
  eraseMetadata(LLVMContext::MD_range);
  eraseMetadata(LLVMContext::MD_nonnull);
  eraseMetadata(LLVMContext::MD_align);
}
227 
/// Drop all metadata not listed in \p KnownIDs (debug metadata excepted),
/// and for calls also drop attributes whose violation is immediate UB.
void Instruction::dropUBImplyingAttrsAndUnknownMetadata(
    ArrayRef<unsigned> KnownIDs) {
  dropUnknownNonDebugMetadata(KnownIDs);
  auto *CB = dyn_cast<CallBase>(this);
  if (!CB)
    return;
  // For call instructions, we also need to drop parameter and return
  // attributes that can cause UB if the call is moved to a location where the
  // attribute is not valid.
  AttributeList AL = CB->getAttributes();
  if (AL.isEmpty())
    return;
  AttributeMask UBImplyingAttributes =
      AttributeFuncs::getUBImplyingAttributes();
  for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
    CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
  CB->removeRetAttrs(UBImplyingAttributes);
}
246 
/// Convenience wrapper: drop UB-implying attributes and all metadata except
/// the kinds that are safe to keep when speculating this instruction.
void Instruction::dropUBImplyingAttrsAndMetadata() {
  // !annotation metadata does not impact semantics.
  // !range, !nonnull and !align produce poison, so they are safe to speculate.
  // !noundef and various AA metadata must be dropped, as it generally produces
  // immediate undefined behavior.
  unsigned KnownIDs[] = {LLVMContext::MD_annotation, LLVMContext::MD_range,
                         LLVMContext::MD_nonnull, LLVMContext::MD_align};
  dropUBImplyingAttrsAndUnknownMetadata(KnownIDs);
}
256 
/// Query the exact flag (asserts if the opcode cannot carry it).
bool Instruction::isExact() const {
  return cast<PossiblyExactOperator>(this)->isExact();
}
260 
// Fast-math flag setters. Each asserts that this instruction is an FP math
// operator before forwarding to FPMathOperator, which owns the flag bits.

void Instruction::setFast(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFast(B);
}

void Instruction::setHasAllowReassoc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReassoc(B);
}

void Instruction::setHasNoNaNs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoNaNs(B);
}

void Instruction::setHasNoInfs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoInfs(B);
}

void Instruction::setHasNoSignedZeros(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}

void Instruction::setHasAllowReciprocal(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
}

void Instruction::setHasAllowContract(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowContract(B);
}

void Instruction::setHasApproxFunc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasApproxFunc(B);
}

/// Replace all fast-math flags with \p FMF.
void Instruction::setFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}

/// OR \p FMF into the existing fast-math flags (union, not replacement).
void Instruction::copyFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
  cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}
310 
// Fast-math flag getters; counterparts to the setters above.

bool Instruction::isFast() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->isFast();
}

bool Instruction::hasAllowReassoc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReassoc();
}

bool Instruction::hasNoNaNs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoNaNs();
}

bool Instruction::hasNoInfs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoInfs();
}

bool Instruction::hasNoSignedZeros() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoSignedZeros();
}

bool Instruction::hasAllowReciprocal() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReciprocal();
}

bool Instruction::hasAllowContract() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowContract();
}

bool Instruction::hasApproxFunc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasApproxFunc();
}

/// Return the full set of fast-math flags on this FP operation.
FastMathFlags Instruction::getFastMathFlags() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->getFastMathFlags();
}

/// Copy all fast-math flags from instruction \p I onto this one.
void Instruction::copyFastMathFlags(const Instruction *I) {
  copyFastMathFlags(I->getFastMathFlags());
}
359 
/// Copy IR-level flags (nuw/nsw, exact, fast-math, inbounds) from \p V onto
/// this instruction, where both sides support the flag in question.
/// \p IncludeWrapFlags gates the nuw/nsw copy for callers that must not
/// propagate wrapping guarantees.
void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
  // Copy the wrapping flags.
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  // For GEPs, inbounds is OR'd in rather than overwritten: an already
  // inbounds destination stays inbounds.
  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() || DestGEP->isInBounds());
}
383 
/// Intersect (logical AND) this instruction's IR flags with those of \p V:
/// a flag survives only if both instructions carry it. Used when merging two
/// equivalent instructions so the result makes no stronger guarantees than
/// either original.
void Instruction::andIRFlags(const Value *V) {
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (isa<OverflowingBinaryOperator>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
    }
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(isExact() && PE->isExact());

  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    if (isa<FPMathOperator>(this)) {
      // Bitwise AND of the two fast-math flag sets.
      FastMathFlags FM = getFastMathFlags();
      FM &= FP->getFastMathFlags();
      copyFastMathFlags(FM);
    }
  }

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() && DestGEP->isInBounds());
}
408 
/// Map an opcode value to its textual IR mnemonic (e.g. Add -> "add").
/// Returns a placeholder string for out-of-range opcodes.
const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret:    return "ret";
  case Br:     return "br";
  case Switch: return "switch";
  case IndirectBr: return "indirectbr";
  case Invoke: return "invoke";
  case Resume: return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet: return "cleanupret";
  case CatchRet: return "catchret";
  case CatchPad: return "catchpad";
  case CatchSwitch: return "catchswitch";
  case CallBr: return "callbr";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add: return "add";
  case FAdd: return "fadd";
  case Sub: return "sub";
  case FSub: return "fsub";
  case Mul: return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca:        return "alloca";
  case Load:          return "load";
  case Store:         return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW:     return "atomicrmw";
  case Fence:         return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc:         return "trunc";
  case ZExt:          return "zext";
  case SExt:          return "sext";
  case FPTrunc:       return "fptrunc";
  case FPExt:         return "fpext";
  case FPToUI:        return "fptoui";
  case FPToSI:        return "fptosi";
  case UIToFP:        return "uitofp";
  case SIToFP:        return "sitofp";
  case IntToPtr:      return "inttoptr";
  case PtrToInt:      return "ptrtoint";
  case BitCast:       return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp:           return "icmp";
  case FCmp:           return "fcmp";
  case PHI:            return "phi";
  case Select:         return "select";
  case Call:           return "call";
  case Shl:            return "shl";
  case LShr:           return "lshr";
  case AShr:           return "ashr";
  case VAArg:          return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement:  return "insertelement";
  case ShuffleVector:  return "shufflevector";
  case ExtractValue:   return "extractvalue";
  case InsertValue:    return "insertvalue";
  case LandingPad:     return "landingpad";
  case CleanupPad:     return "cleanuppad";
  case Freeze:         return "freeze";

  default: return "<Invalid operator> ";
  }
}
493 
/// This must be kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/IPO/MergeFunctions.cpp.
///
/// Compare the opcode-specific state (alignment, ordering, attributes,
/// indices, shuffle masks, ...) of two instructions that already have the
/// same opcode. Operands and types are compared by the callers.
bool Instruction::hasSameSpecialState(const Instruction *I2,
                                      bool IgnoreAlignment) const {
  auto I1 = this;
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I1))
    return GEP->getSourceElementType() ==
           cast<GetElementPtrInst>(I2)->getSourceElementType();

  // Opcodes without special state compare equal.
  return true;
}
563 
/// Strict identity: identical-when-defined plus matching optional-data bits
/// (nuw/nsw/exact/fast-math etc., stored in SubclassOptionalData).
bool Instruction::isIdenticalTo(const Instruction *I) const {
  return isIdenticalToWhenDefined(I) &&
         SubclassOptionalData == I->SubclassOptionalData;
}
568 
/// Identity modulo poison-relevant flags: same opcode, type, operands and
/// opcode-specific state (and for PHIs, the same incoming blocks).
bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      getType() != I->getType())
    return false;

  // If both instructions have no operands, they are identical.
  if (getNumOperands() == 0 && I->getNumOperands() == 0)
    return this->hasSameSpecialState(I);

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same.
  if (!std::equal(op_begin(), op_end(), I->op_begin()))
    return false;

  // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
  // PHI operand equality is meaningless unless the incoming blocks also match
  // pairwise.
  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
    return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
  }

  return this->hasSameSpecialState(I);
}
593 
// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/IPO/MergeFunctions.cpp.
//
// Structural equivalence: same opcode, compatible result/operand types
// (optionally compared as scalar element types), and same special state.
// Unlike isIdenticalTo*, operand *values* are not compared, only their types.
bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes  = flags & CompareUsingScalarTypes;

  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
       getType()->getScalarType() != I->getType()->getScalarType() :
       getType() != I->getType()))
    return false;

  // We have two instructions of identical opcode and #operands.  Check to see
  // if all operands are the same type
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
        getOperand(i)->getType()->getScalarType() !=
          I->getOperand(i)->getType()->getScalarType() :
        getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  return this->hasSameSpecialState(I, IgnoreAlignment);
}
619 
620 bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
621   for (const Use &U : uses()) {
622     // PHI nodes uses values in the corresponding predecessor block.  For other
623     // instructions, just check to see whether the parent of the use matches up.
624     const Instruction *I = cast<Instruction>(U.getUser());
625     const PHINode *PN = dyn_cast<PHINode>(I);
626     if (!PN) {
627       if (I->getParent() != BB)
628         return true;
629       continue;
630     }
631 
632     if (PN->getIncomingBlock(U) != BB)
633       return true;
634   }
635   return false;
636 }
637 
/// Conservatively determine whether this instruction may read memory.
bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    // Calls read unless attributes say they only write.
    return !cast<CallBase>(this)->onlyWritesMemory();
  case Instruction::Store:
    // Ordered/atomic stores act as reads for ordering purposes.
    return !cast<StoreInst>(this)->isUnordered();
  }
}
657 
/// Conservatively determine whether this instruction may write memory.
bool Instruction::mayWriteToMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
  case Instruction::Store:
  case Instruction::VAArg:
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    // Calls write unless attributes say they only read.
    return !cast<CallBase>(this)->onlyReadsMemory();
  case Instruction::Load:
    // Ordered/atomic loads act as writes for ordering purposes.
    return !cast<LoadInst>(this)->isUnordered();
  }
}
677 
/// Return true if this is an atomic operation: cmpxchg, atomicrmw, fence, or
/// a load/store with an atomic ordering.
bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}
692 
/// Return true if this atomic operation performs a load (cmpxchg and
/// atomicrmw both read their memory location). Precondition: isAtomic().
bool Instruction::hasAtomicLoad() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Load:
    return true;
  }
}
704 
/// Return true if this atomic operation performs a store (cmpxchg and
/// atomicrmw may both write their memory location). Precondition: isAtomic().
bool Instruction::hasAtomicStore() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Store:
    return true;
  }
}
716 
/// Return true if this instruction has a volatile memory access: volatile
/// load/store/cmpxchg/atomicrmw, a volatile mem intrinsic, or a matrix
/// load/store intrinsic with its volatile argument set.
bool Instruction::isVolatile() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(this)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(this)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(this)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->isVolatile();
  case Instruction::Call:
  case Instruction::Invoke:
    // There are a very limited number of intrinsics with volatile flags.
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      if (auto *MI = dyn_cast<MemIntrinsic>(II))
        return MI->isVolatile();
      switch (II->getIntrinsicID()) {
      default: break;
      // Arg 2 of matrix_column_major_load is its i1 volatile flag.
      case Intrinsic::matrix_column_major_load:
        return cast<ConstantInt>(II->getArgOperand(2))->isOne();
      // Arg 3 of matrix_column_major_store is its i1 volatile flag.
      case Intrinsic::matrix_column_major_store:
        return cast<ConstantInt>(II->getArgOperand(3))->isOne();
      }
    }
    return false;
  }
}
746 
/// Return the type of the value this instruction loads or stores, or nullptr
/// if it is not a recognized memory access.
Type *Instruction::getAccessType() const {
  switch (getOpcode()) {
  case Instruction::Store:
    return cast<StoreInst>(this)->getValueOperand()->getType();
  case Instruction::Load:
  case Instruction::AtomicRMW:
    // For loads and atomicrmw the accessed type is the result type.
    return getType();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->getNewValOperand()->getType();
  case Instruction::Call:
  case Instruction::Invoke:
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(this)) {
      switch (II->getIntrinsicID()) {
      // Load-like intrinsics: the accessed type is the intrinsic's result.
      case Intrinsic::masked_load:
      case Intrinsic::masked_gather:
      case Intrinsic::masked_expandload:
      case Intrinsic::vp_load:
      case Intrinsic::vp_gather:
      case Intrinsic::experimental_vp_strided_load:
        return II->getType();
      // Store-like intrinsics: the accessed type is that of the stored
      // value, which is operand 0.
      case Intrinsic::masked_store:
      case Intrinsic::masked_scatter:
      case Intrinsic::masked_compressstore:
      case Intrinsic::vp_store:
      case Intrinsic::vp_scatter:
      case Intrinsic::experimental_vp_strided_store:
        return II->getOperand(0)->getType();
      default:
        break;
      }
    }
    // Non-memory calls fall through to the nullptr return below.
  }

  return nullptr;
}
782 
/// Return true if an exception may continue unwinding past \p LP rather than
/// being definitively caught there. \p IncludePhaseOneUnwind selects whether
/// phase-one (search-phase) unwinding through cleanup pads counts as
/// "unwinding past".
static bool canUnwindPastLandingPad(const LandingPadInst *LP,
                                    bool IncludePhaseOneUnwind) {
  // Because phase one unwinding skips cleanup landingpads, we effectively
  // unwind past this frame, and callers need to have valid unwind info.
  if (LP->isCleanup())
    return IncludePhaseOneUnwind;

  for (unsigned I = 0; I < LP->getNumClauses(); ++I) {
    Constant *Clause = LP->getClause(I);
    // catch ptr null catches all exceptions.
    if (LP->isCatch(I) && isa<ConstantPointerNull>(Clause))
      return false;
    // filter [0 x ptr] catches all exceptions.
    if (LP->isFilter(I) && Clause->getType()->getArrayNumElements() == 0)
      return false;
  }

  // May catch only some subset of exceptions, in which case other exceptions
  // will continue unwinding.
  return true;
}
804 
/// Return true if executing this instruction may cause an exception to unwind
/// out of the current frame. With \p IncludePhaseOneUnwind, also count
/// phase-one unwinding that skips cleanup pads.
bool Instruction::mayThrow(bool IncludePhaseOneUnwind) const {
  switch (getOpcode()) {
  case Instruction::Call:
    return !cast<CallInst>(this)->doesNotThrow();
  case Instruction::CleanupRet:
    return cast<CleanupReturnInst>(this)->unwindsToCaller();
  case Instruction::CatchSwitch:
    return cast<CatchSwitchInst>(this)->unwindsToCaller();
  case Instruction::Resume:
    return true;
  case Instruction::Invoke: {
    // Landingpads themselves don't unwind -- however, an invoke of a skipped
    // landingpad may continue unwinding.
    BasicBlock *UnwindDest = cast<InvokeInst>(this)->getUnwindDest();
    Instruction *Pad = UnwindDest->getFirstNonPHI();
    if (auto *LP = dyn_cast<LandingPadInst>(Pad))
      return canUnwindPastLandingPad(LP, IncludePhaseOneUnwind);
    return false;
  }
  case Instruction::CleanupPad:
    // Treat the same as cleanup landingpad.
    return IncludePhaseOneUnwind;
  default:
    return false;
  }
}
831 
832 bool Instruction::mayHaveSideEffects() const {
833   return mayWriteToMemory() || mayThrow() || !willReturn();
834 }
835 
836 bool Instruction::isSafeToRemove() const {
837   return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
838          !this->isTerminator() && !this->isEHPad();
839 }
840 
841 bool Instruction::willReturn() const {
842   // Volatile store isn't guaranteed to return; see LangRef.
843   if (auto *SI = dyn_cast<StoreInst>(this))
844     return !SI->isVolatile();
845 
846   if (const auto *CB = dyn_cast<CallBase>(this))
847     return CB->hasFnAttr(Attribute::WillReturn);
848   return true;
849 }
850 
851 bool Instruction::isLifetimeStartOrEnd() const {
852   auto *II = dyn_cast<IntrinsicInst>(this);
853   if (!II)
854     return false;
855   Intrinsic::ID ID = II->getIntrinsicID();
856   return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
857 }
858 
859 bool Instruction::isLaunderOrStripInvariantGroup() const {
860   auto *II = dyn_cast<IntrinsicInst>(this);
861   if (!II)
862     return false;
863   Intrinsic::ID ID = II->getIntrinsicID();
864   return ID == Intrinsic::launder_invariant_group ||
865          ID == Intrinsic::strip_invariant_group;
866 }
867 
868 bool Instruction::isDebugOrPseudoInst() const {
869   return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
870 }
871 
872 const Instruction *
873 Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
874   for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
875     if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
876       return I;
877   return nullptr;
878 }
879 
880 const Instruction *
881 Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
882   for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
883     if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
884       return I;
885   return nullptr;
886 }
887 
888 bool Instruction::isAssociative() const {
889   unsigned Opcode = getOpcode();
890   if (isAssociative(Opcode))
891     return true;
892 
893   switch (Opcode) {
894   case FMul:
895   case FAdd:
896     return cast<FPMathOperator>(this)->hasAllowReassoc() &&
897            cast<FPMathOperator>(this)->hasNoSignedZeros();
898   default:
899     return false;
900   }
901 }
902 
903 bool Instruction::isCommutative() const {
904   if (auto *II = dyn_cast<IntrinsicInst>(this))
905     return II->isCommutative();
906   // TODO: Should allow icmp/fcmp?
907   return isCommutative(getOpcode());
908 }
909 
// Return the number of successor basic blocks of this terminator.
// The switch is generated from Instruction.def: one case per terminator
// opcode, dispatching to the concrete subclass's getNumSuccessors().
unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  // Calling this on a non-terminator is a programming error.
  llvm_unreachable("not a terminator");
}
921 
// Return the idx'th successor block of this terminator.
// Dispatches to the concrete terminator subclass via cases generated from
// Instruction.def.
BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  // Calling this on a non-terminator is a programming error.
  llvm_unreachable("not a terminator");
}
933 
// Replace the idx'th successor of this terminator with block B.
// Dispatches to the concrete terminator subclass via cases generated from
// Instruction.def.
void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  // Calling this on a non-terminator is a programming error.
  llvm_unreachable("not a terminator");
}
945 
946 void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
947   for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
948        Idx != NumSuccessors; ++Idx)
949     if (getSuccessor(Idx) == OldBB)
950       setSuccessor(Idx, NewBB);
951 }
952 
// Fallback for cloneImpl: every concrete Instruction subclass must override
// it, so reaching this body means a subclass forgot to implement cloning.
Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}
956 
957 void Instruction::swapProfMetadata() {
958   MDNode *ProfileData = getBranchWeightMDNode(*this);
959   if (!ProfileData || ProfileData->getNumOperands() != 3)
960     return;
961 
962   // The first operand is the name. Fetch them backwards and build a new one.
963   Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
964                      ProfileData->getOperand(1)};
965   setMetadata(LLVMContext::MD_prof,
966               MDNode::get(ProfileData->getContext(), Ops));
967 }
968 
969 void Instruction::copyMetadata(const Instruction &SrcInst,
970                                ArrayRef<unsigned> WL) {
971   if (!SrcInst.hasMetadata())
972     return;
973 
974   DenseSet<unsigned> WLS;
975   for (unsigned M : WL)
976     WLS.insert(M);
977 
978   // Otherwise, enumerate and copy over metadata from the old instruction to the
979   // new one.
980   SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
981   SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
982   for (const auto &MD : TheMDs) {
983     if (WL.empty() || WLS.count(MD.first))
984       setMetadata(MD.first, MD.second);
985   }
986   if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
987     setDebugLoc(SrcInst.getDebugLoc());
988 }
989 
// Create an identical copy of this instruction (no parent, no name), with
// the same operands, subclass-optional data, metadata, and debug location.
// Dispatch to the subclass cloneImpl is generated from Instruction.def.
Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  // cloneImpl copies operands only; carry over flags and metadata too.
  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}
1007