//===-- Instruction.cpp - Implement the Instruction class -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Instruction class for the IR library.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instruction.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
using namespace llvm;
Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         Instruction *InsertBefore)
    : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // If requested, insert this instruction into a basic block...
  if (InsertBefore) {
    BasicBlock *BB = InsertBefore->getParent();
    assert(BB && "Instruction to insert before is not in a basic block!");
    insertInto(BB, InsertBefore->getIterator());
  }
}

Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         BasicBlock *InsertAtEnd)
    : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // Append this instruction to the basic block.
  assert(InsertAtEnd && "Basic block to append to may not be NULL!");
  insertInto(InsertAtEnd, InsertAtEnd->end());
}

Instruction::~Instruction() {
  assert(!Parent && "Instruction still linked in the program!");

  // Replace any extant metadata uses of this instruction with undef to
  // preserve debug info accuracy. Some alternatives include:
  // - Treat Instruction like any other Value, and point its extant metadata
  //   uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
  //   trivially dead (i.e. fair game for deletion in many passes), leading to
  //   stale dbg.values being in effect for too long.
  // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
  //   correct. OTOH results in wasted work in some common cases (e.g. when all
  //   instructions in a BasicBlock are deleted).
  if (isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));

  // Explicitly remove DIAssignID metadata to clear up ID -> Instruction(s)
  // mapping in LLVMContext.
  setMetadata(LLVMContext::MD_DIAssignID, nullptr);
}


void Instruction::setParent(BasicBlock *P) {
  Parent = P;
}

const Module *Instruction::getModule() const {
  return getParent()->getModule();
}

const Function *Instruction::getFunction() const {
  return getParent()->getParent();
}

void Instruction::removeFromParent() {
  getParent()->getInstList().remove(getIterator());
}

iplist<Instruction>::iterator Instruction::eraseFromParent() {
  return getParent()->getInstList().erase(getIterator());
}

/// Insert an unlinked instruction into a basic block immediately before the
/// specified instruction.
void Instruction::insertBefore(Instruction *InsertPos) {
  insertInto(InsertPos->getParent(), InsertPos->getIterator());
}

/// Insert an unlinked instruction into a basic block immediately after the
/// specified instruction.
void Instruction::insertAfter(Instruction *InsertPos) {
  insertInto(InsertPos->getParent(), std::next(InsertPos->getIterator()));
}

BasicBlock::iterator Instruction::insertInto(BasicBlock *ParentBB,
                                             BasicBlock::iterator It) {
  assert(getParent() == nullptr && "Expected detached instruction");
  assert((It == ParentBB->end() || It->getParent() == ParentBB) &&
         "It not in ParentBB");
  return ParentBB->getInstList().insert(It, this);
}
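
// Example (illustrative sketch, not part of the original file): linking a
// freshly created, detached instruction with the insertion helpers above.
// `LHS`, `RHS`, `BB`, and `InsertPt` are hypothetical values/positions:
//
//   Instruction *Sum = BinaryOperator::CreateAdd(LHS, RHS, "sum");
//   Sum->insertBefore(InsertPt);               // link before an existing instruction
//   // or append to a block: Sum->insertInto(BB, BB->end());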

/// Unlink this instruction from its current basic block and insert it into the
/// basic block that MovePos lives in, right before MovePos.
void Instruction::moveBefore(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), MovePos->getIterator());
}

void Instruction::moveAfter(Instruction *MovePos) {
  moveBefore(*MovePos->getParent(), ++MovePos->getIterator());
}

void Instruction::moveBefore(BasicBlock &BB,
                             SymbolTableList<Instruction>::iterator I) {
  assert(I == BB.end() || I->getParent() == &BB);
  BB.splice(I, getParent(), getIterator());
}
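
// Example (illustrative sketch): repositioning an instruction relative to an
// anchor, possibly in a different block. Unlike insertBefore(), the move
// helpers first unlink the instruction from its current block. `I` and
// `Anchor` are hypothetical:
//
//   I->moveAfter(Anchor);   // I now lives in Anchor's block, right after it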

bool Instruction::comesBefore(const Instruction *Other) const {
  assert(Parent && Other->Parent &&
         "instructions without BB parents have no order");
  assert(Parent == Other->Parent && "cross-BB instruction order comparison");
  if (!Parent->isInstrOrderValid())
    Parent->renumberInstructions();
  return Order < Other->Order;
}
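
// Example (illustrative sketch): comesBefore() answers intra-block ordering
// queries cheaply, renumbering the block lazily only when its cached order is
// stale. `A` and `B` are hypothetical instructions in the same block:
//
//   Instruction *Earlier = A->comesBefore(B) ? A : B;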

Instruction *Instruction::getInsertionPointAfterDef() {
  assert(!getType()->isVoidTy() && "Instruction must define result");
  BasicBlock *InsertBB;
  BasicBlock::iterator InsertPt;
  if (auto *PN = dyn_cast<PHINode>(this)) {
    InsertBB = PN->getParent();
    InsertPt = InsertBB->getFirstInsertionPt();
  } else if (auto *II = dyn_cast<InvokeInst>(this)) {
    InsertBB = II->getNormalDest();
    InsertPt = InsertBB->getFirstInsertionPt();
  } else if (auto *CB = dyn_cast<CallBrInst>(this)) {
    InsertBB = CB->getDefaultDest();
    InsertPt = InsertBB->getFirstInsertionPt();
  } else {
    assert(!isTerminator() && "Only invoke/callbr terminators return value");
    InsertBB = getParent();
    InsertPt = std::next(getIterator());
  }

  // catchswitch blocks don't have any legal insertion point (because they
  // are both an exception pad and a terminator).
  if (InsertPt == InsertBB->end())
    return nullptr;
  return &*InsertPt;
}
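
// Example (illustrative sketch): placing a new user right after the value it
// consumes, the typical use of getInsertionPointAfterDef(). For PHIs and
// invoke/callbr results the returned point is past the PHI group or in the
// appropriate successor, not textually after the definition. `Def` and
// `NewUser` are hypothetical:
//
//   if (Instruction *IP = Def->getInsertionPointAfterDef())
//     NewUser->insertBefore(IP);
//   // A null result (e.g. a catchswitch parent) means there is no legal spot.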

bool Instruction::isOnlyUserOfAnyOperand() {
  return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
}

void Instruction::setHasNoUnsignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
}

void Instruction::setHasNoSignedWrap(bool b) {
  cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
}

void Instruction::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
}

bool Instruction::hasNoUnsignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
}

bool Instruction::hasNoSignedWrap() const {
  return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
}

bool Instruction::hasPoisonGeneratingFlags() const {
  return cast<Operator>(this)->hasPoisonGeneratingFlags();
}

void Instruction::dropPoisonGeneratingFlags() {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    cast<PossiblyExactOperator>(this)->setIsExact(false);
    break;

  case Instruction::GetElementPtr:
    cast<GetElementPtrInst>(this)->setIsInBounds(false);
    break;
  }
  if (isa<FPMathOperator>(this)) {
    setHasNoNaNs(false);
    setHasNoInfs(false);
  }

  assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
}
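
// Example (illustrative sketch): a transform that speculates or hoists an
// instruction past a branch typically strips the flags whose violation would
// turn the result into poison (nsw/nuw, exact, inbounds, nnan/ninf). `I` and
// `InsertPt` are hypothetical:
//
//   I->dropPoisonGeneratingFlags();
//   I->dropPoisonGeneratingMetadata();   // !range, !nonnull, !align (see below)
//   I->moveBefore(InsertPt);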

bool Instruction::hasPoisonGeneratingMetadata() const {
  return hasMetadata(LLVMContext::MD_range) ||
         hasMetadata(LLVMContext::MD_nonnull) ||
         hasMetadata(LLVMContext::MD_align);
}

void Instruction::dropPoisonGeneratingMetadata() {
  eraseMetadata(LLVMContext::MD_range);
  eraseMetadata(LLVMContext::MD_nonnull);
  eraseMetadata(LLVMContext::MD_align);
}

void Instruction::dropUndefImplyingAttrsAndUnknownMetadata(
    ArrayRef<unsigned> KnownIDs) {
  dropUnknownNonDebugMetadata(KnownIDs);
  auto *CB = dyn_cast<CallBase>(this);
  if (!CB)
    return;
  // For call instructions, we also need to drop parameter and return
  // attributes that can cause UB if the call is moved to a location where the
  // attribute is not valid.
  AttributeList AL = CB->getAttributes();
  if (AL.isEmpty())
    return;
  AttributeMask UBImplyingAttributes =
      AttributeFuncs::getUBImplyingAttributes();
  for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
    CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
  CB->removeRetAttrs(UBImplyingAttributes);
}

bool Instruction::isExact() const {
  return cast<PossiblyExactOperator>(this)->isExact();
}

void Instruction::setFast(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFast(B);
}

void Instruction::setHasAllowReassoc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReassoc(B);
}

void Instruction::setHasNoNaNs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoNaNs(B);
}

void Instruction::setHasNoInfs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoInfs(B);
}

void Instruction::setHasNoSignedZeros(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}

void Instruction::setHasAllowReciprocal(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
}

void Instruction::setHasAllowContract(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowContract(B);
}

void Instruction::setHasApproxFunc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasApproxFunc(B);
}

void Instruction::setFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}

void Instruction::copyFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
  cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}

bool Instruction::isFast() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->isFast();
}

bool Instruction::hasAllowReassoc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReassoc();
}

bool Instruction::hasNoNaNs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoNaNs();
}

bool Instruction::hasNoInfs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoInfs();
}

bool Instruction::hasNoSignedZeros() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoSignedZeros();
}

bool Instruction::hasAllowReciprocal() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReciprocal();
}

bool Instruction::hasAllowContract() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowContract();
}

bool Instruction::hasApproxFunc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasApproxFunc();
}

FastMathFlags Instruction::getFastMathFlags() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->getFastMathFlags();
}

void Instruction::copyFastMathFlags(const Instruction *I) {
  copyFastMathFlags(I->getFastMathFlags());
}

void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
  // Copy the wrapping flags.
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() || DestGEP->isInBounds());
}

void Instruction::andIRFlags(const Value *V) {
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (isa<OverflowingBinaryOperator>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
    }
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(isExact() && PE->isExact());

  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    if (isa<FPMathOperator>(this)) {
      FastMathFlags FM = getFastMathFlags();
      FM &= FP->getFastMathFlags();
      copyFastMathFlags(FM);
    }
  }

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() && DestGEP->isInBounds());
}
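
// Example (illustrative sketch): when a pass replaces one instruction with an
// equivalent one (e.g. during CSE or reassociation), copyIRFlags() transfers
// flags from the original, while andIRFlags() keeps only the flags both
// instructions can justify. `Repl` and `Orig` are hypothetical:
//
//   Repl->copyIRFlags(Orig);   // take nsw/nuw, exact, FMF, inbounds from Orig
//   Repl->andIRFlags(Orig);    // or: intersect with Repl's existing flags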

const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret: return "ret";
  case Br: return "br";
  case Switch: return "switch";
  case IndirectBr: return "indirectbr";
  case Invoke: return "invoke";
  case Resume: return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet: return "cleanupret";
  case CatchRet: return "catchret";
  case CatchPad: return "catchpad";
  case CatchSwitch: return "catchswitch";
  case CallBr: return "callbr";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add: return "add";
  case FAdd: return "fadd";
  case Sub: return "sub";
  case FSub: return "fsub";
  case Mul: return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca: return "alloca";
  case Load: return "load";
  case Store: return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW: return "atomicrmw";
  case Fence: return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc: return "trunc";
  case ZExt: return "zext";
  case SExt: return "sext";
  case FPTrunc: return "fptrunc";
  case FPExt: return "fpext";
  case FPToUI: return "fptoui";
  case FPToSI: return "fptosi";
  case UIToFP: return "uitofp";
  case SIToFP: return "sitofp";
  case IntToPtr: return "inttoptr";
  case PtrToInt: return "ptrtoint";
  case BitCast: return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp: return "icmp";
  case FCmp: return "fcmp";
  case PHI: return "phi";
  case Select: return "select";
  case Call: return "call";
  case Shl: return "shl";
  case LShr: return "lshr";
  case AShr: return "ashr";
  case VAArg: return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement: return "insertelement";
  case ShuffleVector: return "shufflevector";
  case ExtractValue: return "extractvalue";
  case InsertValue: return "insertvalue";
  case LandingPad: return "landingpad";
  case CleanupPad: return "cleanuppad";
  case Freeze: return "freeze";

  default: return "<Invalid operator> ";
  }
}

/// Return true if both instructions have the same special state. This must be
/// kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/IPO/MergeFunctions.cpp.
static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
                                 bool IgnoreAlignment = false) {
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I1))
    return GEP->getSourceElementType() ==
           cast<GetElementPtrInst>(I2)->getSourceElementType();

  return true;
}

bool Instruction::isIdenticalTo(const Instruction *I) const {
  return isIdenticalToWhenDefined(I) &&
         SubclassOptionalData == I->SubclassOptionalData;
}

bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      getType() != I->getType())
    return false;

  // If both instructions have no operands, they are identical.
  if (getNumOperands() == 0 && I->getNumOperands() == 0)
    return haveSameSpecialState(this, I);

  // We have two instructions of identical opcode and #operands. Check to see
  // if all operands are the same.
  if (!std::equal(op_begin(), op_end(), I->op_begin()))
    return false;

  // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
    return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
  }

  return haveSameSpecialState(this, I);
}
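
// Example (illustrative sketch): a simple local-CSE style check. Two identical
// instructions with the same operands can be merged by replacing one with the
// other. `A` and `B` are hypothetical instructions in the same block:
//
//   if (A->isIdenticalTo(B) && !A->mayHaveSideEffects()) {
//     B->replaceAllUsesWith(A);
//     B->eraseFromParent();
//   }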

// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/IPO/MergeFunctions.cpp.
bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes = flags & CompareUsingScalarTypes;

  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
       getType()->getScalarType() != I->getType()->getScalarType() :
       getType() != I->getType()))
    return false;

  // We have two instructions of identical opcode and #operands. Check to see
  // if all operands are the same type.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
        getOperand(i)->getType()->getScalarType() !=
          I->getOperand(i)->getType()->getScalarType() :
        getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  return haveSameSpecialState(this, I, IgnoreAlignment);
}

bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
  for (const Use &U : uses()) {
    // PHI nodes use values in the corresponding predecessor block. For other
    // instructions, just check to see whether the parent of the use matches up.
    const Instruction *I = cast<Instruction>(U.getUser());
    const PHINode *PN = dyn_cast<PHINode>(I);
    if (!PN) {
      if (I->getParent() != BB)
        return true;
      continue;
    }

    if (PN->getIncomingBlock(U) != BB)
      return true;
  }
  return false;
}

bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyWritesMemory();
  case Instruction::Store:
    return !cast<StoreInst>(this)->isUnordered();
  }
}

bool Instruction::mayWriteToMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
  case Instruction::Store:
  case Instruction::VAArg:
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyReadsMemory();
  case Instruction::Load:
    return !cast<LoadInst>(this)->isUnordered();
  }
}
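
// Example (illustrative sketch): the memory-effect queries typically gate code
// motion; a read should not be hoisted past something that may write, and
// volatile or atomic accesses are not reordered at all. `Load` and `Other` are
// hypothetical:
//
//   bool MaySwap = !Other->mayWriteToMemory() && !Load->isVolatile();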

bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}

bool Instruction::hasAtomicLoad() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Load:
    return true;
  }
}

bool Instruction::hasAtomicStore() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Store:
    return true;
  }
}

bool Instruction::isVolatile() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(this)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(this)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(this)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->isVolatile();
  case Instruction::Call:
  case Instruction::Invoke:
    // There are a very limited number of intrinsics with volatile flags.
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      if (auto *MI = dyn_cast<MemIntrinsic>(II))
        return MI->isVolatile();
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::matrix_column_major_load:
        return cast<ConstantInt>(II->getArgOperand(2))->isOne();
      case Intrinsic::matrix_column_major_store:
        return cast<ConstantInt>(II->getArgOperand(3))->isOne();
      }
    }
    return false;
  }
}

bool Instruction::mayThrow() const {
  if (const CallInst *CI = dyn_cast<CallInst>(this))
    return !CI->doesNotThrow();
  if (const auto *CRI = dyn_cast<CleanupReturnInst>(this))
    return CRI->unwindsToCaller();
  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(this))
    return CatchSwitch->unwindsToCaller();
  return isa<ResumeInst>(this);
}

bool Instruction::mayHaveSideEffects() const {
  return mayWriteToMemory() || mayThrow() || !willReturn();
}

bool Instruction::isSafeToRemove() const {
  return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
         !this->isTerminator() && !this->isEHPad();
}

bool Instruction::willReturn() const {
  // Volatile store isn't guaranteed to return; see LangRef.
  if (auto *SI = dyn_cast<StoreInst>(this))
    return !SI->isVolatile();

  if (const auto *CB = dyn_cast<CallBase>(this))
    return CB->hasFnAttr(Attribute::WillReturn);
  return true;
}
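
// Example (illustrative sketch): the classic dead-code-elimination guard built
// from the queries above. `I` is a hypothetical instruction:
//
//   if (I->use_empty() && !I->mayHaveSideEffects() && !I->isTerminator())
//     I->eraseFromParent();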

bool Instruction::isLifetimeStartOrEnd() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
}

bool Instruction::isLaunderOrStripInvariantGroup() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::launder_invariant_group ||
         ID == Intrinsic::strip_invariant_group;
}

bool Instruction::isDebugOrPseudoInst() const {
  return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
}

const Instruction *
Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
  for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
      return I;
  return nullptr;
}

const Instruction *
Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
  for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
      return I;
  return nullptr;
}
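
// Example (illustrative sketch): walking forward while ignoring debug and
// pseudo-probe intrinsics, so that the presence of -g does not change what a
// transform sees. `I` is a hypothetical starting instruction and `visit` a
// hypothetical callback:
//
//   for (const Instruction *Cur = I->getNextNonDebugInstruction(true); Cur;
//        Cur = Cur->getNextNonDebugInstruction(true))
//     visit(Cur);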

bool Instruction::isAssociative() const {
  unsigned Opcode = getOpcode();
  if (isAssociative(Opcode))
    return true;

  switch (Opcode) {
  case FMul:
  case FAdd:
    return cast<FPMathOperator>(this)->hasAllowReassoc() &&
           cast<FPMathOperator>(this)->hasNoSignedZeros();
  default:
    return false;
  }
}

bool Instruction::isCommutative() const {
  if (auto *II = dyn_cast<IntrinsicInst>(this))
    return II->isCommutative();
  // TODO: Should allow icmp/fcmp?
  return isCommutative(getOpcode());
}
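
// Example (illustrative sketch): fadd/fmul only count as associative when the
// reassoc and nsz fast-math flags are both present, so a reassociating
// transform should query the instruction, not just the opcode. `FAddInst` is
// hypothetical:
//
//   if (FAddInst->isAssociative() && FAddInst->isCommutative())
//     /* safe to regroup (a + b) + c as a + (b + c) */;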

unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS) \
  case Instruction::OPC: \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS) \
  case Instruction::OPC: \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS) \
  case Instruction::OPC: \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
  for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
       Idx != NumSuccessors; ++Idx)
    if (getSuccessor(Idx) == OldBB)
      setSuccessor(Idx, NewBB);
}
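
// Example (illustrative sketch): retargeting a CFG edge, e.g. after splitting
// a block; every terminator kind is handled uniformly through the accessors
// above. `Term`, `OldSucc`, and `NewSucc` are hypothetical:
//
//   Term->replaceSuccessorWith(OldSucc, NewSucc);
//   // PHI nodes in the affected successors must be updated separately.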

Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}

void Instruction::swapProfMetadata() {
  MDNode *ProfileData = getBranchWeightMDNode(*this);
  if (!ProfileData || ProfileData->getNumOperands() != 3)
    return;

  // The first operand is the metadata name; the second and third are the
  // branch weights. Swap the two weights and rebuild the node.
  Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
                     ProfileData->getOperand(1)};
  setMetadata(LLVMContext::MD_prof,
              MDNode::get(ProfileData->getContext(), Ops));
}

void Instruction::copyMetadata(const Instruction &SrcInst,
                               ArrayRef<unsigned> WL) {
  if (!SrcInst.hasMetadata())
    return;

  DenseSet<unsigned> WLS;
  for (unsigned M : WL)
    WLS.insert(M);

  // Otherwise, enumerate and copy over metadata from the old instruction to the
  // new one.
  SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
  SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
  for (const auto &MD : TheMDs) {
    if (WL.empty() || WLS.count(MD.first))
      setMetadata(MD.first, MD.second);
  }
  if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
    setDebugLoc(SrcInst.getDebugLoc());
}

Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas) \
  case Instruction::opc: \
    New = cast<clas>(this)->cloneImpl(); \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}
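
// Example (illustrative sketch): clone() produces a detached copy with the
// same operands, flags, and metadata but no parent and no uses; the caller
// must insert it and usually rename it. `Orig` and `InsertPt` are
// hypothetical:
//
//   Instruction *Copy = Orig->clone();
//   Copy->setName(Orig->getName() + ".copy");
//   Copy->insertBefore(InsertPt);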