//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Pass to verify generated machine code. The following is checked:
//
// Operand counts: All explicit operands must be present.
//
// Register classes: All physical and virtual register operands must be
// compatible with the register class required by the instruction descriptor.
//
// Register live intervals: Registers must be defined only once, and must be
// defined before use.
//
// The machine code verifier is enabled with the command-line option
// -verify-machineinstrs.
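//
// For example (a typical invocation; exact flags depend on your workflow):
//
//   llc -verify-machineinstrs input.ll -o /dev/null
//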
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalCalc.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>

using namespace llvm;

namespace {

struct MachineVerifier {
  MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}

  unsigned verify(const MachineFunction &MF);

  Pass *const PASS;
  const char *Banner;
  const MachineFunction *MF;
  const TargetMachine *TM;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const MachineRegisterInfo *MRI;

  unsigned foundErrors;

  // Avoid querying the MachineFunctionProperties for each operand.
  bool isFunctionRegBankSelected;
  bool isFunctionSelected;

  using RegVector = SmallVector<Register, 16>;
  using RegMaskVector = SmallVector<const uint32_t *, 4>;
  using RegSet = DenseSet<Register>;
  using RegMap = DenseMap<Register, const MachineInstr *>;
  using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;

  const MachineInstr *FirstNonPHI;
  const MachineInstr *FirstTerminator;
  BlockSet FunctionBlocks;

  BitVector regsReserved;
  RegSet regsLive;
  RegVector regsDefined, regsDead, regsKilled;
  RegMaskVector regMasks;

  SlotIndex lastIndex;

  // Add Reg and any sub-registers to RV
  void addRegWithSubRegs(RegVector &RV, Register Reg) {
    RV.push_back(Reg);
    if (Reg.isPhysical())
      append_range(RV, TRI->subregs(Reg.asMCReg()));
  }

  struct BBInfo {
    // Is this MBB reachable from the MF entry point?
    bool reachable = false;

    // Vregs that must be live in because they are used without being
    // defined. Map value is the user. vregsLiveIn doesn't include regs
    // that only are used by PHI nodes.
    RegMap vregsLiveIn;

    // Regs killed in MBB. They may be defined again, and will then be in both
    // regsKilled and regsLiveOut.
    RegSet regsKilled;

    // Regs defined in MBB and live out. Note that vregs passing through may
    // be live out without being mentioned here.
    RegSet regsLiveOut;

    // Vregs that pass through MBB untouched. This set is disjoint from
    // regsKilled and regsLiveOut.
    RegSet vregsPassed;

    // Vregs that must pass through MBB because they are needed by a successor
    // block. This set is disjoint from regsLiveOut.
    RegSet vregsRequired;

    // Set versions of block's predecessor and successor lists.
    BlockSet Preds, Succs;

    BBInfo() = default;

    // Add register to vregsRequired if it belongs there. Return true if
    // anything changed.
    bool addRequired(Register Reg) {
      if (!Reg.isVirtual())
        return false;
      if (regsLiveOut.count(Reg))
        return false;
      return vregsRequired.insert(Reg).second;
    }

    // Same for a full set.
    bool addRequired(const RegSet &RS) {
      bool Changed = false;
      for (Register Reg : RS)
        Changed |= addRequired(Reg);
      return Changed;
    }

    // Same for a full map.
    bool addRequired(const RegMap &RM) {
      bool Changed = false;
      for (const auto &I : RM)
        Changed |= addRequired(I.first);
      return Changed;
    }

    // Live-out registers are either in regsLiveOut or vregsPassed.
    bool isLiveOut(Register Reg) const {
      return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
    }
  };

  // Extra register info per MBB.
  DenseMap<const MachineBasicBlock*, BBInfo> MBBInfoMap;

  bool isReserved(Register Reg) {
    return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
  }

  bool isAllocatable(Register Reg) const {
    return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
           !regsReserved.test(Reg.id());
  }

  // Analysis information if available
  LiveVariables *LiveVars;
  LiveIntervals *LiveInts;
  LiveStacks *LiveStks;
  SlotIndexes *Indexes;

  void visitMachineFunctionBefore();
  void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
  void visitMachineBundleBefore(const MachineInstr *MI);

  /// Verify that all of \p MI's virtual register operands are scalars.
  /// \returns True if all virtual register operands are scalar. False
  /// otherwise.
  bool verifyAllRegOpsScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI);
  bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
  void verifyPreISelGenericInstruction(const MachineInstr *MI);
  void visitMachineInstrBefore(const MachineInstr *MI);
  void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
  void visitMachineBundleAfter(const MachineInstr *MI);
  void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
  void visitMachineFunctionAfter();

  void report(const char *msg, const MachineFunction *MF);
  void report(const char *msg, const MachineBasicBlock *MBB);
  void report(const char *msg, const MachineInstr *MI);
  void report(const char *msg, const MachineOperand *MO, unsigned MONum,
              LLT MOVRegType = LLT{});
  void report(const Twine &Msg, const MachineInstr *MI);

  void report_context(const LiveInterval &LI) const;
  void report_context(const LiveRange &LR, Register VRegUnit,
                      LaneBitmask LaneMask) const;
  void report_context(const LiveRange::Segment &S) const;
  void report_context(const VNInfo &VNI) const;
  void report_context(SlotIndex Pos) const;
  void report_context(MCPhysReg PhysReg) const;
  void report_context_liverange(const LiveRange &LR) const;
  void report_context_lanemask(LaneBitmask LaneMask) const;
  void report_context_vreg(Register VReg) const;
  void report_context_vreg_regunit(Register VRegOrUnit) const;

  void verifyInlineAsm(const MachineInstr *MI);

  void checkLiveness(const MachineOperand *MO, unsigned MONum);
  void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
                          SlotIndex UseIdx, const LiveRange &LR,
                          Register VRegOrUnit,
                          LaneBitmask LaneMask = LaneBitmask::getNone());
  void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
                          SlotIndex DefIdx, const LiveRange &LR,
                          Register VRegOrUnit, bool SubRangeCheck = false,
                          LaneBitmask LaneMask = LaneBitmask::getNone());

  void markReachable(const MachineBasicBlock *MBB);
  void calcRegsPassed();
  void checkPHIOps(const MachineBasicBlock &MBB);

  void calcRegsRequired();
  void verifyLiveVariables();
  void verifyLiveIntervals();
  void verifyLiveInterval(const LiveInterval&);
  void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
                            LaneBitmask);
  void verifyLiveRangeSegment(const LiveRange &,
                              const LiveRange::const_iterator I, Register,
                              LaneBitmask);
  void verifyLiveRange(const LiveRange &, Register,
                       LaneBitmask LaneMask = LaneBitmask::getNone());

  void verifyStackFrame();

  void verifySlotIndexes() const;
  void verifyProperties(const MachineFunction &MF);
};

struct MachineVerifierPass : public MachineFunctionPass {
  static char ID; // Pass ID, replacement for typeid

  const std::string Banner;

  MachineVerifierPass(std::string banner = std::string())
      : MachineFunctionPass(ID), Banner(std::move(banner)) {
    initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
    if (FoundErrors)
      report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
    return false;
  }
};

} // end anonymous namespace

char MachineVerifierPass::ID = 0;

INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
                "Verify generated machine code", false, false)

FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
  return new MachineVerifierPass(Banner);
}
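
// A usage sketch (legacy pass manager; the pass and banner names here are
// illustrative, not part of this file):
//
//   legacy::PassManager PM;
//   PM.add(createMachineVerifierPass("After MyPass"));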

void llvm::verifyMachineFunction(MachineFunctionAnalysisManager *,
                                 const std::string &Banner,
                                 const MachineFunction &MF) {
  // TODO: Use MFAM after porting below analyses.
  // LiveVariables *LiveVars;
  // LiveIntervals *LiveInts;
  // LiveStacks *LiveStks;
  // SlotIndexes *Indexes;
  unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
  if (FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
}

bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
    const {
  MachineFunction &MF = const_cast<MachineFunction&>(*this);
  unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
  if (AbortOnErrors && FoundErrors)
    report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
  return FoundErrors == 0;
}
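
// This entry point is also convenient to call ad hoc while debugging a pass,
// e.g. MF.verify(this, "After MyPass") (banner text illustrative), or
// directly from a debugger.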

void MachineVerifier::verifySlotIndexes() const {
  if (Indexes == nullptr)
    return;

  // Ensure the IdxMBB list is sorted by slot indexes.
  SlotIndex Last;
  for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
       E = Indexes->MBBIndexEnd(); I != E; ++I) {
    assert(!Last.isValid() || I->first > Last);
    Last = I->first;
  }
}

void MachineVerifier::verifyProperties(const MachineFunction &MF) {
  // If a pass has introduced virtual registers without clearing the
  // NoVRegs property (or set it without allocating the vregs)
  // then report an error.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::NoVRegs) &&
      MRI->getNumVirtRegs())
    report("Function has NoVRegs property but there are VReg operands", &MF);
}

unsigned MachineVerifier::verify(const MachineFunction &MF) {
  foundErrors = 0;

  this->MF = &MF;
  TM = &MF.getTarget();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  MRI = &MF.getRegInfo();

  const bool isFunctionFailedISel = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::FailedISel);

  // If we're mid-GlobalISel and we already triggered the fallback path then
  // it's expected that the MIR is somewhat broken but that's ok since we'll
  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
  if (isFunctionFailedISel)
    return foundErrors;

  isFunctionRegBankSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::RegBankSelected);
  isFunctionSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::Selected);

  LiveVars = nullptr;
  LiveInts = nullptr;
  LiveStks = nullptr;
  Indexes = nullptr;
  if (PASS) {
    LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
    // We don't want to verify LiveVariables if LiveIntervals is available.
    if (!LiveInts)
      LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
    LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
    Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
  }

  verifySlotIndexes();

  verifyProperties(MF);

  visitMachineFunctionBefore();
  for (const MachineBasicBlock &MBB : MF) {
    visitMachineBasicBlockBefore(&MBB);
    // Keep track of the current bundle header.
    const MachineInstr *CurBundle = nullptr;
    // Do we expect the next instruction to be part of the same bundle?
    bool InBundle = false;
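
    // A bundle appears in the instruction list as a header followed by the
    // bundled instructions, roughly (a sketch):
    //
    //   BUNDLE          ; isBundledWithSucc()
    //     MI0           ; isBundledWithPred() && isBundledWithSucc()
    //     MI1           ; isBundledWithPred()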

    for (const MachineInstr &MI : MBB.instrs()) {
      if (MI.getParent() != &MBB) {
        report("Bad instruction parent pointer", &MBB);
        errs() << "Instruction: " << MI;
        continue;
      }

      // Check for consistent bundle flags.
      if (InBundle && !MI.isBundledWithPred())
        report("Missing BundledPred flag, "
               "BundledSucc was set on predecessor",
               &MI);
      if (!InBundle && MI.isBundledWithPred())
        report("BundledPred flag is set, "
               "but BundledSucc not set on predecessor",
               &MI);

      // Is this a bundle header?
      if (!MI.isInsideBundle()) {
        if (CurBundle)
          visitMachineBundleAfter(CurBundle);
        CurBundle = &MI;
        visitMachineBundleBefore(CurBundle);
      } else if (!CurBundle)
        report("No bundle header", &MI);
      visitMachineInstrBefore(&MI);
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = MI.getOperand(I);
        if (Op.getParent() != &MI) {
          // Make sure to use correct addOperand / RemoveOperand / ChangeTo
          // functions when replacing operands of a MachineInstr.
          report("Instruction has operand with wrong parent set", &MI);
        }

        visitMachineOperand(&Op, I);
      }

      // Was this the last bundled instruction?
      InBundle = MI.isBundledWithSucc();
    }
    if (CurBundle)
      visitMachineBundleAfter(CurBundle);
    if (InBundle)
      report("BundledSucc flag set on last instruction in block", &MBB.back());
    visitMachineBasicBlockAfter(&MBB);
  }
  visitMachineFunctionAfter();

  // Clean up.
  regsLive.clear();
  regsDefined.clear();
  regsDead.clear();
  regsKilled.clear();
  regMasks.clear();
  MBBInfoMap.clear();

  return foundErrors;
}

void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
  assert(MF);
  errs() << '\n';
  if (!foundErrors++) {
    if (Banner)
      errs() << "# " << Banner << '\n';
    if (LiveInts != nullptr)
      LiveInts->print(errs());
    else
      MF->print(errs(), Indexes);
  }
  errs() << "*** Bad machine code: " << msg << " ***\n"
         << "- function: " << MF->getName() << "\n";
}

void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
  assert(MBB);
  report(msg, MBB->getParent());
  errs() << "- basic block: " << printMBBReference(*MBB) << ' '
         << MBB->getName() << " (" << (const void *)MBB << ')';
  if (Indexes)
    errs() << " [" << Indexes->getMBBStartIdx(MBB)
           << ';' << Indexes->getMBBEndIdx(MBB) << ')';
  errs() << '\n';
}

void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
  assert(MI);
  report(msg, MI->getParent());
  errs() << "- instruction: ";
  if (Indexes && Indexes->hasIndex(*MI))
    errs() << Indexes->getInstructionIndex(*MI) << '\t';
  MI->print(errs(), /*IsStandalone=*/true);
}

void MachineVerifier::report(const char *msg, const MachineOperand *MO,
                             unsigned MONum, LLT MOVRegType) {
  assert(MO);
  report(msg, MO->getParent());
  errs() << "- operand " << MONum << ": ";
  MO->print(errs(), MOVRegType, TRI);
  errs() << "\n";
}

void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
  report(Msg.str().c_str(), MI);
}

void MachineVerifier::report_context(SlotIndex Pos) const {
  errs() << "- at: " << Pos << '\n';
}

void MachineVerifier::report_context(const LiveInterval &LI) const {
  errs() << "- interval: " << LI << '\n';
}

void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
                                     LaneBitmask LaneMask) const {
  report_context_liverange(LR);
  report_context_vreg_regunit(VRegUnit);
  if (LaneMask.any())
    report_context_lanemask(LaneMask);
}

void MachineVerifier::report_context(const LiveRange::Segment &S) const {
  errs() << "- segment: " << S << '\n';
}

void MachineVerifier::report_context(const VNInfo &VNI) const {
  errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
}

void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
  errs() << "- liverange: " << LR << '\n';
}

void MachineVerifier::report_context(MCPhysReg PReg) const {
  errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg(Register VReg) const {
  errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
  if (Register::isVirtualRegister(VRegOrUnit)) {
    report_context_vreg(VRegOrUnit);
  } else {
    errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
  }
}

void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
  errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
}

void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
  BBInfo &MInfo = MBBInfoMap[MBB];
  if (!MInfo.reachable) {
    MInfo.reachable = true;
    for (const MachineBasicBlock *Succ : MBB->successors())
      markReachable(Succ);
  }
}

void MachineVerifier::visitMachineFunctionBefore() {
  lastIndex = SlotIndex();
  regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
                                           : TRI->getReservedRegs(*MF);

  if (!MF->empty())
    markReachable(&MF->front());

  // Build a set of the basic blocks in the function.
  FunctionBlocks.clear();
  for (const auto &MBB : *MF) {
    FunctionBlocks.insert(&MBB);
    BBInfo &MInfo = MBBInfoMap[&MBB];

    MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
    if (MInfo.Preds.size() != MBB.pred_size())
      report("MBB has duplicate entries in its predecessor list.", &MBB);

    MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
    if (MInfo.Succs.size() != MBB.succ_size())
      report("MBB has duplicate entries in its successor list.", &MBB);
  }

  // Check that the register use lists are sane.
  MRI->verifyUseLists();

  if (!MF->empty())
    verifyStackFrame();
}

void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
  FirstTerminator = nullptr;
  FirstNonPHI = nullptr;

  if (!MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
    // If this block has allocatable physical registers live-in, check that
    // it is an entry block or landing pad.
    for (const auto &LI : MBB->liveins()) {
      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
          MBB->getIterator() != MBB->getParent()->begin()) {
        report("MBB has allocatable live-in, but isn't entry or landing-pad.",
               MBB);
        report_context(LI.PhysReg);
      }
    }
  }

  // Count the number of landing pad successors.
  SmallPtrSet<const MachineBasicBlock*, 4> LandingPadSuccs;
  for (const auto *succ : MBB->successors()) {
    if (succ->isEHPad())
      LandingPadSuccs.insert(succ);
    if (!FunctionBlocks.count(succ))
      report("MBB has successor that isn't part of the function.", MBB);
    if (!MBBInfoMap[succ].Preds.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the predecessor list of the successor "
             << printMBBReference(*succ) << ".\n";
    }
  }

  // Check the predecessor list.
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!FunctionBlocks.count(Pred))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the successor list of the predecessor "
             << printMBBReference(*Pred) << ".\n";
    }
  }

  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);

  // Call analyzeBranch. If it succeeds, there are several more conditions to
  // check.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
    // check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!", MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!", MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!", MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!", MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!", MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!", MBB);
      }
    } else {
      report("analyzeBranch returned invalid data!", MBB);
    }

    // Now check that the successors match up with the answers reported by
    // analyzeBranch.
    if (TBB && !MBB->isSuccessor(TBB))
      report("MBB exits via jump or conditional branch, but its target isn't a "
             "CFG successor!",
             MBB);
    if (FBB && !MBB->isSuccessor(FBB))
      report("MBB exits via conditional branch, but its target isn't a CFG "
             "successor!",
             MBB);

    // There might be a fallthrough to the next block if there's either no
    // unconditional true branch, or if there's a condition, and one of the
    // branches is missing.
    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);

    // A conditional fallthrough must be an actual CFG successor, not
    // unreachable. (Conversely, an unconditional fallthrough might not really
    // be a successor, because the block might end in unreachable.)
    if (!Cond.empty() && !FBB) {
      MachineFunction::const_iterator MBBI = std::next(MBB->getIterator());
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI))
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!",
               MBB);
    }

    // Verify that there aren't any extra un-accounted-for successors.
    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
      // If this successor is one of the branch targets, it's okay.
      if (SuccMBB == TBB || SuccMBB == FBB)
        continue;
      // If we might have a fallthrough, and the successor is the fallthrough
      // block, that's also ok.
      if (Fallthrough && SuccMBB == MBB->getNextNode())
        continue;
      // Also accept successors which are for exception-handling or might be
      // inlineasm_br targets.
      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
        continue;
      report("MBB has unexpected successors which are not branch targets, "
             "fallthrough, EHPads, or inlineasm_br targets.",
             MBB);
    }
  }

  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!Register::isPhysicalRegister(LI.PhysReg)) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
        regsLive.insert(SubReg);
    }
  }

  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits()) {
    for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
      regsLive.insert(SubReg);
  }

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}

// This function gets called for all bundle headers, including normal
// stand-alone unbundled instructions.
void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
  if (Indexes && Indexes->hasIndex(*MI)) {
    SlotIndex idx = Indexes->getInstructionIndex(*MI);
    if (!(idx > lastIndex)) {
      report("Instruction index out of order", MI);
      errs() << "Last instruction was at " << lastIndex << '\n';
    }
    lastIndex = idx;
  }

  // Ensure non-terminators don't follow terminators.
  if (MI->isTerminator()) {
    if (!FirstTerminator)
      FirstTerminator = MI;
  } else if (FirstTerminator) {
    report("Non-terminator instruction after the first terminator", MI);
    errs() << "First terminator was:\t" << *FirstTerminator;
  }
}

// The operands on an INLINEASM instruction must follow a template.
// Verify that the flag operands make sense.
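//
// The layout the checks below enforce, in sketch form:
//   operand 0:  asm string (external symbol)
//   operand 1:  extra flags (immediate)
//   operand 2+: operand groups, each a flag immediate followed by that
//               group's registers; then an optional MDNode and any trailing
//               implicit register operands.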
verifyInlineAsm(const MachineInstr * MI)814 void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
815 // The first two operands on INLINEASM are the asm string and global flags.
816 if (MI->getNumOperands() < 2) {
817 report("Too few operands on inline asm", MI);
818 return;
819 }
820 if (!MI->getOperand(0).isSymbol())
821 report("Asm string must be an external symbol", MI);
822 if (!MI->getOperand(1).isImm())
823 report("Asm flags must be an immediate", MI);
824 // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
825 // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
826 // and Extra_IsConvergent = 32.
827 if (!isUInt<6>(MI->getOperand(1).getImm()))
828 report("Unknown asm flags", &MI->getOperand(1), 1);
829
830 static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");
831
832 unsigned OpNo = InlineAsm::MIOp_FirstOperand;
833 unsigned NumOps;
834 for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
835 const MachineOperand &MO = MI->getOperand(OpNo);
836 // There may be implicit ops after the fixed operands.
837 if (!MO.isImm())
838 break;
839 NumOps = 1 + InlineAsm::getNumOperandRegisters(MO.getImm());
840 }
841
842 if (OpNo > MI->getNumOperands())
843 report("Missing operands in last group", MI);
844
845 // An optional MDNode follows the groups.
846 if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
847 ++OpNo;
848
849 // All trailing operands must be implicit registers.
850 for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
851 const MachineOperand &MO = MI->getOperand(OpNo);
852 if (!MO.isReg() || !MO.isImplicit())
853 report("Expected implicit register after groups", &MO, OpNo);
854 }
855 }

bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI) {
  if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
        if (!Op.isReg())
          return false;
        const auto Reg = Op.getReg();
        if (Reg.isPhysical())
          return false;
        return !MRI.getType(Reg).isScalar();
      }))
    return true;
  report("All register operands must have scalar types", &MI);
  return false;
}

/// Check that types are consistent when two operands need to have the same
/// number of vector elements.
/// \return true if the types are valid.
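/// For example, <4 x s32> and <4 x s16> are consistent (element counts
/// match), while <4 x s32> and <2 x s32>, or a vector and a scalar, are not.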
verifyVectorElementMatch(LLT Ty0,LLT Ty1,const MachineInstr * MI)875 bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
876 const MachineInstr *MI) {
877 if (Ty0.isVector() != Ty1.isVector()) {
878 report("operand types must be all-vector or all-scalar", MI);
879 // Generally we try to report as many issues as possible at once, but in
880 // this case it's not clear what should we be comparing the size of the
881 // scalar with: the size of the whole vector or its lane. Instead of
882 // making an arbitrary choice and emitting not so helpful message, let's
883 // avoid the extra noise and stop here.
884 return false;
885 }
886
887 if (Ty0.isVector() && Ty0.getNumElements() != Ty1.getNumElements()) {
888 report("operand types must preserve number of vector elements", MI);
889 return false;
890 }
891
892 return true;
893 }

void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
  if (isFunctionSelected)
    report("Unexpected generic instruction in a Selected function", MI);

  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MI->getNumOperands();

  // Branches must reference a basic block if they are not indirect
  if (MI->isBranch() && !MI->isIndirectBranch()) {
    bool HasMBB = false;
    for (const MachineOperand &Op : MI->operands()) {
      if (Op.isMBB()) {
        HasMBB = true;
        break;
      }
    }

    if (!HasMBB) {
      report("Branch instruction is missing a basic block operand or "
             "isIndirectBranch property",
             MI);
    }
  }

  // Check types.
  SmallVector<LLT, 4> Types;
  for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
       I != E; ++I) {
    if (!MCID.OpInfo[I].isGenericType())
      continue;
    // Generic instructions specify type equality constraints between some of
    // their operands. Make sure these are consistent.
    size_t TypeIdx = MCID.OpInfo[I].getGenericTypeIndex();
    Types.resize(std::max(TypeIdx + 1, Types.size()));

    const MachineOperand *MO = &MI->getOperand(I);
    if (!MO->isReg()) {
      report("generic instruction must use register operands", MI);
      continue;
    }

    LLT OpTy = MRI->getType(MO->getReg());
    // Don't report a type mismatch if there is no actual mismatch, only a
    // type missing, to reduce noise:
    if (OpTy.isValid()) {
      // Only the first valid type for a type index will be printed: don't
      // overwrite it later so it's always clear which type was expected:
      if (!Types[TypeIdx].isValid())
        Types[TypeIdx] = OpTy;
      else if (Types[TypeIdx] != OpTy)
        report("Type mismatch in generic instruction", MO, I, OpTy);
    } else {
      // Generic instructions must have types attached to their operands.
      report("Generic instruction is missing a virtual register type", MO, I);
    }
  }

  // Generic opcodes must not have physical register operands.
  for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
    const MachineOperand *MO = &MI->getOperand(I);
    if (MO->isReg() && Register::isPhysicalRegister(MO->getReg()))
      report("Generic instruction cannot have physical register", MO, I);
  }

  // Avoid out of bounds in checks below. This was already reported earlier.
  if (MI->getNumOperands() < MCID.getNumOperands())
    return;

  StringRef ErrorInfo;
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_ASSERT_ZEXT: {
    std::string OpcName =
        Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
    if (!MI->getOperand(2).isImm()) {
      report(Twine(OpcName, " expects an immediate operand #2"), MI);
      break;
    }

    Register Dst = MI->getOperand(0).getReg();
    Register Src = MI->getOperand(1).getReg();
    LLT SrcTy = MRI->getType(Src);
    int64_t Imm = MI->getOperand(2).getImm();
    if (Imm <= 0) {
      report(Twine(OpcName, " size must be >= 1"), MI);
      break;
    }

    if (Imm >= SrcTy.getScalarSizeInBits()) {
      report(Twine(OpcName, " size must be less than source bit width"), MI);
      break;
    }

    if (MRI->getRegBankOrNull(Src) != MRI->getRegBankOrNull(Dst)) {
      report(
          Twine(OpcName, " source and destination register banks must match"),
          MI);
      break;
    }

    if (MRI->getRegClassOrNull(Src) != MRI->getRegClassOrNull(Dst))
      report(
          Twine(OpcName, " source and destination register classes must match"),
          MI);

    break;
  }

  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector())
      report("Instruction cannot use a vector result type", MI);

    if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (!MI->getOperand(1).isCImm()) {
        report("G_CONSTANT operand must be cimm", MI);
        break;
      }

      const ConstantInt *CI = MI->getOperand(1).getCImm();
      if (CI->getBitWidth() != DstTy.getSizeInBits())
        report("inconsistent constant size", MI);
    } else {
      if (!MI->getOperand(1).isFPImm()) {
        report("G_FCONSTANT operand must be fpimm", MI);
        break;
      }
      const ConstantFP *CF = MI->getOperand(1).getFPImm();

      if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) !=
          DstTy.getSizeInBits()) {
        report("inconsistent constant size", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_ZEXTLOAD:
  case TargetOpcode::G_SEXTLOAD: {
    LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    if (!PtrTy.isPointer())
      report("Generic memory instruction must access a pointer", MI);

    // Generic loads and stores must have a single MachineMemOperand
    // describing that access.
    if (!MI->hasOneMemOperand()) {
      report("Generic instruction accessing memory must have one mem operand",
             MI);
    } else {
      const MachineMemOperand &MMO = **MI->memoperands_begin();
      if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
          MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
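        // For example, a G_ZEXTLOAD producing s32 from a 16-bit memory access
        // is a valid extload; a full 32-bit access would have to be a plain
        // G_LOAD instead.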
        if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
          report("Generic extload must have a narrower memory type", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
        if (MMO.getSize() > ValTy.getSizeInBytes())
          report("load memory size cannot exceed result size", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
        if (ValTy.getSizeInBytes() < MMO.getSize())
          report("store memory size cannot exceed value size", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PHI: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
                                    [this, &DstTy](const MachineOperand &MO) {
                                      if (!MO.isReg())
                                        return true;
                                      LLT Ty = MRI->getType(MO.getReg());
                                      if (!Ty.isValid() || (Ty != DstTy))
                                        return false;
                                      return true;
                                    }))
      report("Generic Instruction G_PHI has operands with incompatible/missing "
             "types",
             MI);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (SrcTy.isPointer() != DstTy.isPointer())
      report("bitcast cannot convert between pointers and other types", MI);

    if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
      report("bitcast sizes must match", MI);

    if (SrcTy == DstTy)
      report("bitcast must change the type", MI);

    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_ADDRSPACE_CAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    DstTy = DstTy.getScalarType();
    SrcTy = SrcTy.getScalarType();

    if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
      if (!DstTy.isPointer())
        report("inttoptr result type must be a pointer", MI);
      if (SrcTy.isPointer())
        report("inttoptr source type must not be a pointer", MI);
    } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
      if (!SrcTy.isPointer())
        report("ptrtoint source type must be a pointer", MI);
      if (DstTy.isPointer())
        report("ptrtoint result type must not be a pointer", MI);
    } else {
      assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
      if (!SrcTy.isPointer() || !DstTy.isPointer())
        report("addrspacecast types must be pointers", MI);
      else {
        if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
          report("addrspacecast must convert different address spaces", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PTR_ADD: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
      break;

    if (!PtrTy.getScalarType().isPointer())
      report("gep first operand must be a pointer", MI);

    if (OffsetTy.getScalarType().isPointer())
      report("gep offset operand must not be a pointer", MI);

    // TODO: Is the offset allowed to be a scalar with a vector?
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
      break;

    if (!DstTy.getScalarType().isPointer())
      report("ptrmask result type must be a pointer", MI);

    if (!MaskTy.getScalarType().isScalar())
      report("ptrmask mask type must be an integer", MI);

    verifyVectorElementMatch(DstTy, MaskTy, MI);
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC: {
    // The number of operands and presence of types were already checked (and
    // reported if there were any issues), so there is no need to report them
    // again. Since we're trying to report as many issues as possible at once,
    // however, the instructions aren't guaranteed to have the right number of
    // operands or types attached to them at this point.
    assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    LLT DstElTy = DstTy.getScalarType();
    LLT SrcElTy = SrcTy.getScalarType();
    if (DstElTy.isPointer() || SrcElTy.isPointer())
      report("Generic extend/truncate can not operate on pointers", MI);

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    unsigned DstSize = DstElTy.getSizeInBits();
    unsigned SrcSize = SrcElTy.getSizeInBits();
    switch (MI->getOpcode()) {
    default:
      if (DstSize <= SrcSize)
        report("Generic extend has destination type no larger than source", MI);
      break;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_FPTRUNC:
      if (DstSize >= SrcSize)
        report("Generic truncate has destination type no smaller than source",
               MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_SELECT: {
    LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
    LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
    if (!SelTy.isValid() || !CondTy.isValid())
      break;

    // Scalar condition select on a vector is valid.
    if (CondTy.isVector())
      verifyVectorElementMatch(SelTy, CondTy, MI);
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
    // e.g. s2N = MERGE sN, sN
    // Merging multiple scalars into a vector is not allowed, should use
    // G_BUILD_VECTOR for that.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (DstTy.isVector() || SrcTy.isVector())
      report("G_MERGE_VALUES cannot operate on vectors", MI);

    const unsigned NumOps = MI->getNumOperands();
    if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
      report("G_MERGE_VALUES result size is inconsistent", MI);

    for (unsigned I = 2; I != NumOps; ++I) {
      if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
        report("G_MERGE_VALUES source types do not match", MI);
    }

    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(MI->getNumOperands()-1).getReg());
    // For now G_UNMERGE can split vectors.
    for (unsigned i = 0; i < MI->getNumOperands()-1; ++i) {
      if (MRI->getType(MI->getOperand(i).getReg()) != DstTy)
        report("G_UNMERGE_VALUES destination types do not match", MI);
    }
    if (SrcTy.getSizeInBits() !=
        (DstTy.getSizeInBits() * (MI->getNumOperands() - 1))) {
      report("G_UNMERGE_VALUES source operand does not cover dest operands",
             MI);
    }
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // Source types must be scalars, dest type a vector. Total size of scalars
    // must match the dest vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector()) {
      report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
      break;
    }

    if (DstTy.getElementType() != SrcEltTy)
      report("G_BUILD_VECTOR result element type must match source type", MI);

    if (DstTy.getNumElements() != MI->getNumOperands() - 1)
1270 report("G_BUILD_VECTOR must have an operand for each elemement", MI);
1271
1272 for (unsigned i = 2; i < MI->getNumOperands(); ++i) {
1273 if (MRI->getType(MI->getOperand(1).getReg()) !=
1274 MRI->getType(MI->getOperand(i).getReg()))
1275 report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1276 }
1277
1278 break;
1279 }
1280 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1281 // Source types must be scalars, dest type a vector. Scalar types must be
1282 // larger than the dest vector elt type, as this is a truncating operation.
1283 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1284 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1285 if (!DstTy.isVector() || SrcEltTy.isVector())
1286 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1287 MI);
1288 for (unsigned i = 2; i < MI->getNumOperands(); ++i) {
1289 if (MRI->getType(MI->getOperand(1).getReg()) !=
1290 MRI->getType(MI->getOperand(i).getReg()))
1291 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1292 MI);
1293 }
1294 if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1295 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1296 "dest elt type",
1297 MI);
1298 break;
1299 }
1300 case TargetOpcode::G_CONCAT_VECTORS: {
1301 // Source types should be vectors, and total size should match the dest
1302 // vector size.
1303 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1304 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1305 if (!DstTy.isVector() || !SrcTy.isVector())
1306 report("G_CONCAT_VECTOR requires vector source and destination operands",
1307 MI);
1308
1309 if (MI->getNumOperands() < 3)
1310 report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
1311
1312 for (unsigned i = 2; i < MI->getNumOperands(); ++i) {
1313 if (MRI->getType(MI->getOperand(1).getReg()) !=
1314 MRI->getType(MI->getOperand(i).getReg()))
1315 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1316 }
1317 if (DstTy.getNumElements() !=
1318 SrcTy.getNumElements() * (MI->getNumOperands() - 1))
1319 report("G_CONCAT_VECTOR num dest and source elements should match", MI);
1320 break;
1321 }
1322 case TargetOpcode::G_ICMP:
1323 case TargetOpcode::G_FCMP: {
1324 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1325 LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1326
1327 if ((DstTy.isVector() != SrcTy.isVector()) ||
1328 (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements()))
1329 report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1330
1331 break;
1332 }
1333 case TargetOpcode::G_EXTRACT: {
1334 const MachineOperand &SrcOp = MI->getOperand(1);
1335 if (!SrcOp.isReg()) {
1336 report("extract source must be a register", MI);
1337 break;
1338 }
1339
1340 const MachineOperand &OffsetOp = MI->getOperand(2);
1341 if (!OffsetOp.isImm()) {
1342 report("extract offset must be a constant", MI);
1343 break;
1344 }
1345
1346 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1347 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1348 if (SrcSize == DstSize)
1349 report("extract source must be larger than result", MI);
1350
1351 if (DstSize + OffsetOp.getImm() > SrcSize)
1352 report("extract reads past end of register", MI);
1353 break;
1354 }
1355 case TargetOpcode::G_INSERT: {
1356 const MachineOperand &SrcOp = MI->getOperand(2);
1357 if (!SrcOp.isReg()) {
1358 report("insert source must be a register", MI);
1359 break;
1360 }
1361
1362 const MachineOperand &OffsetOp = MI->getOperand(3);
1363 if (!OffsetOp.isImm()) {
1364 report("insert offset must be a constant", MI);
1365 break;
1366 }
1367
1368 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1369 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1370
1371 if (DstSize <= SrcSize)
1372 report("inserted size must be smaller than total register", MI);
1373
1374 if (SrcSize + OffsetOp.getImm() > DstSize)
1375 report("insert writes past end of register", MI);
1376
1377 break;
1378 }
1379 case TargetOpcode::G_JUMP_TABLE: {
1380 if (!MI->getOperand(1).isJTI())
1381 report("G_JUMP_TABLE source operand must be a jump table index", MI);
1382 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1383 if (!DstTy.isPointer())
1384 report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1385 break;
1386 }
1387 case TargetOpcode::G_BRJT: {
1388 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1389 report("G_BRJT src operand 0 must be a pointer type", MI);
1390
1391 if (!MI->getOperand(1).isJTI())
1392 report("G_BRJT src operand 1 must be a jump table index", MI);
1393
1394 const auto &IdxOp = MI->getOperand(2);
1395 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1396 report("G_BRJT src operand 2 must be a scalar reg type", MI);
1397 break;
1398 }
1399 case TargetOpcode::G_INTRINSIC:
1400 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
1401 // TODO: Should verify number of def and use operands, but the current
1402 // interface requires passing in IR types for mangling.
1403 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1404 if (!IntrIDOp.isIntrinsicID()) {
1405 report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1406 break;
1407 }
1408
1409 bool NoSideEffects = MI->getOpcode() == TargetOpcode::G_INTRINSIC;
1410 unsigned IntrID = IntrIDOp.getIntrinsicID();
1411 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1412 AttributeList Attrs
1413 = Intrinsic::getAttributes(MF->getFunction().getContext(),
1414 static_cast<Intrinsic::ID>(IntrID));
1415 bool DeclHasSideEffects = !Attrs.hasFnAttr(Attribute::ReadNone);
1416 if (NoSideEffects && DeclHasSideEffects) {
1417 report("G_INTRINSIC used with intrinsic that accesses memory", MI);
1418 break;
1419 }
1420 if (!NoSideEffects && !DeclHasSideEffects) {
1421 report("G_INTRINSIC_W_SIDE_EFFECTS used with readnone intrinsic", MI);
1422 break;
1423 }
1424 }
1425
1426 break;
1427 }
1428 case TargetOpcode::G_SEXT_INREG: {
1429 if (!MI->getOperand(2).isImm()) {
1430 report("G_SEXT_INREG expects an immediate operand #2", MI);
1431 break;
1432 }
1433
1434 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1435 int64_t Imm = MI->getOperand(2).getImm();
1436 if (Imm <= 0)
1437 report("G_SEXT_INREG size must be >= 1", MI);
1438 if (Imm >= SrcTy.getScalarSizeInBits())
1439 report("G_SEXT_INREG size must be less than source bit width", MI);
1440 break;
1441 }
1442 case TargetOpcode::G_SHUFFLE_VECTOR: {
1443 const MachineOperand &MaskOp = MI->getOperand(3);
1444 if (!MaskOp.isShuffleMask()) {
1445 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1446 break;
1447 }
1448
1449 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1450 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1451 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1452
1453 if (Src0Ty != Src1Ty)
1454 report("Source operands must be the same type", MI);
1455
1456 if (Src0Ty.getScalarType() != DstTy.getScalarType())
1457 report("G_SHUFFLE_VECTOR cannot change element type", MI);
1458
1459 // Don't check that all operands are vector because scalars are used in
1460 // place of 1 element vectors.
1461 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1462 int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1463
1464 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1465
1466 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1467 report("Wrong result type for shufflemask", MI);
1468
1469 for (int Idx : MaskIdxes) {
1470 if (Idx < 0)
1471 continue;
1472
1473 if (Idx >= 2 * SrcNumElts)
1474 report("Out of bounds shuffle index", MI);
1475 }
1476
1477 break;
1478 }
1479 case TargetOpcode::G_DYN_STACKALLOC: {
1480 const MachineOperand &DstOp = MI->getOperand(0);
1481 const MachineOperand &AllocOp = MI->getOperand(1);
1482 const MachineOperand &AlignOp = MI->getOperand(2);
1483
1484 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
1485 report("dst operand 0 must be a pointer type", MI);
1486 break;
1487 }
1488
1489 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
1490 report("src operand 1 must be a scalar reg type", MI);
1491 break;
1492 }
1493
1494 if (!AlignOp.isImm()) {
1495 report("src operand 2 must be an immediate type", MI);
1496 break;
1497 }
1498 break;
1499 }
1500 case TargetOpcode::G_MEMCPY_INLINE:
1501 case TargetOpcode::G_MEMCPY:
1502 case TargetOpcode::G_MEMMOVE: {
1503 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1504 if (MMOs.size() != 2) {
1505 report("memcpy/memmove must have 2 memory operands", MI);
1506 break;
1507 }
1508
1509 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
1510 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
1511 report("wrong memory operand types", MI);
1512 break;
1513 }
1514
1515 if (MMOs[0]->getSize() != MMOs[1]->getSize())
1516 report("inconsistent memory operand sizes", MI);
1517
1518 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1519 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
1520
1521 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
1522 report("memory instruction operand must be a pointer", MI);
1523 break;
1524 }
1525
1526 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1527 report("inconsistent store address space", MI);
1528 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
1529 report("inconsistent load address space", MI);
1530
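    // G_MEMCPY_INLINE is never lowered to a libcall, so it carries no 'tail'
    // flag operand to check.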
1531 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
1532 if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
1533 report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
1534
1535 break;
1536 }
1537 case TargetOpcode::G_BZERO:
1538 case TargetOpcode::G_MEMSET: {
1539 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1540 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
1541 if (MMOs.size() != 1) {
1542 report(Twine(Name, " must have 1 memory operand"), MI);
1543 break;
1544 }
1545
1546 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
1547 report(Twine(Name, " memory operand must be a store"), MI);
1548 break;
1549 }
1550
1551 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1552 if (!DstPtrTy.isPointer()) {
1553 report(Twine(Name, " operand must be a pointer"), MI);
1554 break;
1555 }
1556
1557 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1558 report("inconsistent " + Twine(Name, " address space"), MI);
1559
1560 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
1561 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
1562 report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
1563
1564 break;
1565 }
1566 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
1567 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
1568 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1569 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1570 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1571 if (!DstTy.isScalar())
1572 report("Vector reduction requires a scalar destination type", MI);
1573 if (!Src1Ty.isScalar())
1574 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
1575 if (!Src2Ty.isVector())
1576 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
1577 break;
1578 }
1579 case TargetOpcode::G_VECREDUCE_FADD:
1580 case TargetOpcode::G_VECREDUCE_FMUL:
1581 case TargetOpcode::G_VECREDUCE_FMAX:
1582 case TargetOpcode::G_VECREDUCE_FMIN:
1583 case TargetOpcode::G_VECREDUCE_ADD:
1584 case TargetOpcode::G_VECREDUCE_MUL:
1585 case TargetOpcode::G_VECREDUCE_AND:
1586 case TargetOpcode::G_VECREDUCE_OR:
1587 case TargetOpcode::G_VECREDUCE_XOR:
1588 case TargetOpcode::G_VECREDUCE_SMAX:
1589 case TargetOpcode::G_VECREDUCE_SMIN:
1590 case TargetOpcode::G_VECREDUCE_UMAX:
1591 case TargetOpcode::G_VECREDUCE_UMIN: {
1592 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1593 if (!DstTy.isScalar())
1594 report("Vector reduction requires a scalar destination type", MI);
1595 break;
1596 }
1597
1598 case TargetOpcode::G_SBFX:
1599 case TargetOpcode::G_UBFX: {
1600 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1601 if (DstTy.isVector()) {
1602 report("Bitfield extraction is not supported on vectors", MI);
1603 break;
1604 }
1605 break;
1606 }
1607 case TargetOpcode::G_ROTR:
1608 case TargetOpcode::G_ROTL: {
1609 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1610 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1611 if (Src1Ty.isVector() != Src2Ty.isVector()) {
1612 report("Rotate requires operands to be either all scalars or all vectors",
1613 MI);
1614 break;
1615 }
1616 break;
1617 }
1618 case TargetOpcode::G_LLROUND:
1619 case TargetOpcode::G_LROUND: {
1620 verifyAllRegOpsScalar(*MI, *MRI);
1621 break;
1622 }
1623 default:
1624 break;
1625 }
1626 }
1627
1628 void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
1629 const MCInstrDesc &MCID = MI->getDesc();
1630 if (MI->getNumOperands() < MCID.getNumOperands()) {
1631 report("Too few operands", MI);
1632 errs() << MCID.getNumOperands() << " operands expected, but "
1633 << MI->getNumOperands() << " given.\n";
1634 }
1635
1636 if (MI->isPHI()) {
1637 if (MF->getProperties().hasProperty(
1638 MachineFunctionProperties::Property::NoPHIs))
1639 report("Found PHI instruction with NoPHIs property set", MI);
1640
1641 if (FirstNonPHI)
1642 report("Found PHI instruction after non-PHI", MI);
1643 } else if (FirstNonPHI == nullptr)
1644 FirstNonPHI = MI;
1645
1646 // Check the tied operands.
1647 if (MI->isInlineAsm())
1648 verifyInlineAsm(MI);
1649
1650 // Check that unspillable terminators define a reg and have at most one use.
1651 if (TII->isUnspillableTerminator(MI)) {
1652 if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
1653 report("Unspillable Terminator does not define a reg", MI);
1654 Register Def = MI->getOperand(0).getReg();
1655 if (Def.isVirtual() &&
1656 !MF->getProperties().hasProperty(
1657 MachineFunctionProperties::Property::NoPHIs) &&
1658 std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
1659 report("Unspillable Terminator expected to have at most one use!", MI);
1660 }
1661
1662 // A fully-formed DBG_VALUE must have a location. Ignore partially formed
1663 // DBG_VALUEs: these are convenient to use in tests, but should never get
1664 // generated.
1665 if (MI->isDebugValue() && MI->getNumOperands() == 4)
1666 if (!MI->getDebugLoc())
1667 report("Missing DebugLoc for debug instruction", MI);
1668
1669   // Meta instructions should never be the subject of debug value tracking;
1670   // they don't create a value in the output program at all.
1671 if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
1672 report("Metadata instruction should not have a value tracking number", MI);
1673
1674 // Check the MachineMemOperands for basic consistency.
1675 for (MachineMemOperand *Op : MI->memoperands()) {
1676 if (Op->isLoad() && !MI->mayLoad())
1677 report("Missing mayLoad flag", MI);
1678 if (Op->isStore() && !MI->mayStore())
1679 report("Missing mayStore flag", MI);
1680 }
1681
1682 // Debug values must not have a slot index.
1683 // Other instructions must have one, unless they are inside a bundle.
1684 if (LiveInts) {
1685 bool mapped = !LiveInts->isNotInMIMap(*MI);
1686 if (MI->isDebugOrPseudoInstr()) {
1687 if (mapped)
1688 report("Debug instruction has a slot index", MI);
1689 } else if (MI->isInsideBundle()) {
1690 if (mapped)
1691 report("Instruction inside bundle has a slot index", MI);
1692 } else {
1693 if (!mapped)
1694 report("Missing slot index", MI);
1695 }
1696 }
1697
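  // Pre-isel generic instructions get their own structural checks; everything
  // past this point applies only to target instructions.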
1698 unsigned Opc = MCID.getOpcode();
1699 if (isPreISelGenericOpcode(Opc) || isPreISelGenericOptimizationHint(Opc)) {
1700 verifyPreISelGenericInstruction(MI);
1701 return;
1702 }
1703
1704 StringRef ErrorInfo;
1705 if (!TII->verifyInstruction(*MI, ErrorInfo))
1706 report(ErrorInfo.data(), MI);
1707
1708 // Verify properties of various specific instruction types
1709 switch (MI->getOpcode()) {
1710 case TargetOpcode::COPY: {
1711 const MachineOperand &DstOp = MI->getOperand(0);
1712 const MachineOperand &SrcOp = MI->getOperand(1);
1713 const Register SrcReg = SrcOp.getReg();
1714 const Register DstReg = DstOp.getReg();
1715
1716 LLT DstTy = MRI->getType(DstReg);
1717 LLT SrcTy = MRI->getType(SrcReg);
1718 if (SrcTy.isValid() && DstTy.isValid()) {
1719 // If both types are valid, check that the types are the same.
1720 if (SrcTy != DstTy) {
1721 report("Copy Instruction is illegal with mismatching types", MI);
1722 errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
1723 }
1724
1725 break;
1726 }
1727
1728 if (!SrcTy.isValid() && !DstTy.isValid())
1729 break;
1730
1731 // If we have only one valid type, this is likely a copy between a virtual
1732 // and physical register.
1733 unsigned SrcSize = 0;
1734 unsigned DstSize = 0;
1735 if (SrcReg.isPhysical() && DstTy.isValid()) {
1736 const TargetRegisterClass *SrcRC =
1737 TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
1738 if (SrcRC)
1739 SrcSize = TRI->getRegSizeInBits(*SrcRC);
1740 }
1741
1742 if (SrcSize == 0)
1743 SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
1744
1745 if (DstReg.isPhysical() && SrcTy.isValid()) {
1746 const TargetRegisterClass *DstRC =
1747 TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
1748 if (DstRC)
1749 DstSize = TRI->getRegSizeInBits(*DstRC);
1750 }
1751
1752 if (DstSize == 0)
1753 DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
1754
1755 if (SrcSize != 0 && DstSize != 0 && SrcSize != DstSize) {
1756 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
1757 report("Copy Instruction is illegal with mismatching sizes", MI);
1758 errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
1759 << "\n";
1760 }
1761 }
1762 break;
1763 }
1764 case TargetOpcode::STATEPOINT: {
1765 StatepointOpers SO(MI);
1766 if (!MI->getOperand(SO.getIDPos()).isImm() ||
1767 !MI->getOperand(SO.getNBytesPos()).isImm() ||
1768 !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
1769 report("meta operands to STATEPOINT not constant!", MI);
1770 break;
1771 }
1772
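    // A stack map constant is encoded as a StackMaps::ConstantOp marker
    // immediately followed by the immediate value; verify both slots.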
1773 auto VerifyStackMapConstant = [&](unsigned Offset) {
1774 if (Offset >= MI->getNumOperands()) {
1775 report("stack map constant to STATEPOINT is out of range!", MI);
1776 return;
1777 }
1778 if (!MI->getOperand(Offset - 1).isImm() ||
1779 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
1780 !MI->getOperand(Offset).isImm())
1781 report("stack map constant to STATEPOINT not well formed!", MI);
1782 };
1783 VerifyStackMapConstant(SO.getCCIdx());
1784 VerifyStackMapConstant(SO.getFlagsIdx());
1785 VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
1786 VerifyStackMapConstant(SO.getNumGCPtrIdx());
1787 VerifyStackMapConstant(SO.getNumAllocaIdx());
1788 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
1789
1790     // Verify that all explicit statepoint defs are tied to gc operands, as
1791     // they are expected to be relocations of gc operands.
1792 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
1793 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
1794 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
1795 unsigned UseOpIdx;
1796 if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
1797 report("STATEPOINT defs expected to be tied", MI);
1798 break;
1799 }
1800 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
1801 report("STATEPOINT def tied to non-gc operand", MI);
1802 break;
1803 }
1804 }
1805
1806 // TODO: verify we have properly encoded deopt arguments
1807 } break;
1808 case TargetOpcode::INSERT_SUBREG: {
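    // The inserted value (operand 2) must fit in the subregister slot named
    // by operand 3; honor a subreg index on the inserted operand if present.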
1809 unsigned InsertedSize;
1810 if (unsigned SubIdx = MI->getOperand(2).getSubReg())
1811 InsertedSize = TRI->getSubRegIdxSize(SubIdx);
1812 else
1813 InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
1814 unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
1815 if (SubRegSize < InsertedSize) {
1816 report("INSERT_SUBREG expected inserted value to have equal or lesser "
1817 "size than the subreg it was inserted into", MI);
1818 break;
1819 }
1820 } break;
1821 }
1822 }
1823
1824 void
1825 MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
1826 const MachineInstr *MI = MO->getParent();
1827 const MCInstrDesc &MCID = MI->getDesc();
1828 unsigned NumDefs = MCID.getNumDefs();
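  // PATCHPOINT's return value def is optional: only operand 0 counts as an
  // explicit def, and only when it is actually a register.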
1829 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
1830 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
1831
1832 // The first MCID.NumDefs operands must be explicit register defines
1833 if (MONum < NumDefs) {
1834 const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
1835 if (!MO->isReg())
1836 report("Explicit definition must be a register", MO, MONum);
1837 else if (!MO->isDef() && !MCOI.isOptionalDef())
1838 report("Explicit definition marked as use", MO, MONum);
1839 else if (MO->isImplicit())
1840 report("Explicit definition marked as implicit", MO, MONum);
1841 } else if (MONum < MCID.getNumOperands()) {
1842 const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
1843     // Don't check if it's the last operand in a variadic instruction. See,
1844     // e.g., LDM_RET in the ARM backend. Check non-variadic operands only.
1845 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
1846 if (!IsOptional) {
1847 if (MO->isReg()) {
1848 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
1849 report("Explicit operand marked as def", MO, MONum);
1850 if (MO->isImplicit())
1851 report("Explicit operand marked as implicit", MO, MONum);
1852 }
1853
1854 // Check that an instruction has register operands only as expected.
1855 if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
1856 !MO->isReg() && !MO->isFI())
1857 report("Expected a register operand.", MO, MONum);
1858 if (MO->isReg()) {
1859 if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
1860 (MCOI.OperandType == MCOI::OPERAND_PCREL &&
1861 !TII->isPCRelRegisterOperandLegal(*MO)))
1862 report("Expected a non-register operand.", MO, MONum);
1863 }
1864 }
1865
1866 int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
1867 if (TiedTo != -1) {
1868 if (!MO->isReg())
1869 report("Tied use must be a register", MO, MONum);
1870 else if (!MO->isTied())
1871 report("Operand should be tied", MO, MONum);
1872 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
1873 report("Tied def doesn't match MCInstrDesc", MO, MONum);
1874 else if (Register::isPhysicalRegister(MO->getReg())) {
1875 const MachineOperand &MOTied = MI->getOperand(TiedTo);
1876 if (!MOTied.isReg())
1877 report("Tied counterpart must be a register", &MOTied, TiedTo);
1878 else if (Register::isPhysicalRegister(MOTied.getReg()) &&
1879 MO->getReg() != MOTied.getReg())
1880 report("Tied physical registers must match.", &MOTied, TiedTo);
1881 }
1882 } else if (MO->isReg() && MO->isTied())
1883 report("Explicit operand should not be tied", MO, MONum);
1884 } else {
1885 // ARM adds %reg0 operands to indicate predicates. We'll allow that.
1886 if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg())
1887 report("Extra explicit operand on non-variadic instruction", MO, MONum);
1888 }
1889
1890 switch (MO->getType()) {
1891 case MachineOperand::MO_Register: {
1892 // Verify debug flag on debug instructions. Check this first because reg0
1893 // indicates an undefined debug value.
1894 if (MI->isDebugInstr() && MO->isUse()) {
1895 if (!MO->isDebug())
1896 report("Register operand must be marked debug", MO, MONum);
1897 } else if (MO->isDebug()) {
1898 report("Register operand must not be marked debug", MO, MONum);
1899 }
1900
1901 const Register Reg = MO->getReg();
1902 if (!Reg)
1903 return;
1904 if (MRI->tracksLiveness() && !MI->isDebugValue())
1905 checkLiveness(MO, MONum);
1906
1907 // Verify the consistency of tied operands.
1908 if (MO->isTied()) {
1909 unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
1910 const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
1911 if (!OtherMO.isReg())
1912 report("Must be tied to a register", MO, MONum);
1913 if (!OtherMO.isTied())
1914 report("Missing tie flags on tied operand", MO, MONum);
1915 if (MI->findTiedOperandIdx(OtherIdx) != MONum)
1916 report("Inconsistent tie links", MO, MONum);
1917 if (MONum < MCID.getNumDefs()) {
1918 if (OtherIdx < MCID.getNumOperands()) {
1919 if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
1920 report("Explicit def tied to explicit use without tie constraint",
1921 MO, MONum);
1922 } else {
1923 if (!OtherMO.isImplicit())
1924 report("Explicit def should be tied to implicit use", MO, MONum);
1925 }
1926 }
1927 }
1928
1929     // Verify two-address constraints after the twoaddressinstruction pass.
1930     // Both the twoaddressinstruction pass and the phi-node-elimination pass
1931     // call MRI->leaveSSA() to mark MF as NoSSA, but this verification should
1932     // run after twoaddressinstruction, not after phi-node-elimination. So
1933     // rather than keying on NoSSA, base the two-address check on the
1934     // TiedOpsRewritten property, which is set by the twoaddressinstruction
1935     // pass.
1936 unsigned DefIdx;
1937 if (MF->getProperties().hasProperty(
1938 MachineFunctionProperties::Property::TiedOpsRewritten) &&
1939 MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
1940 Reg != MI->getOperand(DefIdx).getReg())
1941 report("Two-address instruction operands must be identical", MO, MONum);
1942
1943 // Check register classes.
1944 unsigned SubIdx = MO->getSubReg();
1945
1946 if (Register::isPhysicalRegister(Reg)) {
1947 if (SubIdx) {
1948 report("Illegal subregister index for physical register", MO, MONum);
1949 return;
1950 }
1951 if (MONum < MCID.getNumOperands()) {
1952 if (const TargetRegisterClass *DRC =
1953 TII->getRegClass(MCID, MONum, TRI, *MF)) {
1954 if (!DRC->contains(Reg)) {
1955 report("Illegal physical register for instruction", MO, MONum);
1956 errs() << printReg(Reg, TRI) << " is not a "
1957 << TRI->getRegClassName(DRC) << " register.\n";
1958 }
1959 }
1960 }
1961 if (MO->isRenamable()) {
1962 if (MRI->isReserved(Reg)) {
1963 report("isRenamable set on reserved register", MO, MONum);
1964 return;
1965 }
1966 }
1967 } else {
1968 // Virtual register.
1969 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
1970 if (!RC) {
1971 // This is a generic virtual register.
1972
1973 // Do not allow undef uses for generic virtual registers. This ensures
1974 // getVRegDef can never fail and return null on a generic register.
1975 //
1976 // FIXME: This restriction should probably be broadened to all SSA
1977 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
1978 // run on the SSA function just before phi elimination.
1979 if (MO->isUndef())
1980 report("Generic virtual register use cannot be undef", MO, MONum);
1981
1982 // If we're post-Select, we can't have gvregs anymore.
1983 if (isFunctionSelected) {
1984 report("Generic virtual register invalid in a Selected function",
1985 MO, MONum);
1986 return;
1987 }
1988
1989 // The gvreg must have a type and it must not have a SubIdx.
1990 LLT Ty = MRI->getType(Reg);
1991 if (!Ty.isValid()) {
1992 report("Generic virtual register must have a valid type", MO,
1993 MONum);
1994 return;
1995 }
1996
1997 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
1998
1999 // If we're post-RegBankSelect, the gvreg must have a bank.
2000 if (!RegBank && isFunctionRegBankSelected) {
2001 report("Generic virtual register must have a bank in a "
2002 "RegBankSelected function",
2003 MO, MONum);
2004 return;
2005 }
2006
2007 // Make sure the register fits into its register bank if any.
2008 if (RegBank && Ty.isValid() &&
2009 RegBank->getSize() < Ty.getSizeInBits()) {
2010 report("Register bank is too small for virtual register", MO,
2011 MONum);
2012 errs() << "Register bank " << RegBank->getName() << " too small("
2013 << RegBank->getSize() << ") to fit " << Ty.getSizeInBits()
2014 << "-bits\n";
2015 return;
2016 }
2017 if (SubIdx) {
2018 report("Generic virtual register does not allow subregister index", MO,
2019 MONum);
2020 return;
2021 }
2022
2023         // If this is a target-specific instruction and this operand has a
2024         // register class constraint, the virtual register must comply
2025         // with it.
2026 if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2027 MONum < MCID.getNumOperands() &&
2028 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2029 report("Virtual register does not match instruction constraint", MO,
2030 MONum);
2031 errs() << "Expect register class "
2032 << TRI->getRegClassName(
2033 TII->getRegClass(MCID, MONum, TRI, *MF))
2034 << " but got nothing\n";
2035 return;
2036 }
2037
2038 break;
2039 }
2040 if (SubIdx) {
2041 const TargetRegisterClass *SRC =
2042 TRI->getSubClassWithSubReg(RC, SubIdx);
2043 if (!SRC) {
2044 report("Invalid subregister index for virtual register", MO, MONum);
2045 errs() << "Register class " << TRI->getRegClassName(RC)
2046 << " does not support subreg index " << SubIdx << "\n";
2047 return;
2048 }
2049 if (RC != SRC) {
2050 report("Invalid register class for subregister index", MO, MONum);
2051 errs() << "Register class " << TRI->getRegClassName(RC)
2052 << " does not fully support subreg index " << SubIdx << "\n";
2053 return;
2054 }
2055 }
2056 if (MONum < MCID.getNumOperands()) {
2057 if (const TargetRegisterClass *DRC =
2058 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2059 if (SubIdx) {
2060 const TargetRegisterClass *SuperRC =
2061 TRI->getLargestLegalSuperClass(RC, *MF);
2062 if (!SuperRC) {
2063 report("No largest legal super class exists.", MO, MONum);
2064 return;
2065 }
2066 DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2067 if (!DRC) {
2068 report("No matching super-reg register class.", MO, MONum);
2069 return;
2070 }
2071 }
2072 if (!RC->hasSuperClassEq(DRC)) {
2073 report("Illegal virtual register for instruction", MO, MONum);
2074 errs() << "Expected a " << TRI->getRegClassName(DRC)
2075 << " register, but got a " << TRI->getRegClassName(RC)
2076 << " register\n";
2077 }
2078 }
2079 }
2080 }
2081 break;
2082 }
2083
2084 case MachineOperand::MO_RegisterMask:
2085 regMasks.push_back(MO->getRegMask());
2086 break;
2087
2088 case MachineOperand::MO_MachineBasicBlock:
2089 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2090 report("PHI operand is not in the CFG", MO, MONum);
2091 break;
2092
2093 case MachineOperand::MO_FrameIndex:
2094 if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2095 LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2096 int FI = MO->getIndex();
2097 LiveInterval &LI = LiveStks->getInterval(FI);
2098 SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2099
2100 bool stores = MI->mayStore();
2101 bool loads = MI->mayLoad();
2102 // For a memory-to-memory move, we need to check if the frame
2103 // index is used for storing or loading, by inspecting the
2104 // memory operands.
2105 if (stores && loads) {
2106 for (auto *MMO : MI->memoperands()) {
2107 const PseudoSourceValue *PSV = MMO->getPseudoValue();
2108 if (PSV == nullptr) continue;
2109 const FixedStackPseudoSourceValue *Value =
2110 dyn_cast<FixedStackPseudoSourceValue>(PSV);
2111 if (Value == nullptr) continue;
2112 if (Value->getFrameIndex() != FI) continue;
2113
2114 if (MMO->isStore())
2115 loads = false;
2116 else
2117 stores = false;
2118 break;
2119 }
2120 if (loads == stores)
2121 report("Missing fixed stack memoperand.", MI);
2122 }
2123 if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2124 report("Instruction loads from dead spill slot", MO, MONum);
2125 errs() << "Live stack: " << LI << '\n';
2126 }
2127 if (stores && !LI.liveAt(Idx.getRegSlot())) {
2128 report("Instruction stores to dead spill slot", MO, MONum);
2129 errs() << "Live stack: " << LI << '\n';
2130 }
2131 }
2132 break;
2133
2134 default:
2135 break;
2136 }
2137 }
2138
2139 void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2140 unsigned MONum, SlotIndex UseIdx,
2141 const LiveRange &LR,
2142 Register VRegOrUnit,
2143 LaneBitmask LaneMask) {
2144 LiveQueryResult LRQ = LR.Query(UseIdx);
2145   // Check if we have a segment at the use; note, however, that we only need
2146   // one live subregister range, the others may be dead.
2147 if (!LRQ.valueIn() && LaneMask.none()) {
2148 report("No live segment at use", MO, MONum);
2149 report_context_liverange(LR);
2150 report_context_vreg_regunit(VRegOrUnit);
2151 report_context(UseIdx);
2152 }
2153 if (MO->isKill() && !LRQ.isKill()) {
2154 report("Live range continues after kill flag", MO, MONum);
2155 report_context_liverange(LR);
2156 report_context_vreg_regunit(VRegOrUnit);
2157 if (LaneMask.any())
2158 report_context_lanemask(LaneMask);
2159 report_context(UseIdx);
2160 }
2161 }
2162
2163 void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
2164 unsigned MONum, SlotIndex DefIdx,
2165 const LiveRange &LR,
2166 Register VRegOrUnit,
2167 bool SubRangeCheck,
2168 LaneBitmask LaneMask) {
2169 if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
2170 assert(VNI && "NULL valno is not allowed");
2171 if (VNI->def != DefIdx) {
2172 report("Inconsistent valno->def", MO, MONum);
2173 report_context_liverange(LR);
2174 report_context_vreg_regunit(VRegOrUnit);
2175 if (LaneMask.any())
2176 report_context_lanemask(LaneMask);
2177 report_context(*VNI);
2178 report_context(DefIdx);
2179 }
2180 } else {
2181 report("No live segment at def", MO, MONum);
2182 report_context_liverange(LR);
2183 report_context_vreg_regunit(VRegOrUnit);
2184 if (LaneMask.any())
2185 report_context_lanemask(LaneMask);
2186 report_context(DefIdx);
2187 }
2188   // Check that, if the dead def flag is present, LiveInts agrees.
2189 if (MO->isDead()) {
2190 LiveQueryResult LRQ = LR.Query(DefIdx);
2191 if (!LRQ.isDeadDef()) {
2192 assert(Register::isVirtualRegister(VRegOrUnit) &&
2193 "Expecting a virtual register.");
2194 // A dead subreg def only tells us that the specific subreg is dead. There
2195 // could be other non-dead defs of other subregs, or we could have other
2196 // parts of the register being live through the instruction. So unless we
2197 // are checking liveness for a subrange it is ok for the live range to
2198 // continue, given that we have a dead def of a subregister.
2199 if (SubRangeCheck || MO->getSubReg() == 0) {
2200 report("Live range continues after dead def flag", MO, MONum);
2201 report_context_liverange(LR);
2202 report_context_vreg_regunit(VRegOrUnit);
2203 if (LaneMask.any())
2204 report_context_lanemask(LaneMask);
2205 }
2206 }
2207 }
2208 }
2209
2210 void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
2211 const MachineInstr *MI = MO->getParent();
2212 const Register Reg = MO->getReg();
2213
2214 // Both use and def operands can read a register.
2215 if (MO->readsReg()) {
2216 if (MO->isKill())
2217 addRegWithSubRegs(regsKilled, Reg);
2218
2219 // Check that LiveVars knows this kill (unless we are inside a bundle, in
2220 // which case we have already checked that LiveVars knows any kills on the
2221 // bundle header instead).
2222 if (LiveVars && Register::isVirtualRegister(Reg) && MO->isKill() &&
2223 !MI->isBundledWithPred()) {
2224 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2225 if (!is_contained(VI.Kills, MI))
2226 report("Kill missing from LiveVariables", MO, MONum);
2227 }
2228
2229 // Check LiveInts liveness and kill.
2230 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2231 SlotIndex UseIdx = LiveInts->getInstructionIndex(*MI);
2232 // Check the cached regunit intervals.
2233 if (Reg.isPhysical() && !isReserved(Reg)) {
2234 for (MCRegUnitIterator Units(Reg.asMCReg(), TRI); Units.isValid();
2235 ++Units) {
2236 if (MRI->isReservedRegUnit(*Units))
2237 continue;
2238 if (const LiveRange *LR = LiveInts->getCachedRegUnit(*Units))
2239 checkLivenessAtUse(MO, MONum, UseIdx, *LR, *Units);
2240 }
2241 }
2242
2243 if (Register::isVirtualRegister(Reg)) {
2244 if (LiveInts->hasInterval(Reg)) {
2245 // This is a virtual register interval.
2246 const LiveInterval &LI = LiveInts->getInterval(Reg);
2247 checkLivenessAtUse(MO, MONum, UseIdx, LI, Reg);
2248
2249 if (LI.hasSubRanges() && !MO->isDef()) {
2250 unsigned SubRegIdx = MO->getSubReg();
2251 LaneBitmask MOMask = SubRegIdx != 0
2252 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2253 : MRI->getMaxLaneMaskForVReg(Reg);
2254 LaneBitmask LiveInMask;
2255 for (const LiveInterval::SubRange &SR : LI.subranges()) {
2256 if ((MOMask & SR.LaneMask).none())
2257 continue;
2258 checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
2259 LiveQueryResult LRQ = SR.Query(UseIdx);
2260 if (LRQ.valueIn())
2261 LiveInMask |= SR.LaneMask;
2262 }
2263           // At least part of the register has to be live at the use.
2264 if ((LiveInMask & MOMask).none()) {
2265 report("No live subrange at use", MO, MONum);
2266 report_context(LI);
2267 report_context(UseIdx);
2268 }
2269 }
2270 } else {
2271 report("Virtual register has no live interval", MO, MONum);
2272 }
2273 }
2274 }
2275
2276 // Use of a dead register.
2277 if (!regsLive.count(Reg)) {
2278 if (Register::isPhysicalRegister(Reg)) {
2279 // Reserved registers may be used even when 'dead'.
2280 bool Bad = !isReserved(Reg);
2281 // We are fine if just any subregister has a defined value.
2282 if (Bad) {
2283
2284 for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
2285 if (regsLive.count(SubReg)) {
2286 Bad = false;
2287 break;
2288 }
2289 }
2290 }
2291         // If there is an additional implicit-use of a super register we stop
2292         // here. By definition we are fine if the super register is not
2293         // (completely) dead; if the complete super register is dead we will
2294         // get a report for its operand.
2295 if (Bad) {
2296 for (const MachineOperand &MOP : MI->uses()) {
2297 if (!MOP.isReg() || !MOP.isImplicit())
2298 continue;
2299
2300 if (!Register::isPhysicalRegister(MOP.getReg()))
2301 continue;
2302
2303 if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
2304 Bad = false;
2305 }
2306 }
2307 if (Bad)
2308 report("Using an undefined physical register", MO, MONum);
2309 } else if (MRI->def_empty(Reg)) {
2310 report("Reading virtual register without a def", MO, MONum);
2311 } else {
2312 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2313 // We don't know which virtual registers are live in, so only complain
2314 // if vreg was killed in this MBB. Otherwise keep track of vregs that
2315 // must be live in. PHI instructions are handled separately.
2316 if (MInfo.regsKilled.count(Reg))
2317 report("Using a killed virtual register", MO, MONum);
2318 else if (!MI->isPHI())
2319 MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
2320 }
2321 }
2322 }
2323
2324 if (MO->isDef()) {
2325 // Register defined.
2326 // TODO: verify that earlyclobber ops are not used.
2327 if (MO->isDead())
2328 addRegWithSubRegs(regsDead, Reg);
2329 else
2330 addRegWithSubRegs(regsDefined, Reg);
2331
2332 // Verify SSA form.
2333 if (MRI->isSSA() && Register::isVirtualRegister(Reg) &&
2334 std::next(MRI->def_begin(Reg)) != MRI->def_end())
2335 report("Multiple virtual register defs in SSA form", MO, MONum);
2336
2337 // Check LiveInts for a live segment, but only for virtual registers.
2338 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2339 SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
2340 DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
2341
2342 if (Register::isVirtualRegister(Reg)) {
2343 if (LiveInts->hasInterval(Reg)) {
2344 const LiveInterval &LI = LiveInts->getInterval(Reg);
2345 checkLivenessAtDef(MO, MONum, DefIdx, LI, Reg);
2346
2347 if (LI.hasSubRanges()) {
2348 unsigned SubRegIdx = MO->getSubReg();
2349 LaneBitmask MOMask = SubRegIdx != 0
2350 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2351 : MRI->getMaxLaneMaskForVReg(Reg);
2352 for (const LiveInterval::SubRange &SR : LI.subranges()) {
2353 if ((SR.LaneMask & MOMask).none())
2354 continue;
2355 checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
2356 }
2357 }
2358 } else {
2359           report("Virtual register has no live interval", MO, MONum);
2360 }
2361 }
2362 }
2363 }
2364 }
2365
2366 // This function gets called after visiting all instructions in a bundle. The
2367 // argument points to the bundle header.
2368 // Normal stand-alone instructions are also considered 'bundles', and this
2369 // function is called for all of them.
2370 void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
2371 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2372 set_union(MInfo.regsKilled, regsKilled);
2373 set_subtract(regsLive, regsKilled); regsKilled.clear();
2374 // Kill any masked registers.
2375 while (!regMasks.empty()) {
2376 const uint32_t *Mask = regMasks.pop_back_val();
2377 for (Register Reg : regsLive)
2378 if (Reg.isPhysical() &&
2379 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
2380 regsDead.push_back(Reg);
2381 }
2382 set_subtract(regsLive, regsDead); regsDead.clear();
2383 set_union(regsLive, regsDefined); regsDefined.clear();
2384 }
2385
2386 void
2387 MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
2388 MBBInfoMap[MBB].regsLiveOut = regsLive;
2389 regsLive.clear();
2390
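  // With slot indexes available, the block's end index must lie strictly
  // after the index of its last instruction.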
2391 if (Indexes) {
2392 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
2393 if (!(stop > lastIndex)) {
2394 report("Block ends before last instruction index", MBB);
2395 errs() << "Block ends at " << stop
2396 << " last instruction was at " << lastIndex << '\n';
2397 }
2398 lastIndex = stop;
2399 }
2400 }
2401
2402 namespace {
2403 // This implements a set of registers that serves as a filter: can filter other
2404 // sets by passing through elements not in the filter and blocking those that
2405 // are. Any filter implicitly includes the full set of physical registers upon
2406 // creation, thus filtering them all out. The filter itself as a set only grows,
2407 // and needs to be as efficient as possible.
2408 struct VRegFilter {
2409 // Add elements to the filter itself. \pre Input set \p FromRegSet must have
2410 // no duplicates. Both virtual and physical registers are fine.
2411   template <typename RegSetT> void add(const RegSetT &FromRegSet) {
2412 SmallVector<Register, 0> VRegsBuffer;
2413 filterAndAdd(FromRegSet, VRegsBuffer);
2414 }
2415 // Filter \p FromRegSet through the filter and append passed elements into \p
2416 // ToVRegs. All elements appended are then added to the filter itself.
2417 // \returns true if anything changed.
2418 template <typename RegSetT>
2419   bool filterAndAdd(const RegSetT &FromRegSet,
2420 SmallVectorImpl<Register> &ToVRegs) {
2421 unsigned SparseUniverse = Sparse.size();
2422 unsigned NewSparseUniverse = SparseUniverse;
2423 unsigned NewDenseSize = Dense.size();
2424 size_t Begin = ToVRegs.size();
2425 for (Register Reg : FromRegSet) {
2426 if (!Reg.isVirtual())
2427 continue;
2428 unsigned Index = Register::virtReg2Index(Reg);
2429 if (Index < SparseUniverseMax) {
2430 if (Index < SparseUniverse && Sparse.test(Index))
2431 continue;
2432 NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
2433 } else {
2434 if (Dense.count(Reg))
2435 continue;
2436 ++NewDenseSize;
2437 }
2438 ToVRegs.push_back(Reg);
2439 }
2440 size_t End = ToVRegs.size();
2441 if (Begin == End)
2442 return false;
2443 // Reserving space in sets once performs better than doing so continuously
2444 // and pays easily for double look-ups (even in Dense with SparseUniverseMax
2445 // tuned all the way down) and double iteration (the second one is over a
2446 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
2447 Sparse.resize(NewSparseUniverse);
2448 Dense.reserve(NewDenseSize);
2449 for (unsigned I = Begin; I < End; ++I) {
2450 Register Reg = ToVRegs[I];
2451 unsigned Index = Register::virtReg2Index(Reg);
2452 if (Index < SparseUniverseMax)
2453 Sparse.set(Index);
2454 else
2455 Dense.insert(Reg);
2456 }
2457 return true;
2458 }
2459
2460 private:
2461 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
2462   // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond
2463   // are tracked by Dense. The only purpose of the threshold and the Dense set
2464   // is to have a reasonably growing memory usage in pathological cases (large
2465   // number of very sparse VRegFilter instances live at the same time). In
2466   // practice even in the worst-by-execution-time cases having all elements
2467   // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
2468   // space efficient than if tracked by Dense. The threshold is set to keep the
2469   // worst-case memory usage within 2x of figures determined empirically for
2470   // "all Dense" scenario in such worst-by-execution-time cases.
2471 BitVector Sparse;
2472 DenseSet<unsigned> Dense;
2473 };
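// Example (hypothetical, for illustration only): seed a filter with one
// block's killed registers, then collect a predecessor's live-out vregs that
// make it through the filter:
//   VRegFilter F;
//   F.add(Info.regsKilled);                        // now filtered out
//   SmallVector<Register, 0> Passed;
//   F.filterAndAdd(PredInfo.regsLiveOut, Passed);  // appends survivors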
2474
2475 // Implements both a transfer function and a (binary, in-place) join operator
2476 // for a dataflow over register sets with set union join and filtering transfer
2477 // (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
2478 // Maintains out_b as its state, allowing for O(n) iteration over it at any
2479 // time, where n is the size of the set (as opposed to O(U) where U is the
2480 // universe). filter_b implicitly contains all physical registers at all times.
2481 class FilteringVRegSet {
2482 VRegFilter Filter;
2483 SmallVector<Register, 0> VRegs;
2484
2485 public:
2486 // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
2487 // Both virtual and physical registers are fine.
2488   template <typename RegSetT> void addToFilter(const RegSetT &RS) {
2489 Filter.add(RS);
2490 }
2491 // Passes \p RS through the filter_b (transfer function) and adds what's left
2492 // to itself (out_b).
2493   template <typename RegSetT> bool add(const RegSetT &RS) {
2494     // Double-duty the Filter: to keep VRegs a set (and the join operation
2495     // a set union), just add everything added here to the Filter as well.
2496 return Filter.filterAndAdd(RS, VRegs);
2497 }
2498 using const_iterator = decltype(VRegs)::const_iterator;
2499   const_iterator begin() const { return VRegs.begin(); }
2500   const_iterator end() const { return VRegs.end(); }
2501   size_t size() const { return VRegs.size(); }
2502 };
2503 } // namespace
2504
2505 // Calculate the largest possible vregsPassed sets. These are the registers that
2506 // can pass through an MBB live, but may not be live every time. It is assumed
2507 // that all vregsPassed sets are empty before the call.
2508 void MachineVerifier::calcRegsPassed() {
2509 if (MF->empty())
2510 // ReversePostOrderTraversal doesn't handle empty functions.
2511 return;
2512
2513 for (const MachineBasicBlock *MB :
2514 ReversePostOrderTraversal<const MachineFunction *>(MF)) {
2515 FilteringVRegSet VRegs;
2516 BBInfo &Info = MBBInfoMap[MB];
2517 assert(Info.reachable);
2518
2519 VRegs.addToFilter(Info.regsKilled);
2520 VRegs.addToFilter(Info.regsLiveOut);
2521 for (const MachineBasicBlock *Pred : MB->predecessors()) {
2522 const BBInfo &PredInfo = MBBInfoMap[Pred];
2523 if (!PredInfo.reachable)
2524 continue;
2525
2526 VRegs.add(PredInfo.regsLiveOut);
2527 VRegs.add(PredInfo.vregsPassed);
2528 }
2529 Info.vregsPassed.reserve(VRegs.size());
2530 Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
2531 }
2532 }
2533
2534 // Calculate the set of virtual registers that must be passed through each basic
2535 // block in order to satisfy the requirements of successor blocks. This is very
2536 // similar to calcRegsPassed, only backwards.
2537 void MachineVerifier::calcRegsRequired() {
2538 // First push live-in regs to predecessors' vregsRequired.
2539 SmallPtrSet<const MachineBasicBlock*, 8> todo;
2540 for (const auto &MBB : *MF) {
2541 BBInfo &MInfo = MBBInfoMap[&MBB];
2542 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2543 BBInfo &PInfo = MBBInfoMap[Pred];
2544 if (PInfo.addRequired(MInfo.vregsLiveIn))
2545 todo.insert(Pred);
2546 }
2547
2548 // Handle the PHI node.
2549 for (const MachineInstr &MI : MBB.phis()) {
2550 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
2551         // Skip operands that are not registers or do not read the register
2552 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
2553 continue;
2554
2555 // Get register and predecessor for one PHI edge.
2556 Register Reg = MI.getOperand(i).getReg();
2557 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
2558
2559 BBInfo &PInfo = MBBInfoMap[Pred];
2560 if (PInfo.addRequired(Reg))
2561 todo.insert(Pred);
2562 }
2563 }
2564 }
2565
2566 // Iteratively push vregsRequired to predecessors. This will converge to the
2567 // same final state regardless of DenseSet iteration order.
2568 while (!todo.empty()) {
2569 const MachineBasicBlock *MBB = *todo.begin();
2570 todo.erase(MBB);
2571 BBInfo &MInfo = MBBInfoMap[MBB];
2572 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
2573 if (Pred == MBB)
2574 continue;
2575 BBInfo &SInfo = MBBInfoMap[Pred];
2576 if (SInfo.addRequired(MInfo.vregsRequired))
2577 todo.insert(Pred);
2578 }
2579 }
2580 }
2581
2582 // Check PHI instructions at the beginning of MBB. It is assumed that
2583 // calcRegsPassed has been run so BBInfo::isLiveOut is valid.
2584 void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
2585 BBInfo &MInfo = MBBInfoMap[&MBB];
2586
2587 SmallPtrSet<const MachineBasicBlock*, 8> seen;
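  // 'seen' records the predecessor blocks each PHI names, so CFG predecessors
  // with no incoming value can be diagnosed below.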
2588 for (const MachineInstr &Phi : MBB) {
2589 if (!Phi.isPHI())
2590 break;
2591 seen.clear();
2592
2593 const MachineOperand &MODef = Phi.getOperand(0);
2594 if (!MODef.isReg() || !MODef.isDef()) {
2595 report("Expected first PHI operand to be a register def", &MODef, 0);
2596 continue;
2597 }
2598 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
2599 MODef.isEarlyClobber() || MODef.isDebug())
2600 report("Unexpected flag on PHI operand", &MODef, 0);
2601 Register DefReg = MODef.getReg();
2602 if (!Register::isVirtualRegister(DefReg))
2603 report("Expected first PHI operand to be a virtual register", &MODef, 0);
2604
2605 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
2606 const MachineOperand &MO0 = Phi.getOperand(I);
2607 if (!MO0.isReg()) {
2608 report("Expected PHI operand to be a register", &MO0, I);
2609 continue;
2610 }
2611 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
2612 MO0.isDebug() || MO0.isTied())
2613 report("Unexpected flag on PHI operand", &MO0, I);
2614
2615 const MachineOperand &MO1 = Phi.getOperand(I + 1);
2616 if (!MO1.isMBB()) {
2617 report("Expected PHI operand to be a basic block", &MO1, I + 1);
2618 continue;
2619 }
2620
2621 const MachineBasicBlock &Pre = *MO1.getMBB();
2622 if (!Pre.isSuccessor(&MBB)) {
2623 report("PHI input is not a predecessor block", &MO1, I + 1);
2624 continue;
2625 }
2626
2627 if (MInfo.reachable) {
2628 seen.insert(&Pre);
2629 BBInfo &PrInfo = MBBInfoMap[&Pre];
2630 if (!MO0.isUndef() && PrInfo.reachable &&
2631 !PrInfo.isLiveOut(MO0.getReg()))
2632 report("PHI operand is not live-out from predecessor", &MO0, I);
2633 }
2634 }
2635
2636 // Did we see all predecessors?
2637 if (MInfo.reachable) {
2638 for (MachineBasicBlock *Pred : MBB.predecessors()) {
2639 if (!seen.count(Pred)) {
2640 report("Missing PHI operand", &Phi);
2641 errs() << printMBBReference(*Pred)
2642 << " is a predecessor according to the CFG.\n";
2643 }
2644 }
2645 }
2646 }
2647 }
2648
2649 void MachineVerifier::visitMachineFunctionAfter() {
2650 calcRegsPassed();
2651
2652 for (const MachineBasicBlock &MBB : *MF)
2653 checkPHIOps(MBB);
2654
2655 // Now check liveness info if available
2656 calcRegsRequired();
2657
2658 // Check for killed virtual registers that should be live out.
2659 for (const auto &MBB : *MF) {
2660 BBInfo &MInfo = MBBInfoMap[&MBB];
2661 for (Register VReg : MInfo.vregsRequired)
2662 if (MInfo.regsKilled.count(VReg)) {
2663 report("Virtual register killed in block, but needed live out.", &MBB);
2664 errs() << "Virtual register " << printReg(VReg)
2665 << " is used after the block.\n";
2666 }
2667 }
2668
2669 if (!MF->empty()) {
2670 BBInfo &MInfo = MBBInfoMap[&MF->front()];
2671 for (Register VReg : MInfo.vregsRequired) {
2672 report("Virtual register defs don't dominate all uses.", MF);
2673 report_context_vreg(VReg);
2674 }
2675 }
2676
2677 if (LiveVars)
2678 verifyLiveVariables();
2679 if (LiveInts)
2680 verifyLiveIntervals();
2681
2682   // Check live-in list of each MBB. If a register is live into MBB, check
2683   // that the register is in regsLiveOut of each predecessor block. Since
2684   // this must come from a definition in the predecessor or its live-in
2685   // list, this will catch a live-through case where the predecessor does not
2686   // have the register in its live-in list. This currently only checks
2687   // registers that have no aliases, are not allocatable, and are not
2688   // reserved, such as a condition-code register.
2689 if (MRI->tracksLiveness())
2690 for (const auto &MBB : *MF)
2691 for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
2692 MCPhysReg LiveInReg = P.PhysReg;
2693 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
2694 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
2695 continue;
2696 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2697 BBInfo &PInfo = MBBInfoMap[Pred];
2698 if (!PInfo.regsLiveOut.count(LiveInReg)) {
2699 report("Live in register not found to be live out from predecessor.",
2700 &MBB);
2701 errs() << TRI->getName(LiveInReg)
2702 << " not found to be live out from "
2703 << printMBBReference(*Pred) << "\n";
2704 }
2705 }
2706 }
2707
2708 for (auto CSInfo : MF->getCallSitesInfo())
2709 if (!CSInfo.first->isCall())
2710 report("Call site info referencing instruction that is not call", MF);
2711
2712 // If there's debug-info, check that we don't have any duplicate value
2713 // tracking numbers.
2714 if (MF->getFunction().getSubprogram()) {
2715 DenseSet<unsigned> SeenNumbers;
2716 for (auto &MBB : *MF) {
2717 for (auto &MI : MBB) {
2718 if (auto Num = MI.peekDebugInstrNum()) {
2719 auto Result = SeenNumbers.insert((unsigned)Num);
2720 if (!Result.second)
2721 report("Instruction has a duplicated value tracking number", &MI);
2722 }
2723 }
2724 }
2725 }
2726 }
2727
2728 void MachineVerifier::verifyLiveVariables() {
2729 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
2730 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2731 Register Reg = Register::index2VirtReg(I);
2732 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2733 for (const auto &MBB : *MF) {
2734 BBInfo &MInfo = MBBInfoMap[&MBB];
2735
2736 // Our vregsRequired should be identical to LiveVariables' AliveBlocks
2737 if (MInfo.vregsRequired.count(Reg)) {
2738 if (!VI.AliveBlocks.test(MBB.getNumber())) {
2739 report("LiveVariables: Block missing from AliveBlocks", &MBB);
2740 errs() << "Virtual register " << printReg(Reg)
2741 << " must be live through the block.\n";
2742 }
2743 } else {
2744 if (VI.AliveBlocks.test(MBB.getNumber())) {
2745 report("LiveVariables: Block should not be in AliveBlocks", &MBB);
2746 errs() << "Virtual register " << printReg(Reg)
2747 << " is not needed live through the block.\n";
2748 }
2749 }
2750 }
2751 }
2752 }
2753
2754 void MachineVerifier::verifyLiveIntervals() {
2755 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
2756 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2757 Register Reg = Register::index2VirtReg(I);
2758
2759 // Spilling and splitting may leave unused registers around. Skip them.
2760 if (MRI->reg_nodbg_empty(Reg))
2761 continue;
2762
2763 if (!LiveInts->hasInterval(Reg)) {
2764 report("Missing live interval for virtual register", MF);
2765 errs() << printReg(Reg, TRI) << " still has defs or uses\n";
2766 continue;
2767 }
2768
2769 const LiveInterval &LI = LiveInts->getInterval(Reg);
2770 assert(Reg == LI.reg() && "Invalid reg to interval mapping");
2771 verifyLiveInterval(LI);
2772 }
2773
2774 // Verify all the cached regunit intervals.
2775 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
2776 if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
2777 verifyLiveRange(*LR, i);
2778 }
2779
2780 void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
2781 const VNInfo *VNI, Register Reg,
2782 LaneBitmask LaneMask) {
2783 if (VNI->isUnused())
2784 return;
2785
2786 const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
2787
2788 if (!DefVNI) {
2789 report("Value not live at VNInfo def and not marked unused", MF);
2790 report_context(LR, Reg, LaneMask);
2791 report_context(*VNI);
2792 return;
2793 }
2794
2795 if (DefVNI != VNI) {
2796 report("Live segment at def has different VNInfo", MF);
2797 report_context(LR, Reg, LaneMask);
2798 report_context(*VNI);
2799 return;
2800 }
2801
2802 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
2803 if (!MBB) {
2804 report("Invalid VNInfo definition index", MF);
2805 report_context(LR, Reg, LaneMask);
2806 report_context(*VNI);
2807 return;
2808 }
2809
2810 if (VNI->isPHIDef()) {
2811 if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
2812 report("PHIDef VNInfo is not defined at MBB start", MBB);
2813 report_context(LR, Reg, LaneMask);
2814 report_context(*VNI);
2815 }
2816 return;
2817 }
2818
2819 // Non-PHI def.
2820 const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
2821 if (!MI) {
2822 report("No instruction at VNInfo def index", MBB);
2823 report_context(LR, Reg, LaneMask);
2824 report_context(*VNI);
2825 return;
2826 }
2827
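  // For a concrete vreg or regunit, the defining instruction must contain an
  // operand that actually writes it (respecting LaneMask for subreg defs).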
2828 if (Reg != 0) {
2829 bool hasDef = false;
2830 bool isEarlyClobber = false;
2831 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
2832 if (!MOI->isReg() || !MOI->isDef())
2833 continue;
2834 if (Register::isVirtualRegister(Reg)) {
2835 if (MOI->getReg() != Reg)
2836 continue;
2837 } else {
2838 if (!Register::isPhysicalRegister(MOI->getReg()) ||
2839 !TRI->hasRegUnit(MOI->getReg(), Reg))
2840 continue;
2841 }
2842 if (LaneMask.any() &&
2843 (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
2844 continue;
2845 hasDef = true;
2846 if (MOI->isEarlyClobber())
2847 isEarlyClobber = true;
2848 }
2849
2850 if (!hasDef) {
2851 report("Defining instruction does not modify register", MI);
2852 report_context(LR, Reg, LaneMask);
2853 report_context(*VNI);
2854 }
2855
2856 // Early clobber defs begin at USE slots, but other defs must begin at
2857 // DEF slots.
2858 if (isEarlyClobber) {
2859 if (!VNI->def.isEarlyClobber()) {
2860 report("Early clobber def must be at an early-clobber slot", MBB);
2861 report_context(LR, Reg, LaneMask);
2862 report_context(*VNI);
2863 }
2864 } else if (!VNI->def.isRegister()) {
2865 report("Non-PHI, non-early clobber def must be at a register slot", MBB);
2866 report_context(LR, Reg, LaneMask);
2867 report_context(*VNI);
2868 }
2869 }
2870 }
2871
2872 void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
2873 const LiveRange::const_iterator I,
2874 Register Reg,
2875 LaneBitmask LaneMask) {
2876 const LiveRange::Segment &S = *I;
2877 const VNInfo *VNI = S.valno;
2878 assert(VNI && "Live segment has no valno");
2879
2880 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
2881 report("Foreign valno in live segment", MF);
2882 report_context(LR, Reg, LaneMask);
2883 report_context(S);
2884 report_context(*VNI);
2885 }
2886
2887 if (VNI->isUnused()) {
2888 report("Live segment valno is marked unused", MF);
2889 report_context(LR, Reg, LaneMask);
2890 report_context(S);
2891 }
2892
2893 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
2894 if (!MBB) {
2895 report("Bad start of live segment, no basic block", MF);
2896 report_context(LR, Reg, LaneMask);
2897 report_context(S);
2898 return;
2899 }
2900 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
2901 if (S.start != MBBStartIdx && S.start != VNI->def) {
2902 report("Live segment must begin at MBB entry or valno def", MBB);
2903 report_context(LR, Reg, LaneMask);
2904 report_context(S);
2905 }
2906
2907 const MachineBasicBlock *EndMBB =
2908 LiveInts->getMBBFromIndex(S.end.getPrevSlot());
2909 if (!EndMBB) {
2910 report("Bad end of live segment, no basic block", MF);
2911 report_context(LR, Reg, LaneMask);
2912 report_context(S);
2913 return;
2914 }
2915
2916 // No more checks for live-out segments.
2917 if (S.end == LiveInts->getMBBEndIdx(EndMBB))
2918 return;
2919
2920   // RegUnit intervals are allowed to contain dead PHI-defs.
2921 if (!Register::isVirtualRegister(Reg) && VNI->isPHIDef() &&
2922 S.start == VNI->def && S.end == VNI->def.getDeadSlot())
2923 return;
2924
2925 // The live segment is ending inside EndMBB
2926 const MachineInstr *MI =
2927 LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
2928 if (!MI) {
2929 report("Live segment doesn't end at a valid instruction", EndMBB);
2930 report_context(LR, Reg, LaneMask);
2931 report_context(S);
2932 return;
2933 }
2934
2935 // The block slot must refer to a basic block boundary.
2936 if (S.end.isBlock()) {
2937 report("Live segment ends at B slot of an instruction", EndMBB);
2938 report_context(LR, Reg, LaneMask);
2939 report_context(S);
2940 }
2941
2942 if (S.end.isDead()) {
2943 // Segment ends on the dead slot.
2944 // That means there must be a dead def.
2945 if (!SlotIndex::isSameInstr(S.start, S.end)) {
2946 report("Live segment ending at dead slot spans instructions", EndMBB);
2947 report_context(LR, Reg, LaneMask);
2948 report_context(S);
2949 }
2950 }
2951
2952 // After tied operands are rewritten, a live segment can only end at an
2953 // early-clobber slot if it is being redefined by an early-clobber def.
2954 // TODO: Before tied operands are rewritten, a live segment can only end at an
2955 // early-clobber slot if the last use is tied to an early-clobber def.
2956 if (MF->getProperties().hasProperty(
2957 MachineFunctionProperties::Property::TiedOpsRewritten) &&
2958 S.end.isEarlyClobber()) {
2959 if (I+1 == LR.end() || (I+1)->start != S.end) {
2960 report("Live segment ending at early clobber slot must be "
2961 "redefined by an EC def in the same instruction", EndMBB);
2962 report_context(LR, Reg, LaneMask);
2963 report_context(S);
2964 }
2965 }
2966
2967 // The following checks only apply to virtual registers. Physreg liveness
2968 // is too weird to check.
2969 if (Register::isVirtualRegister(Reg)) {
2970 // A live segment can end with either a redefinition, a kill flag on a
2971 // use, or a dead flag on a def.
2972 bool hasRead = false;
2973 bool hasSubRegDef = false;
2974 bool hasDeadDef = false;
2975 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
2976 if (!MOI->isReg() || MOI->getReg() != Reg)
2977 continue;
2978 unsigned Sub = MOI->getSubReg();
2979 LaneBitmask SLM = Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub)
2980 : LaneBitmask::getAll();
2981 if (MOI->isDef()) {
2982 if (Sub != 0) {
2983 hasSubRegDef = true;
2984 // An operand %0:sub0 reads %0:sub1..n. Invert the lane
2985 // mask for subregister defs. Read-undef defs will be handled by
2986 // readsReg below.
2987 SLM = ~SLM;
2988 }
2989 if (MOI->isDead())
2990 hasDeadDef = true;
2991 }
2992 if (LaneMask.any() && (LaneMask & SLM).none())
2993 continue;
2994 if (MOI->readsReg())
2995 hasRead = true;
2996 }
2997 if (S.end.isDead()) {
2998 // Make sure that the corresponding machine operand for a "dead" live
2999 // range has the dead flag. We cannot perform this check for subregister
3000 // liveranges as partially dead values are allowed.
3001 if (LaneMask.none() && !hasDeadDef) {
3002 report("Instruction ending live segment on dead slot has no dead flag",
3003 MI);
3004 report_context(LR, Reg, LaneMask);
3005 report_context(S);
3006 }
3007 } else {
3008 if (!hasRead) {
3009 // When tracking subregister liveness, the main range must start new
3010 // values on partial register writes, even if there is no read.
3011 if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
3012 !hasSubRegDef) {
3013 report("Instruction ending live segment doesn't read the register",
3014 MI);
3015 report_context(LR, Reg, LaneMask);
3016 report_context(S);
3017 }
3018 }
3019 }
3020 }
3021
3022 // Now check all the basic blocks in this live segment.
3023 MachineFunction::const_iterator MFI = MBB->getIterator();
3024 // Is this live segment the beginning of a non-PHIDef VN?
3025 if (S.start == VNI->def && !VNI->isPHIDef()) {
3026 // Not live-in to any blocks.
3027 if (MBB == EndMBB)
3028 return;
3029 // Skip this block.
3030 ++MFI;
3031 }
3032
  SmallVector<SlotIndex, 4> Undefs;
  if (LaneMask.any()) {
    LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
    OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
  }

  while (true) {
    assert(LiveInts->isLiveInToMBB(LR, &*MFI));
    // We don't know how to track physregs into a landing pad.
    if (!Register::isVirtualRegister(Reg) && MFI->isEHPad()) {
      if (&*MFI == EndMBB)
        break;
      ++MFI;
      continue;
    }

    // Is VNI a PHI-def in the current block?
    bool IsPHI = VNI->isPHIDef() &&
                 VNI->def == LiveInts->getMBBStartIdx(&*MFI);

    // Check that VNI is live-out of all predecessors.
    for (const MachineBasicBlock *Pred : MFI->predecessors()) {
      SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
      // For a predecessor of a landing pad, the live-out value is taken at
      // the last call instruction rather than at the block end.
      if (MFI->isEHPad()) {
        for (auto I = Pred->rbegin(), E = Pred->rend(); I != E; ++I) {
          if (I->isCall()) {
            PEnd = Indexes->getInstructionIndex(*I).getBoundaryIndex();
            break;
          }
        }
      }
      const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);

      // All predecessors must have a live-out value. However, for a phi
      // instruction with subregister intervals, only one of the subregisters
      // (not necessarily the current one) needs to be defined.
      if (!PVNI && (LaneMask.none() || !IsPHI)) {
        if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
          continue;
        report("Register not marked live out of predecessor", Pred);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
        errs() << " live into " << printMBBReference(*MFI) << '@'
               << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
               << PEnd << '\n';
        continue;
      }

      // Only PHI-defs can take different predecessor values.
      if (!IsPHI && PVNI != VNI) {
        report("Different value live out of predecessor", Pred);
        report_context(LR, Reg, LaneMask);
        errs() << "Valno #" << PVNI->id << " live out of "
               << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
               << VNI->id << " live into " << printMBBReference(*MFI) << '@'
               << LiveInts->getMBBStartIdx(&*MFI) << '\n';
      }
    }
    if (&*MFI == EndMBB)
      break;
    ++MFI;
  }
}

void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
                                      LaneBitmask LaneMask) {
  for (const VNInfo *VNI : LR.valnos)
    verifyLiveRangeValue(LR, VNI, Reg, LaneMask);

  for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
    verifyLiveRangeSegment(LR, I, Reg, LaneMask);
}

void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
  Register Reg = LI.reg();
  assert(Register::isVirtualRegister(Reg));
  verifyLiveRange(LI, Reg);

  LaneBitmask Mask;
  LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
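  // Mask accumulates the union of the lane masks of the subranges seen so
  // far; any lane claimed by two subranges is reported as an overlap below.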
  for (const LiveInterval::SubRange &SR : LI.subranges()) {
    if ((Mask & SR.LaneMask).any()) {
      report("Lane masks of sub ranges overlap in live interval", MF);
      report_context(LI);
    }
    if ((SR.LaneMask & ~MaxMask).any()) {
      report("Subrange lanemask is invalid", MF);
      report_context(LI);
    }
    if (SR.empty()) {
      report("Subrange must not be empty", MF);
      report_context(SR, LI.reg(), SR.LaneMask);
    }
    Mask |= SR.LaneMask;
    verifyLiveRange(SR, LI.reg(), SR.LaneMask);
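    // Every segment of a subrange must be contained within a segment of the
    // main range; anything the main range does not cover is an error.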
    if (!LI.covers(SR)) {
      report("A Subrange is not covered by the main range", MF);
      report_context(LI);
    }
  }

  // Check that the live interval has only one connected component.
  ConnectedVNInfoEqClasses ConEQ(*LiveInts);
  unsigned NumComp = ConEQ.Classify(LI);
  if (NumComp > 1) {
    report("Multiple connected components in live interval", MF);
    report_context(LI);
    for (unsigned comp = 0; comp != NumComp; ++comp) {
      errs() << comp << ": valnos";
      for (const VNInfo *I : LI.valnos)
        if (comp == ConEQ.getEqClass(I))
          errs() << ' ' << I->id;
      errs() << '\n';
    }
  }
}

namespace {

// FrameSetup and FrameDestroy instructions can both carry a zero adjustment,
// so a single integer cannot tell a pending FrameSetup from a completed
// FrameDestroy when the value is zero. A bool plus an integer is used to
// capture the stack state instead.
struct StackStateOfBB {
  StackStateOfBB() = default;
  StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup) :
    EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
    ExitIsSetup(ExitSetup) {}

  // Can be negative, which means we are setting up a frame.
  int EntryValue = 0;
  int ExitValue = 0;
  bool EntryIsSetup = false;
  bool ExitIsSetup = false;
};
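// For example, a block that executes FrameSetup <16> and falls through has
// exit state (-16, true); the matching FrameDestroy <16> restores the state
// to (0, false).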

} // end anonymous namespace

/// Make sure that, on every path through the CFG, a FrameSetup <n> is always
/// followed by a FrameDestroy <n>, that stack adjustments are identical on
/// all CFG edges to a merge point, and that the frame is destroyed at the
/// end of a return block.
void MachineVerifier::verifyStackFrame() {
  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
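  // On many targets these are the ADJCALLSTACKDOWN/ADJCALLSTACKUP pseudo
  // instructions. A target that defines neither reports ~0u for both, in
  // which case there is no call frame information to verify.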
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

  SmallVector<StackStateOfBB, 8> SPState;
  SPState.resize(MF->getNumBlockIDs());
  df_iterator_default_set<const MachineBasicBlock*> Reachable;
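  // Reachable doubles as the DFS visited set: blocks that are never visited
  // get no stack state, so the consistency checks below skip them.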

  // Visit the MBBs in DFS order.
  for (df_ext_iterator<const MachineFunction *,
                       df_iterator_default_set<const MachineBasicBlock *>>
       DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
       DFI != DFE; ++DFI) {
    const MachineBasicBlock *MBB = *DFI;

    StackStateOfBB BBState;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor should already have been visited.");
      BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
      BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
      BBState.ExitValue = BBState.EntryValue;
      BBState.ExitIsSetup = BBState.EntryIsSetup;
    }
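    // The entry state is inherited from the DFS stack predecessor; edges
    // from other CFG predecessors are checked for consistency further down.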

    // Update stack state by checking contents of MBB.
    for (const auto &I : *MBB) {
      if (I.getOpcode() == FrameSetupOpcode) {
        if (BBState.ExitIsSetup)
          report("FrameSetup is after another FrameSetup", &I);
        BBState.ExitValue -= TII->getFrameTotalSize(I);
        BBState.ExitIsSetup = true;
      }

      if (I.getOpcode() == FrameDestroyOpcode) {
        int Size = TII->getFrameTotalSize(I);
        if (!BBState.ExitIsSetup)
          report("FrameDestroy is not after a FrameSetup", &I);
        int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
                                               BBState.ExitValue;
        if (BBState.ExitIsSetup && AbsSPAdj != Size) {
          report("FrameDestroy <n> is after FrameSetup <m>", &I);
          errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
                 << AbsSPAdj << ">.\n";
        }
        BBState.ExitValue += Size;
        BBState.ExitIsSetup = false;
      }
    }
    SPState[MBB->getNumber()] = BBState;

    // Make sure the exit state of any predecessor is consistent with the
    // entry state.
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (Reachable.count(Pred) &&
          (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
           SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
        report("The exit stack state of a predecessor is inconsistent.", MBB);
        errs() << "Predecessor " << printMBBReference(*Pred)
               << " has exit state (" << SPState[Pred->getNumber()].ExitValue
               << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
               << printMBBReference(*MBB) << " has entry state ("
               << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
      }
    }

    // Make sure the entry state of any successor is consistent with the exit
    // state.
    for (const MachineBasicBlock *Succ : MBB->successors()) {
      if (Reachable.count(Succ) &&
          (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
           SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
        report("The entry stack state of a successor is inconsistent.", MBB);
        errs() << "Successor " << printMBBReference(*Succ)
               << " has entry state (" << SPState[Succ->getNumber()].EntryValue
               << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
               << printMBBReference(*MBB) << " has exit state ("
               << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
      }
    }
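    // Both directions are checked because the DFS may reach a neighbor either
    // before or after the current block; only already-visited neighbors have
    // a valid SPState entry.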

    // Make sure a basic block with return ends with zero stack adjustment.
    if (!MBB->empty() && MBB->back().isReturn()) {
      if (BBState.ExitIsSetup)
        report("A return block ends with a FrameSetup.", MBB);
      if (BBState.ExitValue)
        report("A return block ends with a nonzero stack adjustment.", MBB);
    }
  }
}
