//===-- Verifier.cpp - Implement the Module Verifier ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the function verifier interface, that can be used for some
10 // basic correctness checking of input to the system.
11 //
// Note that this does not provide full `Java style' security and
// verifications; instead it just tries to ensure that code is well-formed.
14 //
15 //  * Both of a binary operator's parameters are of the same type
16 //  * Verify that the indices of mem access instructions match other operands
17 //  * Verify that arithmetic and other things are only performed on first-class
18 //    types.  Verify that shifts & logicals only happen on integrals f.e.
19 //  * All of the constants in a switch statement are of the correct type
20 //  * The code is in valid SSA form
21 //  * It should be illegal to put a label into any other type (like a structure)
22 //    or to return one. [except constant arrays!]
23 //  * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24 //  * PHI nodes must have an entry for each predecessor, with no extras.
25 //  * PHI nodes must be the first thing in a basic block, all grouped together
26 //  * All basic blocks should only end with terminator insts, not contain them
27 //  * The entry node to a function must not have predecessors
28 //  * All Instructions must be embedded into a basic block
29 //  * Functions cannot take a void-typed parameter
//  * Verify that a function's argument list agrees with its declared type.
31 //  * It is illegal to specify a name for a void value.
//  * It is illegal to have an internal global value with no initializer
33 //  * It is illegal to have a ret instruction that returns a value that does not
34 //    agree with the function return value type.
35 //  * Function call argument types match the function prototype
36 //  * A landing pad is defined by a landingpad instruction, and can be jumped to
37 //    only by the unwind edge of an invoke instruction.
38 //  * A landingpad instruction must be the first non-PHI instruction in the
39 //    block.
40 //  * Landingpad instructions must be in a function with a personality function.
41 //  * Convergence control intrinsics are introduced in ConvergentOperations.rst.
42 //    The applied restrictions are too numerous to list here.
43 //  * The convergence entry intrinsic and the loop heart must be the first
44 //    non-PHI instruction in their respective block. This does not conflict with
45 //    the landing pads, since these two kinds cannot occur in the same block.
46 //  * All other things that are tested by asserts spread about the code...
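//
// For illustration of one of the rules above (a minimal sketch, not the exact
// diagnostic text the verifier prints), a PHI node that is missing an incoming
// value for one of its block's predecessors is rejected:
//
//   bb:                              ; preds = %a, %b
//     %p = phi i32 [ 0, %a ]         ; error: no entry for predecessor %b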
47 //
48 //===----------------------------------------------------------------------===//
49 
50 #include "llvm/IR/Verifier.h"
51 #include "llvm/ADT/APFloat.h"
52 #include "llvm/ADT/APInt.h"
53 #include "llvm/ADT/ArrayRef.h"
54 #include "llvm/ADT/DenseMap.h"
55 #include "llvm/ADT/MapVector.h"
56 #include "llvm/ADT/PostOrderIterator.h"
57 #include "llvm/ADT/STLExtras.h"
58 #include "llvm/ADT/SmallPtrSet.h"
59 #include "llvm/ADT/SmallSet.h"
60 #include "llvm/ADT/SmallVector.h"
61 #include "llvm/ADT/StringExtras.h"
62 #include "llvm/ADT/StringMap.h"
63 #include "llvm/ADT/StringRef.h"
64 #include "llvm/ADT/Twine.h"
65 #include "llvm/BinaryFormat/Dwarf.h"
66 #include "llvm/IR/Argument.h"
67 #include "llvm/IR/AttributeMask.h"
68 #include "llvm/IR/Attributes.h"
69 #include "llvm/IR/BasicBlock.h"
70 #include "llvm/IR/CFG.h"
71 #include "llvm/IR/CallingConv.h"
72 #include "llvm/IR/Comdat.h"
73 #include "llvm/IR/Constant.h"
74 #include "llvm/IR/ConstantRange.h"
75 #include "llvm/IR/Constants.h"
76 #include "llvm/IR/CycleInfo.h"
77 #include "llvm/IR/DataLayout.h"
78 #include "llvm/IR/DebugInfo.h"
79 #include "llvm/IR/DebugInfoMetadata.h"
80 #include "llvm/IR/DebugLoc.h"
81 #include "llvm/IR/DerivedTypes.h"
82 #include "llvm/IR/Dominators.h"
83 #include "llvm/IR/EHPersonalities.h"
84 #include "llvm/IR/Function.h"
85 #include "llvm/IR/GCStrategy.h"
86 #include "llvm/IR/GlobalAlias.h"
87 #include "llvm/IR/GlobalValue.h"
88 #include "llvm/IR/GlobalVariable.h"
89 #include "llvm/IR/InlineAsm.h"
90 #include "llvm/IR/InstVisitor.h"
91 #include "llvm/IR/InstrTypes.h"
92 #include "llvm/IR/Instruction.h"
93 #include "llvm/IR/Instructions.h"
94 #include "llvm/IR/IntrinsicInst.h"
95 #include "llvm/IR/Intrinsics.h"
96 #include "llvm/IR/IntrinsicsAArch64.h"
97 #include "llvm/IR/IntrinsicsAMDGPU.h"
98 #include "llvm/IR/IntrinsicsARM.h"
99 #include "llvm/IR/IntrinsicsWebAssembly.h"
100 #include "llvm/IR/LLVMContext.h"
101 #include "llvm/IR/Metadata.h"
102 #include "llvm/IR/Module.h"
103 #include "llvm/IR/ModuleSlotTracker.h"
104 #include "llvm/IR/PassManager.h"
105 #include "llvm/IR/Statepoint.h"
106 #include "llvm/IR/Type.h"
107 #include "llvm/IR/Use.h"
108 #include "llvm/IR/User.h"
109 #include "llvm/IR/Value.h"
110 #include "llvm/InitializePasses.h"
111 #include "llvm/Pass.h"
112 #include "llvm/Support/AtomicOrdering.h"
113 #include "llvm/Support/Casting.h"
114 #include "llvm/Support/CommandLine.h"
115 #include "llvm/Support/ErrorHandling.h"
116 #include "llvm/Support/MathExtras.h"
117 #include "llvm/Support/raw_ostream.h"
118 #include <algorithm>
119 #include <cassert>
120 #include <cstdint>
121 #include <memory>
122 #include <optional>
123 #include <string>
124 #include <utility>
125 
126 using namespace llvm;
127 
128 static cl::opt<bool> VerifyNoAliasScopeDomination(
129     "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
130     cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
131              "scopes are not dominating"));
132 
133 namespace llvm {
134 
135 struct VerifierSupport {
136   raw_ostream *OS;
137   const Module &M;
138   ModuleSlotTracker MST;
139   Triple TT;
140   const DataLayout &DL;
141   LLVMContext &Context;
142 
143   /// Track the brokenness of the module while recursively visiting.
144   bool Broken = false;
145   /// Broken debug info can be "recovered" from by stripping the debug info.
146   bool BrokenDebugInfo = false;
147   /// Whether to treat broken debug info as an error.
148   bool TreatBrokenDebugInfoAsError = true;
149 
150   explicit VerifierSupport(raw_ostream *OS, const Module &M)
151       : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
152         Context(M.getContext()) {}
153 
154 private:
155   void Write(const Module *M) {
156     *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
157   }
158 
159   void Write(const Value *V) {
160     if (V)
161       Write(*V);
162   }
163 
164   void Write(const Value &V) {
165     if (isa<Instruction>(V)) {
166       V.print(*OS, MST);
167       *OS << '\n';
168     } else {
169       V.printAsOperand(*OS, true, MST);
170       *OS << '\n';
171     }
172   }
173 
174   void Write(const Metadata *MD) {
175     if (!MD)
176       return;
177     MD->print(*OS, MST, &M);
178     *OS << '\n';
179   }
180 
181   template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
182     Write(MD.get());
183   }
184 
185   void Write(const NamedMDNode *NMD) {
186     if (!NMD)
187       return;
188     NMD->print(*OS, MST);
189     *OS << '\n';
190   }
191 
192   void Write(Type *T) {
193     if (!T)
194       return;
195     *OS << ' ' << *T;
196   }
197 
198   void Write(const Comdat *C) {
199     if (!C)
200       return;
201     *OS << *C;
202   }
203 
204   void Write(const APInt *AI) {
205     if (!AI)
206       return;
207     *OS << *AI << '\n';
208   }
209 
210   void Write(const unsigned i) { *OS << i << '\n'; }
211 
212   // NOLINTNEXTLINE(readability-identifier-naming)
213   void Write(const Attribute *A) {
214     if (!A)
215       return;
216     *OS << A->getAsString() << '\n';
217   }
218 
219   // NOLINTNEXTLINE(readability-identifier-naming)
220   void Write(const AttributeSet *AS) {
221     if (!AS)
222       return;
223     *OS << AS->getAsString() << '\n';
224   }
225 
226   // NOLINTNEXTLINE(readability-identifier-naming)
227   void Write(const AttributeList *AL) {
228     if (!AL)
229       return;
230     AL->print(*OS);
231   }
232 
233   void Write(Printable P) { *OS << P << '\n'; }
234 
235   template <typename T> void Write(ArrayRef<T> Vs) {
236     for (const T &V : Vs)
237       Write(V);
238   }
239 
240   template <typename T1, typename... Ts>
241   void WriteTs(const T1 &V1, const Ts &... Vs) {
242     Write(V1);
243     WriteTs(Vs...);
244   }
245 
246   template <typename... Ts> void WriteTs() {}
247 
248 public:
  /// A check failed, so print out the condition and the message.
250   ///
251   /// This provides a nice place to put a breakpoint if you want to see why
252   /// something is not correct.
253   void CheckFailed(const Twine &Message) {
254     if (OS)
255       *OS << Message << '\n';
256     Broken = true;
257   }
258 
259   /// A check failed (with values to print).
260   ///
261   /// This calls the Message-only version so that the above is easier to set a
262   /// breakpoint on.
263   template <typename T1, typename... Ts>
264   void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
265     CheckFailed(Message);
266     if (OS)
267       WriteTs(V1, Vs...);
268   }
269 
270   /// A debug info check failed.
271   void DebugInfoCheckFailed(const Twine &Message) {
272     if (OS)
273       *OS << Message << '\n';
274     Broken |= TreatBrokenDebugInfoAsError;
275     BrokenDebugInfo = true;
276   }
277 
278   /// A debug info check failed (with values to print).
279   template <typename T1, typename... Ts>
280   void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
281                             const Ts &... Vs) {
282     DebugInfoCheckFailed(Message);
283     if (OS)
284       WriteTs(V1, Vs...);
285   }
286 };
287 
288 } // namespace llvm
289 
290 namespace {
291 
292 class Verifier : public InstVisitor<Verifier>, VerifierSupport {
293   friend class InstVisitor<Verifier>;
294 
  // ISD::ArgFlagsTy::MemAlign only has 4 bits for the alignment, so the
  // alignment value should not exceed 2^15. Since encode(Align) adds 1 to the
  // shift value, the alignment should not exceed 2^14; otherwise it cannot be
  // lowered properly in the backend.
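  // (Worked out: 4 bits can encode values up to 15; since the encoding stores
  // the shift amount plus 1, the largest representable shift is 14, so the
  // largest supported alignment is 1 << 14, i.e. ParamMaxAlignment below.)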
300   static constexpr unsigned ParamMaxAlignment = 1 << 14;
301   DominatorTree DT;
302 
303   /// When verifying a basic block, keep track of all of the
304   /// instructions we have seen so far.
305   ///
306   /// This allows us to do efficient dominance checks for the case when an
307   /// instruction has an operand that is an instruction in the same block.
308   SmallPtrSet<Instruction *, 16> InstsInThisBlock;
309 
310   /// Keep track of the metadata nodes that have been checked already.
311   SmallPtrSet<const Metadata *, 32> MDNodes;
312 
  /// Keep track of which DISubprogram is attached to which function.
314   DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;
315 
316   /// Track all DICompileUnits visited.
317   SmallPtrSet<const Metadata *, 2> CUVisited;
318 
319   /// The result type for a landingpad.
320   Type *LandingPadResultTy;
321 
322   /// Whether we've seen a call to @llvm.localescape in this function
323   /// already.
324   bool SawFrameEscape;
325 
326   /// Whether the current function has a DISubprogram attached to it.
327   bool HasDebugInfo = false;
328 
329   /// The current source language.
330   dwarf::SourceLanguage CurrentSourceLang = dwarf::DW_LANG_lo_user;
331 
  /// Whether the current function uses convergencectrl operand bundles
  /// (controlled convergence), has convergent operations without them, or has
  /// no convergent operations at all.
333   enum {
334     ControlledConvergence,
335     UncontrolledConvergence,
336     NoConvergence
337   } ConvergenceKind = NoConvergence;
338 
339   /// Whether source was present on the first DIFile encountered in each CU.
340   DenseMap<const DICompileUnit *, bool> HasSourceDebugInfo;
341 
342   /// Stores the count of how many objects were passed to llvm.localescape for a
343   /// given function and the largest index passed to llvm.localrecover.
344   DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
345 
346   // Maps catchswitches and cleanuppads that unwind to siblings to the
347   // terminators that indicate the unwind, used to detect cycles therein.
348   MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
349 
350   /// Cache which blocks are in which funclet, if an EH funclet personality is
351   /// in use. Otherwise empty.
352   DenseMap<BasicBlock *, ColorVector> BlockEHFuncletColors;
353 
354   /// Cache of constants visited in search of ConstantExprs.
355   SmallPtrSet<const Constant *, 32> ConstantExprVisited;
356 
357   /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
358   SmallVector<const Function *, 4> DeoptimizeDeclarations;
359 
360   /// Cache of attribute lists verified.
361   SmallPtrSet<const void *, 32> AttributeListsVisited;
362 
363   // Verify that this GlobalValue is only used in this module.
364   // This map is used to avoid visiting uses twice. We can arrive at a user
365   // twice, if they have multiple operands. In particular for very large
366   // constant expressions, we can arrive at a particular user many times.
367   SmallPtrSet<const Value *, 32> GlobalValueVisited;
368 
369   // Keeps track of duplicate function argument debug info.
370   SmallVector<const DILocalVariable *, 16> DebugFnArgs;
371 
372   TBAAVerifier TBAAVerifyHelper;
373 
374   SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
375 
376   void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
377 
378 public:
379   explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
380                     const Module &M)
381       : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
382         SawFrameEscape(false), TBAAVerifyHelper(this) {
383     TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
384   }
385 
386   bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
387 
388   bool verify(const Function &F) {
389     assert(F.getParent() == &M &&
390            "An instance of this class only works with a specific module!");
391 
392     // First ensure the function is well-enough formed to compute dominance
393     // information, and directly compute a dominance tree. We don't rely on the
394     // pass manager to provide this as it isolates us from a potentially
395     // out-of-date dominator tree and makes it significantly more complex to run
396     // this code outside of a pass manager.
397     // FIXME: It's really gross that we have to cast away constness here.
398     if (!F.empty())
399       DT.recalculate(const_cast<Function &>(F));
400 
401     for (const BasicBlock &BB : F) {
402       if (!BB.empty() && BB.back().isTerminator())
403         continue;
404 
405       if (OS) {
406         *OS << "Basic Block in function '" << F.getName()
407             << "' does not have terminator!\n";
408         BB.printAsOperand(*OS, true, MST);
409         *OS << "\n";
410       }
411       return false;
412     }
413 
414     Broken = false;
415     // FIXME: We strip const here because the inst visitor strips const.
416     visit(const_cast<Function &>(F));
417     verifySiblingFuncletUnwinds();
418     if (ConvergenceKind == ControlledConvergence)
419       verifyConvergenceControl(const_cast<Function &>(F));
420     InstsInThisBlock.clear();
421     DebugFnArgs.clear();
422     LandingPadResultTy = nullptr;
423     SawFrameEscape = false;
424     SiblingFuncletInfo.clear();
425     verifyNoAliasScopeDecl();
426     NoAliasScopeDecls.clear();
427     ConvergenceKind = NoConvergence;
428 
429     return !Broken;
430   }
431 
432   /// Verify the module that this instance of \c Verifier was initialized with.
433   bool verify() {
434     Broken = false;
435 
436     // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
437     for (const Function &F : M)
438       if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
439         DeoptimizeDeclarations.push_back(&F);
440 
441     // Now that we've visited every function, verify that we never asked to
442     // recover a frame index that wasn't escaped.
443     verifyFrameRecoverIndices();
444     for (const GlobalVariable &GV : M.globals())
445       visitGlobalVariable(GV);
446 
447     for (const GlobalAlias &GA : M.aliases())
448       visitGlobalAlias(GA);
449 
450     for (const GlobalIFunc &GI : M.ifuncs())
451       visitGlobalIFunc(GI);
452 
453     for (const NamedMDNode &NMD : M.named_metadata())
454       visitNamedMDNode(NMD);
455 
456     for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
457       visitComdat(SMEC.getValue());
458 
459     visitModuleFlags();
460     visitModuleIdents();
461     visitModuleCommandLines();
462 
463     verifyCompileUnits();
464 
465     verifyDeoptimizeCallingConvs();
466     DISubprogramAttachments.clear();
467     return !Broken;
468   }
469 
470 private:
471   /// Whether a metadata node is allowed to be, or contain, a DILocation.
472   enum class AreDebugLocsAllowed { No, Yes };
473 
474   // Verification methods...
475   void visitGlobalValue(const GlobalValue &GV);
476   void visitGlobalVariable(const GlobalVariable &GV);
477   void visitGlobalAlias(const GlobalAlias &GA);
478   void visitGlobalIFunc(const GlobalIFunc &GI);
479   void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
480   void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
481                            const GlobalAlias &A, const Constant &C);
482   void visitNamedMDNode(const NamedMDNode &NMD);
483   void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
484   void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
485   void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
486   void visitComdat(const Comdat &C);
487   void visitModuleIdents();
488   void visitModuleCommandLines();
489   void visitModuleFlags();
490   void visitModuleFlag(const MDNode *Op,
491                        DenseMap<const MDString *, const MDNode *> &SeenIDs,
492                        SmallVectorImpl<const MDNode *> &Requirements);
493   void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
494   void visitFunction(const Function &F);
495   void visitBasicBlock(BasicBlock &BB);
496   void verifyRangeMetadata(const Value &V, const MDNode *Range, Type *Ty,
497                            bool IsAbsoluteSymbol);
498   void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
499   void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
500   void visitProfMetadata(Instruction &I, MDNode *MD);
501   void visitCallStackMetadata(MDNode *MD);
502   void visitMemProfMetadata(Instruction &I, MDNode *MD);
503   void visitCallsiteMetadata(Instruction &I, MDNode *MD);
504   void visitDIAssignIDMetadata(Instruction &I, MDNode *MD);
505   void visitAnnotationMetadata(MDNode *Annotation);
506   void visitAliasScopeMetadata(const MDNode *MD);
507   void visitAliasScopeListMetadata(const MDNode *MD);
508   void visitAccessGroupMetadata(const MDNode *MD);
509 
510   template <class Ty> bool isValidMetadataArray(const MDTuple &N);
511 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
512 #include "llvm/IR/Metadata.def"
513   void visitDIScope(const DIScope &N);
514   void visitDIVariable(const DIVariable &N);
515   void visitDILexicalBlockBase(const DILexicalBlockBase &N);
516   void visitDITemplateParameter(const DITemplateParameter &N);
517 
518   void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
519 
520   // InstVisitor overrides...
521   using InstVisitor<Verifier>::visit;
522   void visit(Instruction &I);
523 
524   void visitTruncInst(TruncInst &I);
525   void visitZExtInst(ZExtInst &I);
526   void visitSExtInst(SExtInst &I);
527   void visitFPTruncInst(FPTruncInst &I);
528   void visitFPExtInst(FPExtInst &I);
529   void visitFPToUIInst(FPToUIInst &I);
530   void visitFPToSIInst(FPToSIInst &I);
531   void visitUIToFPInst(UIToFPInst &I);
532   void visitSIToFPInst(SIToFPInst &I);
533   void visitIntToPtrInst(IntToPtrInst &I);
534   void visitPtrToIntInst(PtrToIntInst &I);
535   void visitBitCastInst(BitCastInst &I);
536   void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
537   void visitPHINode(PHINode &PN);
538   void visitCallBase(CallBase &Call);
539   void visitUnaryOperator(UnaryOperator &U);
540   void visitBinaryOperator(BinaryOperator &B);
541   void visitICmpInst(ICmpInst &IC);
542   void visitFCmpInst(FCmpInst &FC);
543   void visitExtractElementInst(ExtractElementInst &EI);
544   void visitInsertElementInst(InsertElementInst &EI);
545   void visitShuffleVectorInst(ShuffleVectorInst &EI);
546   void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
547   void visitCallInst(CallInst &CI);
548   void visitInvokeInst(InvokeInst &II);
549   void visitGetElementPtrInst(GetElementPtrInst &GEP);
550   void visitLoadInst(LoadInst &LI);
551   void visitStoreInst(StoreInst &SI);
552   void verifyDominatesUse(Instruction &I, unsigned i);
553   void visitInstruction(Instruction &I);
554   void visitTerminator(Instruction &I);
555   void visitBranchInst(BranchInst &BI);
556   void visitReturnInst(ReturnInst &RI);
557   void visitSwitchInst(SwitchInst &SI);
558   void visitIndirectBrInst(IndirectBrInst &BI);
559   void visitCallBrInst(CallBrInst &CBI);
560   void visitSelectInst(SelectInst &SI);
561   void visitUserOp1(Instruction &I);
562   void visitUserOp2(Instruction &I) { visitUserOp1(I); }
563   void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
564   void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
565   void visitVPIntrinsic(VPIntrinsic &VPI);
566   void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
567   void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
568   void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
569   void visitAtomicRMWInst(AtomicRMWInst &RMWI);
570   void visitFenceInst(FenceInst &FI);
571   void visitAllocaInst(AllocaInst &AI);
572   void visitExtractValueInst(ExtractValueInst &EVI);
573   void visitInsertValueInst(InsertValueInst &IVI);
574   void visitEHPadPredecessors(Instruction &I);
575   void visitLandingPadInst(LandingPadInst &LPI);
576   void visitResumeInst(ResumeInst &RI);
577   void visitCatchPadInst(CatchPadInst &CPI);
578   void visitCatchReturnInst(CatchReturnInst &CatchReturn);
579   void visitCleanupPadInst(CleanupPadInst &CPI);
580   void visitFuncletPadInst(FuncletPadInst &FPI);
581   void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
582   void visitCleanupReturnInst(CleanupReturnInst &CRI);
583 
584   void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
585   void verifySwiftErrorValue(const Value *SwiftErrorVal);
586   void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
587   void verifyMustTailCall(CallInst &CI);
588   bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
589   void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
590   void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
591   void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
592                                     const Value *V);
593   void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
594                            const Value *V, bool IsIntrinsic, bool IsInlineAsm);
595   void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
596 
597   void visitConstantExprsRecursively(const Constant *EntryC);
598   void visitConstantExpr(const ConstantExpr *CE);
599   void verifyInlineAsmCall(const CallBase &Call);
600   void verifyStatepoint(const CallBase &Call);
601   void verifyFrameRecoverIndices();
602   void verifySiblingFuncletUnwinds();
603   void verifyConvergenceControl(Function &F);
604 
605   void verifyFragmentExpression(const DbgVariableIntrinsic &I);
606   template <typename ValueOrMetadata>
607   void verifyFragmentExpression(const DIVariable &V,
608                                 DIExpression::FragmentInfo Fragment,
609                                 ValueOrMetadata *Desc);
610   void verifyFnArgs(const DbgVariableIntrinsic &I);
611   void verifyNotEntryValue(const DbgVariableIntrinsic &I);
612 
613   /// Module-level debug info verification...
614   void verifyCompileUnits();
615 
616   /// Module-level verification that all @llvm.experimental.deoptimize
617   /// declarations share the same calling convention.
618   void verifyDeoptimizeCallingConvs();
619 
620   void verifyAttachedCallBundle(const CallBase &Call,
621                                 const OperandBundleUse &BU);
622 
623   /// Verify all-or-nothing property of DIFile source attribute within a CU.
624   void verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F);
625 
626   /// Verify the llvm.experimental.noalias.scope.decl declarations
627   void verifyNoAliasScopeDecl();
628 };
629 
630 } // end anonymous namespace
631 
/// We know that cond should be true; if not, print an error message.
633 #define Check(C, ...)                                                          \
634   do {                                                                         \
635     if (!(C)) {                                                                \
636       CheckFailed(__VA_ARGS__);                                                \
637       return;                                                                  \
638     }                                                                          \
639   } while (false)
640 
/// We know that a debug info condition should be true; if not, print
/// an error message.
643 #define CheckDI(C, ...)                                                        \
644   do {                                                                         \
645     if (!(C)) {                                                                \
646       DebugInfoCheckFailed(__VA_ARGS__);                                       \
647       return;                                                                  \
648     }                                                                          \
649   } while (false)
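
// Typical usage inside the visitor methods below; any values passed after the
// message are printed alongside the failure, e.g.:
//
//   Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
//   CheckDI(N.isDistinct(), "compile units must be distinct", &N);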
650 
651 void Verifier::visit(Instruction &I) {
652   for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
653     Check(I.getOperand(i) != nullptr, "Operand is null", &I);
654   InstVisitor<Verifier>::visit(I);
655 }
656 
// Helper to iterate over indirect users. By returning false for a given user,
// the callback can ask not to traverse beyond that user.
658 static void forEachUser(const Value *User,
659                         SmallPtrSet<const Value *, 32> &Visited,
660                         llvm::function_ref<bool(const Value *)> Callback) {
661   if (!Visited.insert(User).second)
662     return;
663 
664   SmallVector<const Value *> WorkList;
665   append_range(WorkList, User->materialized_users());
666   while (!WorkList.empty()) {
    const Value *Cur = WorkList.pop_back_val();
668     if (!Visited.insert(Cur).second)
669       continue;
670     if (Callback(Cur))
671       append_range(WorkList, Cur->materialized_users());
672   }
673 }
674 
675 void Verifier::visitGlobalValue(const GlobalValue &GV) {
676   Check(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(),
677         "Global is external, but doesn't have external or weak linkage!", &GV);
678 
679   if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
680 
681     if (MaybeAlign A = GO->getAlign()) {
682       Check(A->value() <= Value::MaximumAlignment,
683             "huge alignment values are unsupported", GO);
684     }
685 
686     if (const MDNode *Associated =
687             GO->getMetadata(LLVMContext::MD_associated)) {
688       Check(Associated->getNumOperands() == 1,
689             "associated metadata must have one operand", &GV, Associated);
690       const Metadata *Op = Associated->getOperand(0).get();
691       Check(Op, "associated metadata must have a global value", GO, Associated);
692 
693       const auto *VM = dyn_cast_or_null<ValueAsMetadata>(Op);
694       Check(VM, "associated metadata must be ValueAsMetadata", GO, Associated);
695       if (VM) {
696         Check(isa<PointerType>(VM->getValue()->getType()),
697               "associated value must be pointer typed", GV, Associated);
698 
699         const Value *Stripped = VM->getValue()->stripPointerCastsAndAliases();
700         Check(isa<GlobalObject>(Stripped) || isa<Constant>(Stripped),
701               "associated metadata must point to a GlobalObject", GO, Stripped);
702         Check(Stripped != GO,
703               "global values should not associate to themselves", GO,
704               Associated);
705       }
706     }
707 
708     // FIXME: Why is getMetadata on GlobalValue protected?
709     if (const MDNode *AbsoluteSymbol =
710             GO->getMetadata(LLVMContext::MD_absolute_symbol)) {
711       verifyRangeMetadata(*GO, AbsoluteSymbol, DL.getIntPtrType(GO->getType()),
712                           true);
713     }
714   }
715 
716   Check(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
717         "Only global variables can have appending linkage!", &GV);
718 
719   if (GV.hasAppendingLinkage()) {
720     const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
721     Check(GVar && GVar->getValueType()->isArrayTy(),
722           "Only global arrays can have appending linkage!", GVar);
723   }
724 
725   if (GV.isDeclarationForLinker())
726     Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
727 
728   if (GV.hasDLLExportStorageClass()) {
729     Check(!GV.hasHiddenVisibility(),
730           "dllexport GlobalValue must have default or protected visibility",
731           &GV);
732   }
733   if (GV.hasDLLImportStorageClass()) {
734     Check(GV.hasDefaultVisibility(),
735           "dllimport GlobalValue must have default visibility", &GV);
736     Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
737           &GV);
738 
739     Check((GV.isDeclaration() &&
740            (GV.hasExternalLinkage() || GV.hasExternalWeakLinkage())) ||
741               GV.hasAvailableExternallyLinkage(),
742           "Global is marked as dllimport, but not external", &GV);
743   }
744 
745   if (GV.isImplicitDSOLocal())
746     Check(GV.isDSOLocal(),
747           "GlobalValue with local linkage or non-default "
748           "visibility must be dso_local!",
749           &GV);
750 
751   forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
752     if (const Instruction *I = dyn_cast<Instruction>(V)) {
753       if (!I->getParent() || !I->getParent()->getParent())
754         CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
755                     I);
756       else if (I->getParent()->getParent()->getParent() != &M)
757         CheckFailed("Global is referenced in a different module!", &GV, &M, I,
758                     I->getParent()->getParent(),
759                     I->getParent()->getParent()->getParent());
760       return false;
761     } else if (const Function *F = dyn_cast<Function>(V)) {
762       if (F->getParent() != &M)
763         CheckFailed("Global is used by function in a different module", &GV, &M,
764                     F, F->getParent());
765       return false;
766     }
767     return true;
768   });
769 }
770 
771 void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
772   if (GV.hasInitializer()) {
773     Check(GV.getInitializer()->getType() == GV.getValueType(),
774           "Global variable initializer type does not match global "
775           "variable type!",
776           &GV);
777     // If the global has common linkage, it must have a zero initializer and
778     // cannot be constant.
779     if (GV.hasCommonLinkage()) {
780       Check(GV.getInitializer()->isNullValue(),
781             "'common' global must have a zero initializer!", &GV);
782       Check(!GV.isConstant(), "'common' global may not be marked constant!",
783             &GV);
784       Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
785     }
786   }
787 
788   if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
789                        GV.getName() == "llvm.global_dtors")) {
790     Check(!GV.hasInitializer() || GV.hasAppendingLinkage(),
791           "invalid linkage for intrinsic global variable", &GV);
792     Check(GV.materialized_use_empty(),
793           "invalid uses of intrinsic global variable", &GV);
794 
    // Don't worry about emitting an error for it not being an array;
    // visitGlobalValue will complain about appending linkage on a non-array.
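    // An illustrative well-formed entry matching the checks below (priority,
    // constructor function, and associated data, which may be ptr null):
    //   @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }]
    //     [{ i32, ptr, ptr } { i32 65535, ptr @ctor, ptr null }]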
797     if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
798       StructType *STy = dyn_cast<StructType>(ATy->getElementType());
799       PointerType *FuncPtrTy =
800           FunctionType::get(Type::getVoidTy(Context), false)->
801           getPointerTo(DL.getProgramAddressSpace());
802       Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
803                 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
804                 STy->getTypeAtIndex(1) == FuncPtrTy,
805             "wrong type for intrinsic global variable", &GV);
806       Check(STy->getNumElements() == 3,
807             "the third field of the element type is mandatory, "
808             "specify ptr null to migrate from the obsoleted 2-field form");
809       Type *ETy = STy->getTypeAtIndex(2);
810       Check(ETy->isPointerTy(), "wrong type for intrinsic global variable",
811             &GV);
812     }
813   }
814 
815   if (GV.hasName() && (GV.getName() == "llvm.used" ||
816                        GV.getName() == "llvm.compiler.used")) {
817     Check(!GV.hasInitializer() || GV.hasAppendingLinkage(),
818           "invalid linkage for intrinsic global variable", &GV);
819     Check(GV.materialized_use_empty(),
820           "invalid uses of intrinsic global variable", &GV);
821 
822     Type *GVType = GV.getValueType();
823     if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
824       PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
825       Check(PTy, "wrong type for intrinsic global variable", &GV);
826       if (GV.hasInitializer()) {
827         const Constant *Init = GV.getInitializer();
828         const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
829         Check(InitArray, "wrong initalizer for intrinsic global variable",
830               Init);
831         for (Value *Op : InitArray->operands()) {
832           Value *V = Op->stripPointerCasts();
833           Check(isa<GlobalVariable>(V) || isa<Function>(V) ||
834                     isa<GlobalAlias>(V),
835                 Twine("invalid ") + GV.getName() + " member", V);
836           Check(V->hasName(),
837                 Twine("members of ") + GV.getName() + " must be named", V);
838         }
839       }
840     }
841   }
842 
843   // Visit any debug info attachments.
844   SmallVector<MDNode *, 1> MDs;
845   GV.getMetadata(LLVMContext::MD_dbg, MDs);
846   for (auto *MD : MDs) {
847     if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
848       visitDIGlobalVariableExpression(*GVE);
849     else
850       CheckDI(false, "!dbg attachment of global variable must be a "
851                      "DIGlobalVariableExpression");
852   }
853 
854   // Scalable vectors cannot be global variables, since we don't know
855   // the runtime size. If the global is an array containing scalable vectors,
856   // that will be caught by the isValidElementType methods in StructType or
857   // ArrayType instead.
858   Check(!isa<ScalableVectorType>(GV.getValueType()),
859         "Globals cannot contain scalable vectors", &GV);
860 
861   if (auto *STy = dyn_cast<StructType>(GV.getValueType())) {
862     SmallPtrSet<Type *, 4> Visited;
863     Check(!STy->containsScalableVectorType(&Visited),
864           "Globals cannot contain scalable vectors", &GV);
865   }
866 
867   // Check if it's a target extension type that disallows being used as a
868   // global.
869   if (auto *TTy = dyn_cast<TargetExtType>(GV.getValueType()))
870     Check(TTy->hasProperty(TargetExtType::CanBeGlobal),
871           "Global @" + GV.getName() + " has illegal target extension type",
872           TTy);
873 
874   if (!GV.hasInitializer()) {
875     visitGlobalValue(GV);
876     return;
877   }
878 
879   // Walk any aggregate initializers looking for bitcasts between address spaces
880   visitConstantExprsRecursively(GV.getInitializer());
881 
882   visitGlobalValue(GV);
883 }
884 
885 void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
886   SmallPtrSet<const GlobalAlias*, 4> Visited;
887   Visited.insert(&GA);
888   visitAliaseeSubExpr(Visited, GA, C);
889 }
890 
891 void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
892                                    const GlobalAlias &GA, const Constant &C) {
893   if (GA.hasAvailableExternallyLinkage()) {
894     Check(isa<GlobalValue>(C) &&
895               cast<GlobalValue>(C).hasAvailableExternallyLinkage(),
896           "available_externally alias must point to available_externally "
897           "global value",
898           &GA);
899   }
900   if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
901     if (!GA.hasAvailableExternallyLinkage()) {
902       Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
903             &GA);
904     }
905 
906     if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
907       Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
908 
909       Check(!GA2->isInterposable(),
910             "Alias cannot point to an interposable alias", &GA);
911     } else {
912       // Only continue verifying subexpressions of GlobalAliases.
913       // Do not recurse into global initializers.
914       return;
915     }
916   }
917 
918   if (const auto *CE = dyn_cast<ConstantExpr>(&C))
919     visitConstantExprsRecursively(CE);
920 
921   for (const Use &U : C.operands()) {
922     Value *V = &*U;
923     if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
924       visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
925     else if (const auto *C2 = dyn_cast<Constant>(V))
926       visitAliaseeSubExpr(Visited, GA, *C2);
927   }
928 }
929 
930 void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
931   Check(GlobalAlias::isValidLinkage(GA.getLinkage()),
932         "Alias should have private, internal, linkonce, weak, linkonce_odr, "
933         "weak_odr, external, or available_externally linkage!",
934         &GA);
935   const Constant *Aliasee = GA.getAliasee();
936   Check(Aliasee, "Aliasee cannot be NULL!", &GA);
937   Check(GA.getType() == Aliasee->getType(),
938         "Alias and aliasee types should match!", &GA);
939 
940   Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
941         "Aliasee should be either GlobalValue or ConstantExpr", &GA);
942 
943   visitAliaseeSubExpr(GA, *Aliasee);
944 
945   visitGlobalValue(GA);
946 }
947 
948 void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
949   Check(GlobalIFunc::isValidLinkage(GI.getLinkage()),
950         "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
951         "weak_odr, or external linkage!",
952         &GI);
953   // Pierce through ConstantExprs and GlobalAliases and check that the resolver
954   // is a Function definition.
955   const Function *Resolver = GI.getResolverFunction();
956   Check(Resolver, "IFunc must have a Function resolver", &GI);
957   Check(!Resolver->isDeclarationForLinker(),
958         "IFunc resolver must be a definition", &GI);
959 
960   // Check that the immediate resolver operand (prior to any bitcasts) has the
961   // correct type.
962   const Type *ResolverTy = GI.getResolver()->getType();
963 
964   Check(isa<PointerType>(Resolver->getFunctionType()->getReturnType()),
965         "IFunc resolver must return a pointer", &GI);
966 
967   const Type *ResolverFuncTy =
968       GlobalIFunc::getResolverFunctionType(GI.getValueType());
969   Check(ResolverTy == ResolverFuncTy->getPointerTo(GI.getAddressSpace()),
970         "IFunc resolver has incorrect type", &GI);
971 }
972 
973 void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
974   // There used to be various other llvm.dbg.* nodes, but we don't support
975   // upgrading them and we want to reserve the namespace for future uses.
976   if (NMD.getName().startswith("llvm.dbg."))
977     CheckDI(NMD.getName() == "llvm.dbg.cu",
978             "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
979   for (const MDNode *MD : NMD.operands()) {
980     if (NMD.getName() == "llvm.dbg.cu")
981       CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
982 
983     if (!MD)
984       continue;
985 
986     visitMDNode(*MD, AreDebugLocsAllowed::Yes);
987   }
988 }
989 
990 void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
991   // Only visit each node once.  Metadata can be mutually recursive, so this
992   // avoids infinite recursion here, as well as being an optimization.
993   if (!MDNodes.insert(&MD).second)
994     return;
995 
996   Check(&MD.getContext() == &Context,
997         "MDNode context does not match Module context!", &MD);
998 
999   switch (MD.getMetadataID()) {
1000   default:
1001     llvm_unreachable("Invalid MDNode subclass");
1002   case Metadata::MDTupleKind:
1003     break;
1004 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
1005   case Metadata::CLASS##Kind:                                                  \
1006     visit##CLASS(cast<CLASS>(MD));                                             \
1007     break;
1008 #include "llvm/IR/Metadata.def"
1009   }
1010 
1011   for (const Metadata *Op : MD.operands()) {
1012     if (!Op)
1013       continue;
1014     Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
1015           &MD, Op);
1016     CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
1017             "DILocation not allowed within this metadata node", &MD, Op);
1018     if (auto *N = dyn_cast<MDNode>(Op)) {
1019       visitMDNode(*N, AllowLocs);
1020       continue;
1021     }
1022     if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
1023       visitValueAsMetadata(*V, nullptr);
1024       continue;
1025     }
1026   }
1027 
1028   // Check these last, so we diagnose problems in operands first.
1029   Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
1030   Check(MD.isResolved(), "All nodes should be resolved!", &MD);
1031 }
1032 
1033 void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
1034   Check(MD.getValue(), "Expected valid value", &MD);
1035   Check(!MD.getValue()->getType()->isMetadataTy(),
1036         "Unexpected metadata round-trip through values", &MD, MD.getValue());
1037 
1038   auto *L = dyn_cast<LocalAsMetadata>(&MD);
1039   if (!L)
1040     return;
1041 
1042   Check(F, "function-local metadata used outside a function", L);
1043 
1044   // If this was an instruction, bb, or argument, verify that it is in the
1045   // function that we expect.
1046   Function *ActualF = nullptr;
1047   if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
1048     Check(I->getParent(), "function-local metadata not in basic block", L, I);
1049     ActualF = I->getParent()->getParent();
1050   } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
1051     ActualF = BB->getParent();
1052   else if (Argument *A = dyn_cast<Argument>(L->getValue()))
1053     ActualF = A->getParent();
1054   assert(ActualF && "Unimplemented function local metadata case!");
1055 
1056   Check(ActualF == F, "function-local metadata used in wrong function", L);
1057 }
1058 
1059 void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
1060   Metadata *MD = MDV.getMetadata();
1061   if (auto *N = dyn_cast<MDNode>(MD)) {
1062     visitMDNode(*N, AreDebugLocsAllowed::No);
1063     return;
1064   }
1065 
1066   // Only visit each node once.  Metadata can be mutually recursive, so this
1067   // avoids infinite recursion here, as well as being an optimization.
1068   if (!MDNodes.insert(MD).second)
1069     return;
1070 
1071   if (auto *V = dyn_cast<ValueAsMetadata>(MD))
1072     visitValueAsMetadata(*V, F);
1073 }
1074 
1075 static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
1076 static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
1077 static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
1078 
1079 void Verifier::visitDILocation(const DILocation &N) {
1080   CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1081           "location requires a valid scope", &N, N.getRawScope());
1082   if (auto *IA = N.getRawInlinedAt())
1083     CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
1084   if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1085     CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1086 }
1087 
1088 void Verifier::visitGenericDINode(const GenericDINode &N) {
1089   CheckDI(N.getTag(), "invalid tag", &N);
1090 }
1091 
1092 void Verifier::visitDIScope(const DIScope &N) {
1093   if (auto *F = N.getRawFile())
1094     CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1095 }
1096 
1097 void Verifier::visitDISubrange(const DISubrange &N) {
1098   CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
1099   bool HasAssumedSizedArraySupport = dwarf::isFortran(CurrentSourceLang);
1100   CheckDI(HasAssumedSizedArraySupport || N.getRawCountNode() ||
1101               N.getRawUpperBound(),
1102           "Subrange must contain count or upperBound", &N);
1103   CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1104           "Subrange can have any one of count or upperBound", &N);
1105   auto *CBound = N.getRawCountNode();
1106   CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
1107               isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1108           "Count must be signed constant or DIVariable or DIExpression", &N);
1109   auto Count = N.getCount();
1110   CheckDI(!Count || !isa<ConstantInt *>(Count) ||
1111               cast<ConstantInt *>(Count)->getSExtValue() >= -1,
1112           "invalid subrange count", &N);
1113   auto *LBound = N.getRawLowerBound();
1114   CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
1115               isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1116           "LowerBound must be signed constant or DIVariable or DIExpression",
1117           &N);
1118   auto *UBound = N.getRawUpperBound();
1119   CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
1120               isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1121           "UpperBound must be signed constant or DIVariable or DIExpression",
1122           &N);
1123   auto *Stride = N.getRawStride();
1124   CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
1125               isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1126           "Stride must be signed constant or DIVariable or DIExpression", &N);
1127 }
1128 
1129 void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
1130   CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
1131   CheckDI(N.getRawCountNode() || N.getRawUpperBound(),
1132           "GenericSubrange must contain count or upperBound", &N);
1133   CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
1134           "GenericSubrange can have any one of count or upperBound", &N);
1135   auto *CBound = N.getRawCountNode();
1136   CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
1137           "Count must be signed constant or DIVariable or DIExpression", &N);
1138   auto *LBound = N.getRawLowerBound();
1139   CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
1140   CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
1141           "LowerBound must be signed constant or DIVariable or DIExpression",
1142           &N);
1143   auto *UBound = N.getRawUpperBound();
1144   CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
1145           "UpperBound must be signed constant or DIVariable or DIExpression",
1146           &N);
1147   auto *Stride = N.getRawStride();
1148   CheckDI(Stride, "GenericSubrange must contain stride", &N);
1149   CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
1150           "Stride must be signed constant or DIVariable or DIExpression", &N);
1151 }
1152 
1153 void Verifier::visitDIEnumerator(const DIEnumerator &N) {
1154   CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
1155 }
1156 
1157 void Verifier::visitDIBasicType(const DIBasicType &N) {
1158   CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
1159               N.getTag() == dwarf::DW_TAG_unspecified_type ||
1160               N.getTag() == dwarf::DW_TAG_string_type,
1161           "invalid tag", &N);
1162 }
1163 
1164 void Verifier::visitDIStringType(const DIStringType &N) {
1165   CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
1166   CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
1167           &N);
1168 }
1169 
1170 void Verifier::visitDIDerivedType(const DIDerivedType &N) {
1171   // Common scope checks.
1172   visitDIScope(N);
1173 
1174   CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
1175               N.getTag() == dwarf::DW_TAG_pointer_type ||
1176               N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
1177               N.getTag() == dwarf::DW_TAG_reference_type ||
1178               N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
1179               N.getTag() == dwarf::DW_TAG_const_type ||
1180               N.getTag() == dwarf::DW_TAG_immutable_type ||
1181               N.getTag() == dwarf::DW_TAG_volatile_type ||
1182               N.getTag() == dwarf::DW_TAG_restrict_type ||
1183               N.getTag() == dwarf::DW_TAG_atomic_type ||
1184               N.getTag() == dwarf::DW_TAG_member ||
1185               N.getTag() == dwarf::DW_TAG_inheritance ||
1186               N.getTag() == dwarf::DW_TAG_friend ||
1187               N.getTag() == dwarf::DW_TAG_set_type,
1188           "invalid tag", &N);
1189   if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
1190     CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
1191             N.getRawExtraData());
1192   }
1193 
1194   if (N.getTag() == dwarf::DW_TAG_set_type) {
1195     if (auto *T = N.getRawBaseType()) {
1196       auto *Enum = dyn_cast_or_null<DICompositeType>(T);
1197       auto *Basic = dyn_cast_or_null<DIBasicType>(T);
1198       CheckDI(
1199           (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
1200               (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
1201                          Basic->getEncoding() == dwarf::DW_ATE_signed ||
1202                          Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
1203                          Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
1204                          Basic->getEncoding() == dwarf::DW_ATE_boolean)),
1205           "invalid set base type", &N, T);
1206     }
1207   }
1208 
1209   CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1210   CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1211           N.getRawBaseType());
1212 
1213   if (N.getDWARFAddressSpace()) {
1214     CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
1215                 N.getTag() == dwarf::DW_TAG_reference_type ||
1216                 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
1217             "DWARF address space only applies to pointer or reference types",
1218             &N);
1219   }
1220 }
1221 
1222 /// Detect mutually exclusive flags.
1223 static bool hasConflictingReferenceFlags(unsigned Flags) {
1224   return ((Flags & DINode::FlagLValueReference) &&
1225           (Flags & DINode::FlagRValueReference)) ||
1226          ((Flags & DINode::FlagTypePassByValue) &&
1227           (Flags & DINode::FlagTypePassByReference));
1228 }
1229 
1230 void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
1231   auto *Params = dyn_cast<MDTuple>(&RawParams);
1232   CheckDI(Params, "invalid template params", &N, &RawParams);
1233   for (Metadata *Op : Params->operands()) {
1234     CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
1235             &N, Params, Op);
1236   }
1237 }
1238 
1239 void Verifier::visitDICompositeType(const DICompositeType &N) {
1240   // Common scope checks.
1241   visitDIScope(N);
1242 
1243   CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
1244               N.getTag() == dwarf::DW_TAG_structure_type ||
1245               N.getTag() == dwarf::DW_TAG_union_type ||
1246               N.getTag() == dwarf::DW_TAG_enumeration_type ||
1247               N.getTag() == dwarf::DW_TAG_class_type ||
1248               N.getTag() == dwarf::DW_TAG_variant_part ||
1249               N.getTag() == dwarf::DW_TAG_namelist,
1250           "invalid tag", &N);
1251 
1252   CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1253   CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
1254           N.getRawBaseType());
1255 
1256   CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
1257           "invalid composite elements", &N, N.getRawElements());
1258   CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
1259           N.getRawVTableHolder());
1260   CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
1261           "invalid reference flags", &N);
1262   unsigned DIBlockByRefStruct = 1 << 4;
1263   CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
1264           "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
1265 
1266   if (N.isVector()) {
1267     const DINodeArray Elements = N.getElements();
1268     CheckDI(Elements.size() == 1 &&
1269                 Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
1270             "invalid vector, expected one element of type subrange", &N);
1271   }
1272 
1273   if (auto *Params = N.getRawTemplateParams())
1274     visitTemplateParams(N, *Params);
1275 
1276   if (auto *D = N.getRawDiscriminator()) {
1277     CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1278             "discriminator can only appear on variant part");
1279   }
1280 
1281   if (N.getRawDataLocation()) {
1282     CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1283             "dataLocation can only appear in array type");
1284   }
1285 
1286   if (N.getRawAssociated()) {
1287     CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1288             "associated can only appear in array type");
1289   }
1290 
1291   if (N.getRawAllocated()) {
1292     CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1293             "allocated can only appear in array type");
1294   }
1295 
1296   if (N.getRawRank()) {
1297     CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
1298             "rank can only appear in array type");
1299   }
1300 }
1301 
1302 void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1303   CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1304   if (auto *Types = N.getRawTypeArray()) {
1305     CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1306     for (Metadata *Ty : N.getTypeArray()->operands()) {
1307       CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1308     }
1309   }
1310   CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
1311           "invalid reference flags", &N);
1312 }
1313 
1314 void Verifier::visitDIFile(const DIFile &N) {
1315   CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1316   std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1317   if (Checksum) {
1318     CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1319             "invalid checksum kind", &N);
1320     size_t Size;
1321     switch (Checksum->Kind) {
1322     case DIFile::CSK_MD5:
1323       Size = 32;
1324       break;
1325     case DIFile::CSK_SHA1:
1326       Size = 40;
1327       break;
1328     case DIFile::CSK_SHA256:
1329       Size = 64;
1330       break;
1331     }
1332     CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1333     CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1334             "invalid checksum", &N);
1335   }
1336 }
1337 
1338 void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1339   CheckDI(N.isDistinct(), "compile units must be distinct", &N);
1340   CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1341 
1342   // Don't bother verifying the compilation directory or producer string
1343   // as those could be empty.
1344   CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1345           N.getRawFile());
1346   CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1347           N.getFile());
1348 
1349   CurrentSourceLang = (dwarf::SourceLanguage)N.getSourceLanguage();
1350 
1351   verifySourceDebugInfo(N, *N.getFile());
1352 
1353   CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1354           "invalid emission kind", &N);
1355 
1356   if (auto *Array = N.getRawEnumTypes()) {
1357     CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1358     for (Metadata *Op : N.getEnumTypes()->operands()) {
1359       auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1360       CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1361               "invalid enum type", &N, N.getEnumTypes(), Op);
1362     }
1363   }
1364   if (auto *Array = N.getRawRetainedTypes()) {
1365     CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1366     for (Metadata *Op : N.getRetainedTypes()->operands()) {
1367       CheckDI(
1368           Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
1369                                      !cast<DISubprogram>(Op)->isDefinition())),
1370           "invalid retained type", &N, Op);
1371     }
1372   }
1373   if (auto *Array = N.getRawGlobalVariables()) {
1374     CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1375     for (Metadata *Op : N.getGlobalVariables()->operands()) {
1376       CheckDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1377               "invalid global variable ref", &N, Op);
1378     }
1379   }
1380   if (auto *Array = N.getRawImportedEntities()) {
1381     CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1382     for (Metadata *Op : N.getImportedEntities()->operands()) {
1383       CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1384               &N, Op);
1385     }
1386   }
1387   if (auto *Array = N.getRawMacros()) {
1388     CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1389     for (Metadata *Op : N.getMacros()->operands()) {
1390       CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1391     }
1392   }
1393   CUVisited.insert(&N);
1394 }
1395 
1396 void Verifier::visitDISubprogram(const DISubprogram &N) {
1397   CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1398   CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1399   if (auto *F = N.getRawFile())
1400     CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1401   else
1402     CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1403   if (auto *T = N.getRawType())
1404     CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1405   CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1406           N.getRawContainingType());
1407   if (auto *Params = N.getRawTemplateParams())
1408     visitTemplateParams(N, *Params);
1409   if (auto *S = N.getRawDeclaration())
1410     CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1411             "invalid subprogram declaration", &N, S);
1412   if (auto *RawNode = N.getRawRetainedNodes()) {
1413     auto *Node = dyn_cast<MDTuple>(RawNode);
1414     CheckDI(Node, "invalid retained nodes list", &N, RawNode);
1415     for (Metadata *Op : Node->operands()) {
1416       CheckDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op) ||
1417                      isa<DIImportedEntity>(Op)),
1418               "invalid retained nodes, expected DILocalVariable, DILabel or "
1419               "DIImportedEntity",
1420               &N, Node, Op);
1421     }
1422   }
1423   CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
1424           "invalid reference flags", &N);
1425 
1426   auto *Unit = N.getRawUnit();
1427   if (N.isDefinition()) {
1428     // Subprogram definitions (not part of the type hierarchy).
1429     CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1430     CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
1431     CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1432     if (N.getFile())
1433       verifySourceDebugInfo(*N.getUnit(), *N.getFile());
1434   } else {
1435     // Subprogram declarations (part of the type hierarchy).
1436     CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1437     CheckDI(!N.getRawDeclaration(),
1438             "subprogram declaration must not have a declaration field");
1439   }
1440 
1441   if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1442     auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1443     CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1444     for (Metadata *Op : ThrownTypes->operands())
1445       CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1446               Op);
1447   }
1448 
1449   if (N.areAllCallsDescribed())
1450     CheckDI(N.isDefinition(),
1451             "DIFlagAllCallsDescribed must be attached to a definition");
1452 }
1453 
1454 void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1455   CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1456   CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1457           "invalid local scope", &N, N.getRawScope());
1458   if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1459     CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1460 }
1461 
1462 void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1463   visitDILexicalBlockBase(N);
1464 
1465   CheckDI(N.getLine() || !N.getColumn(),
1466           "cannot have column info without line info", &N);
1467 }
1468 
1469 void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1470   visitDILexicalBlockBase(N);
1471 }
1472 
1473 void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1474   CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1475   if (auto *S = N.getRawScope())
1476     CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1477   if (auto *S = N.getRawDecl())
1478     CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1479 }
1480 
1481 void Verifier::visitDINamespace(const DINamespace &N) {
1482   CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1483   if (auto *S = N.getRawScope())
1484     CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1485 }
1486 
1487 void Verifier::visitDIMacro(const DIMacro &N) {
1488   CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1489               N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1490           "invalid macinfo type", &N);
1491   CheckDI(!N.getName().empty(), "anonymous macro", &N);
1492   if (!N.getValue().empty()) {
1493     assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1494   }
1495 }
1496 
1497 void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1498   CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1499           "invalid macinfo type", &N);
1500   if (auto *F = N.getRawFile())
1501     CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1502 
1503   if (auto *Array = N.getRawElements()) {
1504     CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1505     for (Metadata *Op : N.getElements()->operands()) {
1506       CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1507     }
1508   }
1509 }
1510 
1511 void Verifier::visitDIArgList(const DIArgList &N) {
1512   CheckDI(!N.getNumOperands(),
1513           "DIArgList should have no operands other than a list of "
1514           "ValueAsMetadata",
1515           &N);
1516 }
1517 
1518 void Verifier::visitDIModule(const DIModule &N) {
1519   CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1520   CheckDI(!N.getName().empty(), "anonymous module", &N);
1521 }
1522 
1523 void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1524   CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1525 }
1526 
1527 void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1528   visitDITemplateParameter(N);
1529 
1530   CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1531           &N);
1532 }
1533 
1534 void Verifier::visitDITemplateValueParameter(
1535     const DITemplateValueParameter &N) {
1536   visitDITemplateParameter(N);
1537 
1538   CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1539               N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1540               N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1541           "invalid tag", &N);
1542 }
1543 
1544 void Verifier::visitDIVariable(const DIVariable &N) {
1545   if (auto *S = N.getRawScope())
1546     CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1547   if (auto *F = N.getRawFile())
1548     CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1549 }
1550 
1551 void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1552   // Checks common to all variables.
1553   visitDIVariable(N);
1554 
1555   CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1556   CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1557   // Check only if the global variable is not an extern
1558   if (N.isDefinition())
1559     CheckDI(N.getType(), "missing global variable type", &N);
1560   if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1561     CheckDI(isa<DIDerivedType>(Member),
1562             "invalid static data member declaration", &N, Member);
1563   }
1564 }
1565 
1566 void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1567   // Checks common to all variables.
1568   visitDIVariable(N);
1569 
1570   CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1571   CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1572   CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1573           "local variable requires a valid scope", &N, N.getRawScope());
1574   if (auto Ty = N.getType())
1575     CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1576 }
1577 
1578 void Verifier::visitDIAssignID(const DIAssignID &N) {
1579   CheckDI(!N.getNumOperands(), "DIAssignID has no arguments", &N);
1580   CheckDI(N.isDistinct(), "DIAssignID must be distinct", &N);
1581 }
1582 
1583 void Verifier::visitDILabel(const DILabel &N) {
1584   if (auto *S = N.getRawScope())
1585     CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
1586   if (auto *F = N.getRawFile())
1587     CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1588 
1589   CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1590   CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1591           "label requires a valid scope", &N, N.getRawScope());
1592 }
1593 
1594 void Verifier::visitDIExpression(const DIExpression &N) {
1595   CheckDI(N.isValid(), "invalid expression", &N);
1596 }
1597 
1598 void Verifier::visitDIGlobalVariableExpression(
1599     const DIGlobalVariableExpression &GVE) {
1600   CheckDI(GVE.getVariable(), "missing variable");
1601   if (auto *Var = GVE.getVariable())
1602     visitDIGlobalVariable(*Var);
1603   if (auto *Expr = GVE.getExpression()) {
1604     visitDIExpression(*Expr);
1605     if (auto Fragment = Expr->getFragmentInfo())
1606       verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1607   }
1608 }
1609 
1610 void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1611   CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1612   if (auto *T = N.getRawType())
1613     CheckDI(isType(T), "invalid type ref", &N, T);
1614   if (auto *F = N.getRawFile())
1615     CheckDI(isa<DIFile>(F), "invalid file", &N, F);
1616 }
1617 
1618 void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1619   CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1620               N.getTag() == dwarf::DW_TAG_imported_declaration,
1621           "invalid tag", &N);
1622   if (auto *S = N.getRawScope())
1623     CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1624   CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1625           N.getRawEntity());
1626 }
1627 
1628 void Verifier::visitComdat(const Comdat &C) {
1629   // In COFF the Module is invalid if the GlobalValue has private linkage.
1630   // Entities with private linkage don't have entries in the symbol table.
1631   if (TT.isOSBinFormatCOFF())
1632     if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1633       Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1634             GV);
1635 }
1636 
1637 void Verifier::visitModuleIdents() {
1638   const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1639   if (!Idents)
1640     return;
1641 
  // llvm.ident takes a list of metadata entries, each holding a single
  // string. Scan each llvm.ident entry and make sure that this requirement is
  // met.
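  // For example (illustrative producer string):
  //   !llvm.ident = !{!0}
  //   !0 = !{!"clang version 17.0.0"}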
1644   for (const MDNode *N : Idents->operands()) {
1645     Check(N->getNumOperands() == 1,
1646           "incorrect number of operands in llvm.ident metadata", N);
1647     Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1648           ("invalid value for llvm.ident metadata entry operand"
1649            "(the operand should be a string)"),
1650           N->getOperand(0));
1651   }
1652 }
1653 
1654 void Verifier::visitModuleCommandLines() {
1655   const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1656   if (!CommandLines)
1657     return;
1658 
  // llvm.commandline takes a list of metadata entries, each holding a single
  // string. Scan each llvm.commandline entry and make sure that this
  // requirement is met.
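  // For example (illustrative command line):
  //   !llvm.commandline = !{!0}
  //   !0 = !{!"-O2 -g"}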
1662   for (const MDNode *N : CommandLines->operands()) {
1663     Check(N->getNumOperands() == 1,
1664           "incorrect number of operands in llvm.commandline metadata", N);
1665     Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
1666           ("invalid value for llvm.commandline metadata entry operand"
1667            "(the operand should be a string)"),
1668           N->getOperand(0));
1669   }
1670 }
1671 
1672 void Verifier::visitModuleFlags() {
1673   const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1674   if (!Flags) return;
1675 
1676   // Scan each flag, and track the flags and requirements.
1677   DenseMap<const MDString*, const MDNode*> SeenIDs;
1678   SmallVector<const MDNode*, 16> Requirements;
1679   for (const MDNode *MDN : Flags->operands())
1680     visitModuleFlag(MDN, SeenIDs, Requirements);
1681 
1682   // Validate that the requirements in the module are valid.
1683   for (const MDNode *Requirement : Requirements) {
1684     const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1685     const Metadata *ReqValue = Requirement->getOperand(1);
1686 
1687     const MDNode *Op = SeenIDs.lookup(Flag);
1688     if (!Op) {
1689       CheckFailed("invalid requirement on flag, flag is not present in module",
1690                   Flag);
1691       continue;
1692     }
1693 
1694     if (Op->getOperand(2) != ReqValue) {
1695       CheckFailed(("invalid requirement on flag, "
1696                    "flag does not have the required value"),
1697                   Flag);
1698       continue;
1699     }
1700   }
1701 }
1702 
1703 void
1704 Verifier::visitModuleFlag(const MDNode *Op,
1705                           DenseMap<const MDString *, const MDNode *> &SeenIDs,
1706                           SmallVectorImpl<const MDNode *> &Requirements) {
1707   // Each module flag should have three arguments, the merge behavior (a
1708   // constant int), the flag ID (an MDString), and the value.
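  // For example, a well-formed flag node looks like (illustrative):
  //   !{i32 1, !"wchar_size", i32 4}
  // where the leading i32 1 selects the Error merge behavior.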
1709   Check(Op->getNumOperands() == 3,
1710         "incorrect number of operands in module flag", Op);
1711   Module::ModFlagBehavior MFB;
1712   if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1713     Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1714           "invalid behavior operand in module flag (expected constant integer)",
1715           Op->getOperand(0));
1716     Check(false,
1717           "invalid behavior operand in module flag (unexpected constant)",
1718           Op->getOperand(0));
1719   }
1720   MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1721   Check(ID, "invalid ID operand in module flag (expected metadata string)",
1722         Op->getOperand(1));
1723 
1724   // Check the values for behaviors with additional requirements.
1725   switch (MFB) {
1726   case Module::Error:
1727   case Module::Warning:
1728   case Module::Override:
1729     // These behavior types accept any value.
1730     break;
1731 
1732   case Module::Min: {
1733     auto *V = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1734     Check(V && V->getValue().isNonNegative(),
1735           "invalid value for 'min' module flag (expected constant non-negative "
1736           "integer)",
1737           Op->getOperand(2));
1738     break;
1739   }
1740 
1741   case Module::Max: {
1742     Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1743           "invalid value for 'max' module flag (expected constant integer)",
1744           Op->getOperand(2));
1745     break;
1746   }
1747 
1748   case Module::Require: {
1749     // The value should itself be an MDNode with two operands, a flag ID (an
1750     // MDString), and a value.
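    // For example (illustrative flag names):
    //   !{i32 3, !"foo", !{!"bar", i32 1}}
    // requires that module flag "bar" be present with value i32 1.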
1751     MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1752     Check(Value && Value->getNumOperands() == 2,
1753           "invalid value for 'require' module flag (expected metadata pair)",
1754           Op->getOperand(2));
1755     Check(isa<MDString>(Value->getOperand(0)),
1756           ("invalid value for 'require' module flag "
1757            "(first value operand should be a string)"),
1758           Value->getOperand(0));
1759 
1760     // Append it to the list of requirements, to check once all module flags are
1761     // scanned.
1762     Requirements.push_back(Value);
1763     break;
1764   }
1765 
1766   case Module::Append:
1767   case Module::AppendUnique: {
1768     // These behavior types require the operand be an MDNode.
1769     Check(isa<MDNode>(Op->getOperand(2)),
1770           "invalid value for 'append'-type module flag "
1771           "(expected a metadata node)",
1772           Op->getOperand(2));
1773     break;
1774   }
1775   }
1776 
1777   // Unless this is a "requires" flag, check the ID is unique.
1778   if (MFB != Module::Require) {
1779     bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1780     Check(Inserted,
1781           "module flag identifiers must be unique (or of 'require' type)", ID);
1782   }
1783 
1784   if (ID->getString() == "wchar_size") {
    ConstantInt *Value =
        mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1787     Check(Value, "wchar_size metadata requires constant integer argument");
1788   }
1789 
1790   if (ID->getString() == "Linker Options") {
1791     // If the llvm.linker.options named metadata exists, we assume that the
1792     // bitcode reader has upgraded the module flag. Otherwise the flag might
1793     // have been created by a client directly.
1794     Check(M.getNamedMetadata("llvm.linker.options"),
1795           "'Linker Options' named metadata no longer supported");
1796   }
1797 
1798   if (ID->getString() == "SemanticInterposition") {
1799     ConstantInt *Value =
1800         mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1801     Check(Value,
1802           "SemanticInterposition metadata requires constant integer argument");
1803   }
1804 
1805   if (ID->getString() == "CG Profile") {
1806     for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1807       visitModuleFlagCGProfileEntry(MDO);
1808   }
1809 }
1810 
1811 void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
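  // Each entry is a (caller, callee, count) triple, e.g. (illustrative):
  //   !{ptr @from, ptr @to, i64 2500}
  // Either function operand may be null.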
1812   auto CheckFunction = [&](const MDOperand &FuncMDO) {
1813     if (!FuncMDO)
1814       return;
1815     auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1816     Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
1817           "expected a Function or null", FuncMDO);
1818   };
1819   auto Node = dyn_cast_or_null<MDNode>(MDO);
  Check(Node && Node->getNumOperands() == 3, "expected an MDNode triple", MDO);
1821   CheckFunction(Node->getOperand(0));
1822   CheckFunction(Node->getOperand(1));
1823   auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1824   Check(Count && Count->getType()->isIntegerTy(),
1825         "expected an integer constant", Node->getOperand(2));
1826 }
1827 
1828 void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
1829   for (Attribute A : Attrs) {
1830 
1831     if (A.isStringAttribute()) {
1832 #define GET_ATTR_NAMES
1833 #define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
1834 #define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME)                             \
1835   if (A.getKindAsString() == #DISPLAY_NAME) {                                  \
1836     auto V = A.getValueAsString();                                             \
1837     if (!(V.empty() || V == "true" || V == "false"))                           \
1838       CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V +    \
1839                   "");                                                         \
1840   }
1841 
1842 #include "llvm/IR/Attributes.inc"
1843       continue;
1844     }
1845 
1846     if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
1847       CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
1848                   V);
1849       return;
1850     }
1851   }
1852 }
1853 
// verifyParameterAttrs - Check the given attributes for an argument or return
// value of the specified type.  The value V is printed in error messages.
1856 void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
1857                                     const Value *V) {
1858   if (!Attrs.hasAttributes())
1859     return;
1860 
1861   verifyAttributeTypes(Attrs, V);
1862 
1863   for (Attribute Attr : Attrs)
1864     Check(Attr.isStringAttribute() ||
1865               Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
1866           "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
1867           V);
1868 
1869   if (Attrs.hasAttribute(Attribute::ImmArg)) {
1870     Check(Attrs.getNumAttributes() == 1,
1871           "Attribute 'immarg' is incompatible with other attributes", V);
1872   }
1873 
1874   // Check for mutually incompatible attributes.  Only inreg is compatible with
1875   // sret.
1876   unsigned AttrCount = 0;
1877   AttrCount += Attrs.hasAttribute(Attribute::ByVal);
1878   AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
1879   AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
1880   AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
1881                Attrs.hasAttribute(Attribute::InReg);
1882   AttrCount += Attrs.hasAttribute(Attribute::Nest);
1883   AttrCount += Attrs.hasAttribute(Attribute::ByRef);
1884   Check(AttrCount <= 1,
1885         "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
1886         "'byref', and 'sret' are incompatible!",
1887         V);
1888 
1889   Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
1890           Attrs.hasAttribute(Attribute::ReadOnly)),
1891         "Attributes "
1892         "'inalloca and readonly' are incompatible!",
1893         V);
1894 
1895   Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
1896           Attrs.hasAttribute(Attribute::Returned)),
1897         "Attributes "
1898         "'sret and returned' are incompatible!",
1899         V);
1900 
1901   Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
1902           Attrs.hasAttribute(Attribute::SExt)),
1903         "Attributes "
1904         "'zeroext and signext' are incompatible!",
1905         V);
1906 
1907   Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1908           Attrs.hasAttribute(Attribute::ReadOnly)),
1909         "Attributes "
1910         "'readnone and readonly' are incompatible!",
1911         V);
1912 
1913   Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1914           Attrs.hasAttribute(Attribute::WriteOnly)),
1915         "Attributes "
1916         "'readnone and writeonly' are incompatible!",
1917         V);
1918 
1919   Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
1920           Attrs.hasAttribute(Attribute::WriteOnly)),
1921         "Attributes "
1922         "'readonly and writeonly' are incompatible!",
1923         V);
1924 
1925   Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
1926           Attrs.hasAttribute(Attribute::AlwaysInline)),
1927         "Attributes "
1928         "'noinline and alwaysinline' are incompatible!",
1929         V);
1930 
1931   AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
1932   for (Attribute Attr : Attrs) {
1933     if (!Attr.isStringAttribute() &&
1934         IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
1935       CheckFailed("Attribute '" + Attr.getAsString() +
1936                   "' applied to incompatible type!", V);
1937       return;
1938     }
1939   }
1940 
1941   if (isa<PointerType>(Ty)) {
1942     if (Attrs.hasAttribute(Attribute::ByVal)) {
1943       if (Attrs.hasAttribute(Attribute::Alignment)) {
1944         Align AttrAlign = Attrs.getAlignment().valueOrOne();
1945         Align MaxAlign(ParamMaxAlignment);
1946         Check(AttrAlign <= MaxAlign,
1947               "Attribute 'align' exceed the max size 2^14", V);
1948       }
1949       SmallPtrSet<Type *, 4> Visited;
1950       Check(Attrs.getByValType()->isSized(&Visited),
1951             "Attribute 'byval' does not support unsized types!", V);
1952     }
1953     if (Attrs.hasAttribute(Attribute::ByRef)) {
1954       SmallPtrSet<Type *, 4> Visited;
1955       Check(Attrs.getByRefType()->isSized(&Visited),
1956             "Attribute 'byref' does not support unsized types!", V);
1957     }
1958     if (Attrs.hasAttribute(Attribute::InAlloca)) {
1959       SmallPtrSet<Type *, 4> Visited;
1960       Check(Attrs.getInAllocaType()->isSized(&Visited),
1961             "Attribute 'inalloca' does not support unsized types!", V);
1962     }
1963     if (Attrs.hasAttribute(Attribute::Preallocated)) {
1964       SmallPtrSet<Type *, 4> Visited;
1965       Check(Attrs.getPreallocatedType()->isSized(&Visited),
1966             "Attribute 'preallocated' does not support unsized types!", V);
1967     }
1968   }
1969 
1970   if (Attrs.hasAttribute(Attribute::NoFPClass)) {
1971     uint64_t Val = Attrs.getAttribute(Attribute::NoFPClass).getValueAsInt();
1972     Check(Val != 0, "Attribute 'nofpclass' must have at least one test bit set",
1973           V);
1974     Check((Val & ~static_cast<unsigned>(fcAllFlags)) == 0,
1975           "Invalid value for 'nofpclass' test mask", V);
1976   }
1977 }
1978 
1979 void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
1980                                             const Value *V) {
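  // These string attributes carry their value as a base-10 unsigned integer
  // encoded in a string, e.g. "warn-stack-size"="80" (illustrative value).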
1981   if (Attrs.hasFnAttr(Attr)) {
1982     StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
1983     unsigned N;
1984     if (S.getAsInteger(10, N))
1985       CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
1986   }
1987 }
1988 
1989 // Check parameter attributes against a function type.
1990 // The value V is printed in error messages.
1991 void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
1992                                    const Value *V, bool IsIntrinsic,
1993                                    bool IsInlineAsm) {
1994   if (Attrs.isEmpty())
1995     return;
1996 
1997   if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
1998     Check(Attrs.hasParentContext(Context),
1999           "Attribute list does not match Module context!", &Attrs, V);
2000     for (const auto &AttrSet : Attrs) {
2001       Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
2002             "Attribute set does not match Module context!", &AttrSet, V);
2003       for (const auto &A : AttrSet) {
2004         Check(A.hasParentContext(Context),
2005               "Attribute does not match Module context!", &A, V);
2006       }
2007     }
2008   }
2009 
2010   bool SawNest = false;
2011   bool SawReturned = false;
2012   bool SawSRet = false;
2013   bool SawSwiftSelf = false;
2014   bool SawSwiftAsync = false;
2015   bool SawSwiftError = false;
2016 
2017   // Verify return value attributes.
2018   AttributeSet RetAttrs = Attrs.getRetAttrs();
2019   for (Attribute RetAttr : RetAttrs)
2020     Check(RetAttr.isStringAttribute() ||
2021               Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
2022           "Attribute '" + RetAttr.getAsString() +
2023               "' does not apply to function return values",
2024           V);
2025 
2026   verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
2027 
2028   // Verify parameter attributes.
2029   for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
2030     Type *Ty = FT->getParamType(i);
2031     AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
2032 
2033     if (!IsIntrinsic) {
2034       Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
2035             "immarg attribute only applies to intrinsics", V);
2036       if (!IsInlineAsm)
2037         Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
2038               "Attribute 'elementtype' can only be applied to intrinsics"
2039               " and inline asm.",
2040               V);
2041     }
2042 
2043     verifyParameterAttrs(ArgAttrs, Ty, V);
2044 
2045     if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2046       Check(!SawNest, "More than one parameter has attribute nest!", V);
2047       SawNest = true;
2048     }
2049 
2050     if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2051       Check(!SawReturned, "More than one parameter has attribute returned!", V);
2052       Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
2053             "Incompatible argument and return types for 'returned' attribute",
2054             V);
2055       SawReturned = true;
2056     }
2057 
2058     if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
2059       Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
2060       Check(i == 0 || i == 1,
2061             "Attribute 'sret' is not on first or second parameter!", V);
2062       SawSRet = true;
2063     }
2064 
2065     if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
2066       Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
2067       SawSwiftSelf = true;
2068     }
2069 
2070     if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
2071       Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
2072       SawSwiftAsync = true;
2073     }
2074 
2075     if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
2076       Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
2077       SawSwiftError = true;
2078     }
2079 
2080     if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
2081       Check(i == FT->getNumParams() - 1,
2082             "inalloca isn't on the last parameter!", V);
2083     }
2084   }
2085 
2086   if (!Attrs.hasFnAttrs())
2087     return;
2088 
2089   verifyAttributeTypes(Attrs.getFnAttrs(), V);
2090   for (Attribute FnAttr : Attrs.getFnAttrs())
2091     Check(FnAttr.isStringAttribute() ||
2092               Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
2093           "Attribute '" + FnAttr.getAsString() +
2094               "' does not apply to functions!",
2095           V);
2096 
2097   Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
2098           Attrs.hasFnAttr(Attribute::AlwaysInline)),
2099         "Attributes 'noinline and alwaysinline' are incompatible!", V);
2100 
2101   if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
2102     Check(Attrs.hasFnAttr(Attribute::NoInline),
2103           "Attribute 'optnone' requires 'noinline'!", V);
2104 
2105     Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
2106           "Attributes 'optsize and optnone' are incompatible!", V);
2107 
2108     Check(!Attrs.hasFnAttr(Attribute::MinSize),
2109           "Attributes 'minsize and optnone' are incompatible!", V);
2110   }
2111 
2112   if (Attrs.hasFnAttr("aarch64_pstate_sm_enabled")) {
2113     Check(!Attrs.hasFnAttr("aarch64_pstate_sm_compatible"),
2114            "Attributes 'aarch64_pstate_sm_enabled and "
2115            "aarch64_pstate_sm_compatible' are incompatible!",
2116            V);
2117   }
2118 
2119   if (Attrs.hasFnAttr("aarch64_pstate_za_new")) {
2120     Check(!Attrs.hasFnAttr("aarch64_pstate_za_preserved"),
2121            "Attributes 'aarch64_pstate_za_new and aarch64_pstate_za_preserved' "
2122            "are incompatible!",
2123            V);
2124 
2125     Check(!Attrs.hasFnAttr("aarch64_pstate_za_shared"),
2126            "Attributes 'aarch64_pstate_za_new and aarch64_pstate_za_shared' "
2127            "are incompatible!",
2128            V);
2129   }
2130 
2131   if (Attrs.hasFnAttr(Attribute::JumpTable)) {
2132     const GlobalValue *GV = cast<GlobalValue>(V);
2133     Check(GV->hasGlobalUnnamedAddr(),
2134           "Attribute 'jumptable' requires 'unnamed_addr'", V);
2135   }
2136 
2137   if (auto Args = Attrs.getFnAttrs().getAllocSizeArgs()) {
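    // allocsize names the element-size parameter index and, optionally, the
    // element-count parameter index, e.g. allocsize(0) or allocsize(0, 1);
    // both must refer to integer parameters.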
2138     auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
2139       if (ParamNo >= FT->getNumParams()) {
2140         CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
2141         return false;
2142       }
2143 
2144       if (!FT->getParamType(ParamNo)->isIntegerTy()) {
2145         CheckFailed("'allocsize' " + Name +
2146                         " argument must refer to an integer parameter",
2147                     V);
2148         return false;
2149       }
2150 
2151       return true;
2152     };
2153 
2154     if (!CheckParam("element size", Args->first))
2155       return;
2156 
2157     if (Args->second && !CheckParam("number of elements", *Args->second))
2158       return;
2159   }
2160 
2161   if (Attrs.hasFnAttr(Attribute::AllocKind)) {
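    // e.g. allockind("alloc,uninitialized") on a malloc-like function; the
    // kind string must name exactly one of alloc, realloc, or free.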
2162     AllocFnKind K = Attrs.getAllocKind();
2163     AllocFnKind Type =
2164         K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
2165     if (!is_contained(
2166             {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
2167             Type))
2168       CheckFailed(
2169           "'allockind()' requires exactly one of alloc, realloc, and free");
2170     if ((Type == AllocFnKind::Free) &&
2171         ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
2172                AllocFnKind::Aligned)) != AllocFnKind::Unknown))
2173       CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
2174                   "or aligned modifiers.");
2175     AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
2176     if ((K & ZeroedUninit) == ZeroedUninit)
2177       CheckFailed("'allockind()' can't be both zeroed and uninitialized");
2178   }
2179 
2180   if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
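    // e.g. vscale_range(2,16): both bounds, when present, must be non-zero
    // powers of two, and the minimum may not exceed the maximum.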
2181     unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
2182     if (VScaleMin == 0)
2183       CheckFailed("'vscale_range' minimum must be greater than 0", V);
2184     else if (!isPowerOf2_32(VScaleMin))
2185       CheckFailed("'vscale_range' minimum must be power-of-two value", V);
2186     std::optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
2187     if (VScaleMax && VScaleMin > VScaleMax)
2188       CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
2189     else if (VScaleMax && !isPowerOf2_32(*VScaleMax))
2190       CheckFailed("'vscale_range' maximum must be power-of-two value", V);
2191   }
2192 
2193   if (Attrs.hasFnAttr("frame-pointer")) {
2194     StringRef FP = Attrs.getFnAttr("frame-pointer").getValueAsString();
2195     if (FP != "all" && FP != "non-leaf" && FP != "none")
2196       CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
2197   }
2198 
2199   checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
2200   checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
2201   checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
2202 }
2203 
2204 void Verifier::verifyFunctionMetadata(
2205     ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
2206   for (const auto &Pair : MDs) {
2207     if (Pair.first == LLVMContext::MD_prof) {
2208       MDNode *MD = Pair.second;
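      // A function-level !prof annotation is expected to look like
      // (illustrative count): !{!"function_entry_count", i64 2000}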
2209       Check(MD->getNumOperands() >= 2,
2210             "!prof annotations should have no less than 2 operands", MD);
2211 
2212       // Check first operand.
2213       Check(MD->getOperand(0) != nullptr, "first operand should not be null",
2214             MD);
2215       Check(isa<MDString>(MD->getOperand(0)),
2216             "expected string with name of the !prof annotation", MD);
2217       MDString *MDS = cast<MDString>(MD->getOperand(0));
2218       StringRef ProfName = MDS->getString();
2219       Check(ProfName.equals("function_entry_count") ||
2220                 ProfName.equals("synthetic_function_entry_count"),
2221             "first operand should be 'function_entry_count'"
2222             " or 'synthetic_function_entry_count'",
2223             MD);
2224 
2225       // Check second operand.
2226       Check(MD->getOperand(1) != nullptr, "second operand should not be null",
2227             MD);
2228       Check(isa<ConstantAsMetadata>(MD->getOperand(1)),
2229             "expected integer argument to function_entry_count", MD);
2230     } else if (Pair.first == LLVMContext::MD_kcfi_type) {
2231       MDNode *MD = Pair.second;
2232       Check(MD->getNumOperands() == 1,
2233             "!kcfi_type must have exactly one operand", MD);
2234       Check(MD->getOperand(0) != nullptr, "!kcfi_type operand must not be null",
2235             MD);
2236       Check(isa<ConstantAsMetadata>(MD->getOperand(0)),
2237             "expected a constant operand for !kcfi_type", MD);
2238       Constant *C = cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
2239       Check(isa<ConstantInt>(C),
2240             "expected a constant integer operand for !kcfi_type", MD);
2241       IntegerType *Type = cast<ConstantInt>(C)->getType();
2242       Check(Type->getBitWidth() == 32,
2243             "expected a 32-bit integer constant operand for !kcfi_type", MD);
2244     }
2245   }
2246 }
2247 
2248 void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
2249   if (!ConstantExprVisited.insert(EntryC).second)
2250     return;
2251 
2252   SmallVector<const Constant *, 16> Stack;
2253   Stack.push_back(EntryC);
2254 
2255   while (!Stack.empty()) {
2256     const Constant *C = Stack.pop_back_val();
2257 
2258     // Check this constant expression.
2259     if (const auto *CE = dyn_cast<ConstantExpr>(C))
2260       visitConstantExpr(CE);
2261 
2262     if (const auto *GV = dyn_cast<GlobalValue>(C)) {
2263       // Global Values get visited separately, but we do need to make sure
2264       // that the global value is in the correct module
2265       Check(GV->getParent() == &M, "Referencing global in another module!",
2266             EntryC, &M, GV, GV->getParent());
2267       continue;
2268     }
2269 
2270     // Visit all sub-expressions.
2271     for (const Use &U : C->operands()) {
2272       const auto *OpC = dyn_cast<Constant>(U);
2273       if (!OpC)
2274         continue;
2275       if (!ConstantExprVisited.insert(OpC).second)
2276         continue;
2277       Stack.push_back(OpC);
2278     }
2279   }
2280 }
2281 
2282 void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2283   if (CE->getOpcode() == Instruction::BitCast)
2284     Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2285                                 CE->getType()),
2286           "Invalid bitcast", CE);
2287 }
2288 
2289 bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2290   // There shouldn't be more attribute sets than there are parameters plus the
2291   // function and return value.
2292   return Attrs.getNumAttrSets() <= Params + 2;
2293 }
2294 
2295 void Verifier::verifyInlineAsmCall(const CallBase &Call) {
2296   const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
2297   unsigned ArgNo = 0;
2298   unsigned LabelNo = 0;
2299   for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
2300     if (CI.Type == InlineAsm::isLabel) {
2301       ++LabelNo;
2302       continue;
2303     }
2304 
2305     // Only deal with constraints that correspond to call arguments.
2306     if (!CI.hasArg())
2307       continue;
2308 
2309     if (CI.isIndirect) {
2310       const Value *Arg = Call.getArgOperand(ArgNo);
2311       Check(Arg->getType()->isPointerTy(),
2312             "Operand for indirect constraint must have pointer type", &Call);
2313 
2314       Check(Call.getParamElementType(ArgNo),
2315             "Operand for indirect constraint must have elementtype attribute",
2316             &Call);
2317     } else {
2318       Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
2319             "Elementtype attribute can only be applied for indirect "
2320             "constraints",
2321             &Call);
2322     }
2323 
2324     ArgNo++;
2325   }
2326 
2327   if (auto *CallBr = dyn_cast<CallBrInst>(&Call)) {
2328     Check(LabelNo == CallBr->getNumIndirectDests(),
2329           "Number of label constraints does not match number of callbr dests",
2330           &Call);
2331   } else {
2332     Check(LabelNo == 0, "Label constraints can only be used with callbr",
2333           &Call);
2334   }
2335 }
2336 
2337 /// Verify that statepoint intrinsic is well formed.
2338 void Verifier::verifyStatepoint(const CallBase &Call) {
2339   assert(Call.getCalledFunction() &&
2340          Call.getCalledFunction()->getIntrinsicID() ==
2341              Intrinsic::experimental_gc_statepoint);
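
  // Operand layout checked below: 1 = number of patch bytes, 2 = callee
  // (which must carry an elementtype attribute), 3 = number of call
  // arguments, 4 = flags, 5..4+n = the wrapped call's arguments, followed by
  // the (deprecated, must-be-zero) transition and deopt argument counts.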
2342 
2343   Check(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
2344             !Call.onlyAccessesArgMemory(),
2345         "gc.statepoint must read and write all memory to preserve "
2346         "reordering restrictions required by safepoint semantics",
2347         Call);
2348 
2349   const int64_t NumPatchBytes =
2350       cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2351   assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2352   Check(NumPatchBytes >= 0,
2353         "gc.statepoint number of patchable bytes must be "
2354         "positive",
2355         Call);
2356 
2357   Type *TargetElemType = Call.getParamElementType(2);
2358   Check(TargetElemType,
2359         "gc.statepoint callee argument must have elementtype attribute", Call);
2360   FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
2361   Check(TargetFuncType,
2362         "gc.statepoint callee elementtype must be function type", Call);
2363 
  const int NumCallArgs =
      cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2365   Check(NumCallArgs >= 0,
2366         "gc.statepoint number of arguments to underlying call "
2367         "must be positive",
2368         Call);
2369   const int NumParams = (int)TargetFuncType->getNumParams();
2370   if (TargetFuncType->isVarArg()) {
2371     Check(NumCallArgs >= NumParams,
2372           "gc.statepoint mismatch in number of vararg call args", Call);
2373 
2374     // TODO: Remove this limitation
2375     Check(TargetFuncType->getReturnType()->isVoidTy(),
2376           "gc.statepoint doesn't support wrapping non-void "
2377           "vararg functions yet",
2378           Call);
2379   } else
2380     Check(NumCallArgs == NumParams,
2381           "gc.statepoint mismatch in number of call args", Call);
2382 
  const uint64_t Flags =
      cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2385   Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2386         "unknown flag used in gc.statepoint flags argument", Call);
2387 
2388   // Verify that the types of the call parameter arguments match
2389   // the type of the wrapped callee.
2390   AttributeList Attrs = Call.getAttributes();
2391   for (int i = 0; i < NumParams; i++) {
2392     Type *ParamType = TargetFuncType->getParamType(i);
2393     Type *ArgType = Call.getArgOperand(5 + i)->getType();
2394     Check(ArgType == ParamType,
2395           "gc.statepoint call argument does not match wrapped "
2396           "function type",
2397           Call);
2398 
2399     if (TargetFuncType->isVarArg()) {
2400       AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
2401       Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
2402             "Attribute 'sret' cannot be used for vararg call arguments!", Call);
2403     }
2404   }
2405 
2406   const int EndCallArgsInx = 4 + NumCallArgs;
2407 
2408   const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2409   Check(isa<ConstantInt>(NumTransitionArgsV),
2410         "gc.statepoint number of transition arguments "
2411         "must be constant integer",
2412         Call);
2413   const int NumTransitionArgs =
2414       cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2415   Check(NumTransitionArgs == 0,
2416         "gc.statepoint w/inline transition bundle is deprecated", Call);
2417   const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2418 
2419   const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2420   Check(isa<ConstantInt>(NumDeoptArgsV),
2421         "gc.statepoint number of deoptimization arguments "
2422         "must be constant integer",
2423         Call);
2424   const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2425   Check(NumDeoptArgs == 0,
2426         "gc.statepoint w/inline deopt operands is deprecated", Call);
2427 
2428   const int ExpectedNumArgs = 7 + NumCallArgs;
2429   Check(ExpectedNumArgs == (int)Call.arg_size(),
2430         "gc.statepoint too many arguments", Call);
2431 
2432   // Check that the only uses of this gc.statepoint are gc.result or
2433   // gc.relocate calls which are tied to this statepoint and thus part
  // of the same statepoint sequence.
2435   for (const User *U : Call.users()) {
2436     const CallInst *UserCall = dyn_cast<const CallInst>(U);
2437     Check(UserCall, "illegal use of statepoint token", Call, U);
2438     if (!UserCall)
2439       continue;
2440     Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2441           "gc.result or gc.relocate are the only value uses "
2442           "of a gc.statepoint",
2443           Call, U);
2444     if (isa<GCResultInst>(UserCall)) {
2445       Check(UserCall->getArgOperand(0) == &Call,
2446             "gc.result connected to wrong gc.statepoint", Call, UserCall);
    } else if (isa<GCRelocateInst>(UserCall)) {
2448       Check(UserCall->getArgOperand(0) == &Call,
2449             "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2450     }
2451   }
2452 
2453   // Note: It is legal for a single derived pointer to be listed multiple
2454   // times.  It's non-optimal, but it is legal.  It can also happen after
2455   // insertion if we strip a bitcast away.
2456   // Note: It is really tempting to check that each base is relocated and
2457   // that a derived pointer is never reused as a base pointer.  This turns
2458   // out to be problematic since optimizations run after safepoint insertion
2459   // can recognize equality properties that the insertion logic doesn't know
2460   // about.  See example statepoint.ll in the verifier subdirectory
2461 }
2462 
2463 void Verifier::verifyFrameRecoverIndices() {
2464   for (auto &Counts : FrameEscapeInfo) {
2465     Function *F = Counts.first;
2466     unsigned EscapedObjectCount = Counts.second.first;
2467     unsigned MaxRecoveredIndex = Counts.second.second;
2468     Check(MaxRecoveredIndex <= EscapedObjectCount,
2469           "all indices passed to llvm.localrecover must be less than the "
2470           "number of arguments passed to llvm.localescape in the parent "
2471           "function",
2472           F);
2473   }
2474 }
2475 
2476 static Instruction *getSuccPad(Instruction *Terminator) {
2477   BasicBlock *UnwindDest;
2478   if (auto *II = dyn_cast<InvokeInst>(Terminator))
2479     UnwindDest = II->getUnwindDest();
2480   else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2481     UnwindDest = CSI->getUnwindDest();
2482   else
2483     UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2484   return UnwindDest->getFirstNonPHI();
2485 }
2486 
2487 void Verifier::verifySiblingFuncletUnwinds() {
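  // SiblingFuncletInfo maps each EH pad to a terminator whose unwind edge
  // determines the pad's (single) unwind successor, so following those edges
  // traces a simple path. Revisiting a pad that is still active on the
  // current walk means the unwind edges form a cycle, which is diagnosed
  // below.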
2488   SmallPtrSet<Instruction *, 8> Visited;
2489   SmallPtrSet<Instruction *, 8> Active;
2490   for (const auto &Pair : SiblingFuncletInfo) {
2491     Instruction *PredPad = Pair.first;
2492     if (Visited.count(PredPad))
2493       continue;
2494     Active.insert(PredPad);
2495     Instruction *Terminator = Pair.second;
2496     do {
2497       Instruction *SuccPad = getSuccPad(Terminator);
2498       if (Active.count(SuccPad)) {
2499         // Found a cycle; report error
2500         Instruction *CyclePad = SuccPad;
2501         SmallVector<Instruction *, 8> CycleNodes;
2502         do {
2503           CycleNodes.push_back(CyclePad);
2504           Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2505           if (CycleTerminator != CyclePad)
2506             CycleNodes.push_back(CycleTerminator);
2507           CyclePad = getSuccPad(CycleTerminator);
2508         } while (CyclePad != SuccPad);
2509         Check(false, "EH pads can't handle each other's exceptions",
2510               ArrayRef<Instruction *>(CycleNodes));
2511       }
2512       // Don't re-walk a node we've already checked
2513       if (!Visited.insert(SuccPad).second)
2514         break;
2515       // Walk to this successor if it has a map entry.
2516       PredPad = SuccPad;
2517       auto TermI = SiblingFuncletInfo.find(PredPad);
2518       if (TermI == SiblingFuncletInfo.end())
2519         break;
2520       Terminator = TermI->second;
2521       Active.insert(PredPad);
2522     } while (true);
2523     // Each node only has one successor, so we've walked all the active
2524     // nodes' successors.
2525     Active.clear();
2526   }
2527 }
2528 
2529 void Verifier::verifyConvergenceControl(Function &F) {
2530   DenseMap<BasicBlock *, SmallVector<CallBase *, 8>> LiveTokenMap;
2531   DenseMap<const Cycle *, const CallBase *> CycleHearts;
2532 
2533   // Just like the DominatorTree, compute the CycleInfo locally so that we
  // can run the verifier outside of a pass manager without relying on
  // potentially outdated analysis results.
2536   CycleInfo CI;
2537   CI.compute(F);
2538 
2539   auto checkBundle = [&](OperandBundleUse &Bundle, CallBase *CB,
2540                          SmallVectorImpl<CallBase *> &LiveTokens) {
2541     Check(Bundle.Inputs.size() == 1 && Bundle.Inputs[0]->getType()->isTokenTy(),
2542           "The 'convergencectrl' bundle requires exactly one token use.", CB);
2543 
2544     Value *Token = Bundle.Inputs[0].get();
2545     auto *Def = dyn_cast<CallBase>(Token);
2546     Check(Def != nullptr,
2547           "Convergence control tokens can only be produced by call "
2548           "instructions.",
2549           Token);
2550 
2551     Check(llvm::is_contained(LiveTokens, Token),
2552           "Convergence region is not well-nested.", Token, CB);
2553 
2554     while (LiveTokens.back() != Token)
2555       LiveTokens.pop_back();
2556 
2557     // Check static rules about cycles.
2558     auto *BB = CB->getParent();
2559     auto *BBCycle = CI.getCycle(BB);
2560     if (!BBCycle)
2561       return;
2562 
2563     BasicBlock *DefBB = Def->getParent();
2564     if (DefBB == BB || BBCycle->contains(DefBB)) {
      // Degenerate occurrence of a loop intrinsic.
2566       return;
2567     }
2568 
2569     auto *II = dyn_cast<IntrinsicInst>(CB);
2570     Check(II &&
2571               II->getIntrinsicID() == Intrinsic::experimental_convergence_loop,
2572           "Convergence token used by an instruction other than "
2573           "llvm.experimental.convergence.loop in a cycle that does "
2574           "not contain the token's definition.",
2575           CB, CI.print(BBCycle));
2576 
2577     while (true) {
2578       auto *Parent = BBCycle->getParentCycle();
2579       if (!Parent || Parent->contains(DefBB))
2580         break;
2581       BBCycle = Parent;
2582     };
2583 
2584     Check(BBCycle->isReducible() && BB == BBCycle->getHeader(),
2585           "Cycle heart must dominate all blocks in the cycle.", CB, BB,
2586           CI.print(BBCycle));
2587     Check(!CycleHearts.count(BBCycle),
2588           "Two static convergence token uses in a cycle that does "
2589           "not contain either token's definition.",
2590           CB, CycleHearts[BBCycle], CI.print(BBCycle));
2591     CycleHearts[BBCycle] = CB;
2592   };
2593 
2594   ReversePostOrderTraversal<Function *> RPOT(&F);
2595   SmallVector<CallBase *, 8> LiveTokens;
2596   for (BasicBlock *BB : RPOT) {
2597     LiveTokens.clear();
2598     auto LTIt = LiveTokenMap.find(BB);
2599     if (LTIt != LiveTokenMap.end()) {
2600       LiveTokens = std::move(LTIt->second);
2601       LiveTokenMap.erase(LTIt);
2602     }
2603 
2604     for (Instruction &I : *BB) {
2605       CallBase *CB = dyn_cast<CallBase>(&I);
2606       if (!CB)
2607         continue;
2608 
2609       auto Bundle = CB->getOperandBundle(LLVMContext::OB_convergencectrl);
2610       if (Bundle)
2611         checkBundle(*Bundle, CB, LiveTokens);
2612 
2613       if (CB->getType()->isTokenTy())
2614         LiveTokens.push_back(CB);
2615     }
2616 
2617     // Propagate token liveness
2618     for (BasicBlock *Succ : successors(BB)) {
2619       DomTreeNode *SuccNode = DT.getNode(Succ);
2620       LTIt = LiveTokenMap.find(Succ);
2621       if (LTIt == LiveTokenMap.end()) {
2622         // We're the first predecessor: all tokens which dominate the
2623         // successor are live for now.
2624         LTIt = LiveTokenMap.try_emplace(Succ).first;
2625         for (CallBase *LiveToken : LiveTokens) {
2626           if (!DT.dominates(DT.getNode(LiveToken->getParent()), SuccNode))
2627             break;
2628           LTIt->second.push_back(LiveToken);
2629         }
2630       } else {
2631         // Compute the intersection of live tokens.
2632         auto It = llvm::partition(LTIt->second, [&LiveTokens](CallBase *Token) {
2633           return llvm::is_contained(LiveTokens, Token);
2634         });
2635         LTIt->second.erase(It, LTIt->second.end());
2636       }
2637     }
2638   }
2639 }
2640 
2641 // visitFunction - Verify that a function is ok.
2642 //
2643 void Verifier::visitFunction(const Function &F) {
2644   visitGlobalValue(F);
2645 
2646   // Check function arguments.
2647   FunctionType *FT = F.getFunctionType();
2648   unsigned NumArgs = F.arg_size();
2649 
2650   Check(&Context == &F.getContext(),
2651         "Function context does not match Module context!", &F);
2652 
2653   Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2654   Check(FT->getNumParams() == NumArgs,
2655         "# formal arguments must match # of arguments for function type!", &F,
2656         FT);
2657   Check(F.getReturnType()->isFirstClassType() ||
2658             F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2659         "Functions cannot return aggregate values!", &F);
2660 
2661   Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2662         "Invalid struct return type!", &F);
2663 
2664   AttributeList Attrs = F.getAttributes();
2665 
2666   Check(verifyAttributeCount(Attrs, FT->getNumParams()),
2667         "Attribute after last parameter!", &F);
2668 
2669   bool IsIntrinsic = F.isIntrinsic();
2670 
2671   // Check function attributes.
2672   verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
2673 
  // On function declarations/definitions, we do not support the builtin
  // attribute. We do not check this in verifyFunctionAttrs since that is
  // checking for attributes that can or cannot ever be on functions.
2677   Check(!Attrs.hasFnAttr(Attribute::Builtin),
2678         "Attribute 'builtin' can only be applied to a callsite.", &F);
2679 
2680   Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
2681         "Attribute 'elementtype' can only be applied to a callsite.", &F);
2682 
2683   // Check that this function meets the restrictions on this calling convention.
2684   // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2685   // restrictions can be lifted.
2686   switch (F.getCallingConv()) {
2687   default:
2688   case CallingConv::C:
2689     break;
2690   case CallingConv::X86_INTR: {
2691     Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
2692           "Calling convention parameter requires byval", &F);
2693     break;
2694   }
2695   case CallingConv::AMDGPU_KERNEL:
2696   case CallingConv::SPIR_KERNEL:
2697   case CallingConv::AMDGPU_CS_Chain:
2698   case CallingConv::AMDGPU_CS_ChainPreserve:
2699     Check(F.getReturnType()->isVoidTy(),
2700           "Calling convention requires void return type", &F);
2701     [[fallthrough]];
2702   case CallingConv::AMDGPU_VS:
2703   case CallingConv::AMDGPU_HS:
2704   case CallingConv::AMDGPU_GS:
2705   case CallingConv::AMDGPU_PS:
2706   case CallingConv::AMDGPU_CS:
2707     Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
2708     if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
2709       const unsigned StackAS = DL.getAllocaAddrSpace();
2710       unsigned i = 0;
2711       for (const Argument &Arg : F.args()) {
2712         Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
2713               "Calling convention disallows byval", &F);
2714         Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
2715               "Calling convention disallows preallocated", &F);
2716         Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
2717               "Calling convention disallows inalloca", &F);
2718 
2719         if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
2720           // FIXME: Should also disallow LDS and GDS, but we don't have the enum
2721           // value here.
2722           Check(Arg.getType()->getPointerAddressSpace() != StackAS,
2723                 "Calling convention disallows stack byref", &F);
2724         }
2725 
2726         ++i;
2727       }
2728     }
2729 
2730     [[fallthrough]];
2731   case CallingConv::Fast:
2732   case CallingConv::Cold:
2733   case CallingConv::Intel_OCL_BI:
2734   case CallingConv::PTX_Kernel:
2735   case CallingConv::PTX_Device:
2736     Check(!F.isVarArg(),
2737           "Calling convention does not support varargs or "
2738           "perfect forwarding!",
2739           &F);
2740     break;
2741   }
2742 
2743   // Check that the argument values match the function type for this function...
2744   unsigned i = 0;
2745   for (const Argument &Arg : F.args()) {
2746     Check(Arg.getType() == FT->getParamType(i),
2747           "Argument value does not match function argument type!", &Arg,
2748           FT->getParamType(i));
2749     Check(Arg.getType()->isFirstClassType(),
2750           "Function arguments must have first-class types!", &Arg);
2751     if (!IsIntrinsic) {
2752       Check(!Arg.getType()->isMetadataTy(),
2753             "Function takes metadata but isn't an intrinsic", &Arg, &F);
2754       Check(!Arg.getType()->isTokenTy(),
2755             "Function takes token but isn't an intrinsic", &Arg, &F);
2756       Check(!Arg.getType()->isX86_AMXTy(),
2757             "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
2758     }
2759 
2760     // Check that swifterror argument is only used by loads and stores.
2761     if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
2762       verifySwiftErrorValue(&Arg);
2763     }
2764     ++i;
2765   }
2766 
2767   if (!IsIntrinsic) {
2768     Check(!F.getReturnType()->isTokenTy(),
2769           "Function returns a token but isn't an intrinsic", &F);
2770     Check(!F.getReturnType()->isX86_AMXTy(),
2771           "Function returns a x86_amx but isn't an intrinsic", &F);
2772   }
2773 
2774   // Get the function metadata attachments.
2775   SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
2776   F.getAllMetadata(MDs);
2777   assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
2778   verifyFunctionMetadata(MDs);
2779 
2780   // Check validity of the personality function
2781   if (F.hasPersonalityFn()) {
2782     auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
2783     if (Per)
2784       Check(Per->getParent() == F.getParent(),
2785             "Referencing personality function in another module!", &F,
2786             F.getParent(), Per, Per->getParent());
2787   }
2788 
2789   // EH funclet coloring can be expensive, recompute on-demand
2790   BlockEHFuncletColors.clear();
2791 
2792   if (F.isMaterializable()) {
2793     // Function has a body somewhere we can't see.
2794     Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
2795           MDs.empty() ? nullptr : MDs.front().second);
2796   } else if (F.isDeclaration()) {
2797     for (const auto &I : MDs) {
2798       // This is used for call site debug information.
2799       CheckDI(I.first != LLVMContext::MD_dbg ||
2800                   !cast<DISubprogram>(I.second)->isDistinct(),
2801               "function declaration may only have a unique !dbg attachment",
2802               &F);
2803       Check(I.first != LLVMContext::MD_prof,
2804             "function declaration may not have a !prof attachment", &F);
2805 
2806       // Verify the metadata itself.
2807       visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
2808     }
2809     Check(!F.hasPersonalityFn(),
2810           "Function declaration shouldn't have a personality routine", &F);
2811   } else {
2812     // Verify that this function (which has a body) is not named "llvm.*".  It
2813     // is not legal to define intrinsics.
2814     Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
2815 
2816     // Check the entry node
2817     const BasicBlock *Entry = &F.getEntryBlock();
2818     Check(pred_empty(Entry),
2819           "Entry block to function must not have predecessors!", Entry);
2820 
2821     // The address of the entry block cannot be taken, unless it is dead.
2822     if (Entry->hasAddressTaken()) {
2823       Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
2824             "blockaddress may not be used with the entry block!", Entry);
2825     }
2826 
2827     unsigned NumDebugAttachments = 0, NumProfAttachments = 0,
2828              NumKCFIAttachments = 0;
2829     // Visit metadata attachments.
2830     for (const auto &I : MDs) {
2831       // Verify that the attachment is legal.
2832       auto AllowLocs = AreDebugLocsAllowed::No;
2833       switch (I.first) {
2834       default:
2835         break;
2836       case LLVMContext::MD_dbg: {
2837         ++NumDebugAttachments;
2838         CheckDI(NumDebugAttachments == 1,
2839                 "function must have a single !dbg attachment", &F, I.second);
2840         CheckDI(isa<DISubprogram>(I.second),
2841                 "function !dbg attachment must be a subprogram", &F, I.second);
2842         CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
2843                 "function definition may only have a distinct !dbg attachment",
2844                 &F);
2845 
2846         auto *SP = cast<DISubprogram>(I.second);
2847         const Function *&AttachedTo = DISubprogramAttachments[SP];
2848         CheckDI(!AttachedTo || AttachedTo == &F,
2849                 "DISubprogram attached to more than one function", SP, &F);
2850         AttachedTo = &F;
2851         AllowLocs = AreDebugLocsAllowed::Yes;
2852         break;
2853       }
2854       case LLVMContext::MD_prof:
2855         ++NumProfAttachments;
2856         Check(NumProfAttachments == 1,
2857               "function must have a single !prof attachment", &F, I.second);
2858         break;
2859       case LLVMContext::MD_kcfi_type:
2860         ++NumKCFIAttachments;
2861         Check(NumKCFIAttachments == 1,
2862               "function must have a single !kcfi_type attachment", &F,
2863               I.second);
2864         break;
2865       }
2866 
2867       // Verify the metadata itself.
2868       visitMDNode(*I.second, AllowLocs);
2869     }
2870   }
2871 
2872   // If this function is actually an intrinsic, verify that it is only used in
2873   // direct call/invokes, never having its "address taken".
2874   // Only do this if the module is materialized, otherwise we don't have all the
2875   // uses.
2876   if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
2877     const User *U;
2878     if (F.hasAddressTaken(&U, false, true, false,
2879                           /*IgnoreARCAttachedCall=*/true))
2880       Check(false, "Invalid user of intrinsic instruction!", U);
2881   }
2882 
2883   // Check intrinsics' signatures.
2884   switch (F.getIntrinsicID()) {
2885   case Intrinsic::experimental_gc_get_pointer_base: {
2886     FunctionType *FT = F.getFunctionType();
2887     Check(FT->getNumParams() == 1, "wrong number of parameters", F);
2888     Check(isa<PointerType>(F.getReturnType()),
2889           "gc.get.pointer.base must return a pointer", F);
2890     Check(FT->getParamType(0) == F.getReturnType(),
2891           "gc.get.pointer.base operand and result must be of the same type", F);
2892     break;
2893   }
2894   case Intrinsic::experimental_gc_get_pointer_offset: {
2895     FunctionType *FT = F.getFunctionType();
2896     Check(FT->getNumParams() == 1, "wrong number of parameters", F);
2897     Check(isa<PointerType>(FT->getParamType(0)),
2898           "gc.get.pointer.offset operand must be a pointer", F);
2899     Check(F.getReturnType()->isIntegerTy(),
2900           "gc.get.pointer.offset must return integer", F);
2901     break;
2902   }
2903   }
2904 
2905   auto *N = F.getSubprogram();
2906   HasDebugInfo = (N != nullptr);
2907   if (!HasDebugInfo)
2908     return;
2909 
  // Check that all !dbg attachments lead back to N.
2911   //
2912   // FIXME: Check this incrementally while visiting !dbg attachments.
2913   // FIXME: Only check when N is the canonical subprogram for F.
2914   SmallPtrSet<const MDNode *, 32> Seen;
2915   auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
2916     // Be careful about using DILocation here since we might be dealing with
2917     // broken code (this is the Verifier after all).
2918     const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
2919     if (!DL)
2920       return;
2921     if (!Seen.insert(DL).second)
2922       return;
2923 
2924     Metadata *Parent = DL->getRawScope();
2925     CheckDI(Parent && isa<DILocalScope>(Parent),
2926             "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
2927 
2928     DILocalScope *Scope = DL->getInlinedAtScope();
2929     Check(Scope, "Failed to find DILocalScope", DL);
2930 
2931     if (!Seen.insert(Scope).second)
2932       return;
2933 
2934     DISubprogram *SP = Scope->getSubprogram();
2935 
2936     // Scope and SP could be the same MDNode and we don't want to skip
2937     // validation in that case
2938     if (SP && ((Scope != SP) && !Seen.insert(SP).second))
2939       return;
2940 
2941     CheckDI(SP->describes(&F),
2942             "!dbg attachment points at wrong subprogram for function", N, &F,
2943             &I, DL, Scope, SP);
2944   };
2945   for (auto &BB : F)
2946     for (auto &I : BB) {
2947       VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
2948       // The llvm.loop annotations also contain two DILocations.
2949       if (auto MD = I.getMetadata(LLVMContext::MD_loop))
2950         for (unsigned i = 1; i < MD->getNumOperands(); ++i)
2951           VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
2952       if (BrokenDebugInfo)
2953         return;
2954     }
2955 }
2956 
// visitBasicBlock - Verify that a basic block is well formed...
2958 //
2959 void Verifier::visitBasicBlock(BasicBlock &BB) {
2960   InstsInThisBlock.clear();
2961 
2962   // Ensure that basic blocks have terminators!
2963   Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
2964 
2965   // Check constraints that this basic block imposes on all of the PHI nodes in
2966   // it.
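  // For illustration only (a sketch, not from any particular input): with
  // predecessors %a and %b, a well-formed PHI lists exactly one incoming
  // value per predecessor, e.g.
  //   %x = phi i32 [ 1, %a ], [ 2, %b ]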
2967   if (isa<PHINode>(BB.front())) {
2968     SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
2969     SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
2970     llvm::sort(Preds);
2971     for (const PHINode &PN : BB.phis()) {
2972       Check(PN.getNumIncomingValues() == Preds.size(),
2973             "PHINode should have one entry for each predecessor of its "
2974             "parent basic block!",
2975             &PN);
2976 
2977       // Get and sort all incoming values in the PHI node...
2978       Values.clear();
2979       Values.reserve(PN.getNumIncomingValues());
2980       for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
2981         Values.push_back(
2982             std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
2983       llvm::sort(Values);
2984 
2985       for (unsigned i = 0, e = Values.size(); i != e; ++i) {
2986         // Check to make sure that if there is more than one entry for a
2987         // particular basic block in this PHI node, that the incoming values are
2988         // all identical.
2989         //
2990         Check(i == 0 || Values[i].first != Values[i - 1].first ||
2991                   Values[i].second == Values[i - 1].second,
2992               "PHI node has multiple entries for the same basic block with "
2993               "different incoming values!",
2994               &PN, Values[i].first, Values[i].second, Values[i - 1].second);
2995 
2996         // Check to make sure that the predecessors and PHI node entries are
2997         // matched up.
2998         Check(Values[i].first == Preds[i],
2999               "PHI node entries do not match predecessors!", &PN,
3000               Values[i].first, Preds[i]);
3001       }
3002     }
3003   }
3004 
3005   // Check that all instructions have their parent pointers set up correctly.
  for (auto &I : BB)
    Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
3010 }
3011 
3012 void Verifier::visitTerminator(Instruction &I) {
3013   // Ensure that terminators only exist at the end of the basic block.
3014   Check(&I == I.getParent()->getTerminator(),
3015         "Terminator found in the middle of a basic block!", I.getParent());
3016   visitInstruction(I);
3017 }
3018 
3019 void Verifier::visitBranchInst(BranchInst &BI) {
3020   if (BI.isConditional()) {
3021     Check(BI.getCondition()->getType()->isIntegerTy(1),
3022           "Branch condition is not 'i1' type!", &BI, BI.getCondition());
3023   }
3024   visitTerminator(BI);
3025 }
3026 
3027 void Verifier::visitReturnInst(ReturnInst &RI) {
3028   Function *F = RI.getParent()->getParent();
3029   unsigned N = RI.getNumOperands();
3030   if (F->getReturnType()->isVoidTy())
3031     Check(N == 0,
3032           "Found return instr that returns non-void in Function of void "
3033           "return type!",
3034           &RI, F->getReturnType());
3035   else
3036     Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
3037           "Function return type does not match operand "
3038           "type of return inst!",
3039           &RI, F->getReturnType());
3040 
3041   // Check to make sure that the return value has necessary properties for
3042   // terminators...
3043   visitTerminator(RI);
3044 }
3045 
3046 void Verifier::visitSwitchInst(SwitchInst &SI) {
3047   Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
3048   // Check to make sure that all of the constants in the switch instruction
3049   // have the same type as the switched-on value.
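  // For illustration only, a well-formed switch looks like
  //   switch i32 %val, label %default [ i32 0, label %bb0
  //                                     i32 1, label %bb1 ]
  // where no case value repeats and every case value is an i32.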
3050   Type *SwitchTy = SI.getCondition()->getType();
3051   SmallPtrSet<ConstantInt*, 32> Constants;
3052   for (auto &Case : SI.cases()) {
3053     Check(isa<ConstantInt>(SI.getOperand(Case.getCaseIndex() * 2 + 2)),
3054           "Case value is not a constant integer.", &SI);
3055     Check(Case.getCaseValue()->getType() == SwitchTy,
3056           "Switch constants must all be same type as switch value!", &SI);
3057     Check(Constants.insert(Case.getCaseValue()).second,
3058           "Duplicate integer as switch case", &SI, Case.getCaseValue());
3059   }
3060 
3061   visitTerminator(SI);
3062 }
3063 
3064 void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
3065   Check(BI.getAddress()->getType()->isPointerTy(),
3066         "Indirectbr operand must have pointer type!", &BI);
3067   for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
3068     Check(BI.getDestination(i)->getType()->isLabelTy(),
3069           "Indirectbr destinations must all have pointer type!", &BI);
3070 
3071   visitTerminator(BI);
3072 }
3073 
3074 void Verifier::visitCallBrInst(CallBrInst &CBI) {
3075   Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
3076   const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
3077   Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
3078 
3079   verifyInlineAsmCall(CBI);
3080   visitTerminator(CBI);
3081 }
3082 
3083 void Verifier::visitSelectInst(SelectInst &SI) {
3084   Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
3085                                         SI.getOperand(2)),
3086         "Invalid operands for select instruction!", &SI);
3087 
3088   Check(SI.getTrueValue()->getType() == SI.getType(),
3089         "Select values must have same type as select instruction!", &SI);
3090   visitInstruction(SI);
3091 }
3092 
/// visitUserOp1 - User-defined operators shouldn't live beyond the lifetime of
/// a pass; if any exist, it's an error.
3095 ///
3096 void Verifier::visitUserOp1(Instruction &I) {
3097   Check(false, "User-defined operators should not live outside of a pass!", &I);
3098 }
3099 
3100 void Verifier::visitTruncInst(TruncInst &I) {
3101   // Get the source and destination types
3102   Type *SrcTy = I.getOperand(0)->getType();
3103   Type *DestTy = I.getType();
3104 
3105   // Get the size of the types in bits, we'll need this later
3106   unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3107   unsigned DestBitSize = DestTy->getScalarSizeInBits();
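  // For illustration only: `trunc i32 %x to i8` is well formed (32 > 8),
  // while `trunc i8 %x to i32` fails the width check below.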
3108 
3109   Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
3110   Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
3111   Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3112         "trunc source and destination must both be a vector or neither", &I);
3113   Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
3114 
3115   visitInstruction(I);
3116 }
3117 
3118 void Verifier::visitZExtInst(ZExtInst &I) {
3119   // Get the source and destination types
3120   Type *SrcTy = I.getOperand(0)->getType();
3121   Type *DestTy = I.getType();
3122 
  // Get the size of the types in bits, we'll need this later
  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
  unsigned DestBitSize = DestTy->getScalarSizeInBits();

  Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
  Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
  Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
        "zext source and destination must both be a vector or neither", &I);
3130 
3131   Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
3132 
3133   visitInstruction(I);
3134 }
3135 
3136 void Verifier::visitSExtInst(SExtInst &I) {
3137   // Get the source and destination types
3138   Type *SrcTy = I.getOperand(0)->getType();
3139   Type *DestTy = I.getType();
3140 
3141   // Get the size of the types in bits, we'll need this later
3142   unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3143   unsigned DestBitSize = DestTy->getScalarSizeInBits();
3144 
3145   Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
3146   Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
3147   Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3148         "sext source and destination must both be a vector or neither", &I);
3149   Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
3150 
3151   visitInstruction(I);
3152 }
3153 
3154 void Verifier::visitFPTruncInst(FPTruncInst &I) {
3155   // Get the source and destination types
3156   Type *SrcTy = I.getOperand(0)->getType();
3157   Type *DestTy = I.getType();
3158   // Get the size of the types in bits, we'll need this later
3159   unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3160   unsigned DestBitSize = DestTy->getScalarSizeInBits();
3161 
3162   Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
3163   Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
3164   Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3165         "fptrunc source and destination must both be a vector or neither", &I);
3166   Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
3167 
3168   visitInstruction(I);
3169 }
3170 
3171 void Verifier::visitFPExtInst(FPExtInst &I) {
3172   // Get the source and destination types
3173   Type *SrcTy = I.getOperand(0)->getType();
3174   Type *DestTy = I.getType();
3175 
3176   // Get the size of the types in bits, we'll need this later
3177   unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3178   unsigned DestBitSize = DestTy->getScalarSizeInBits();
3179 
3180   Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
3181   Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
3182   Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
3183         "fpext source and destination must both be a vector or neither", &I);
3184   Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
3185 
3186   visitInstruction(I);
3187 }
3188 
3189 void Verifier::visitUIToFPInst(UIToFPInst &I) {
3190   // Get the source and destination types
3191   Type *SrcTy = I.getOperand(0)->getType();
3192   Type *DestTy = I.getType();
3193 
3194   bool SrcVec = SrcTy->isVectorTy();
3195   bool DstVec = DestTy->isVectorTy();
3196 
3197   Check(SrcVec == DstVec,
3198         "UIToFP source and dest must both be vector or scalar", &I);
3199   Check(SrcTy->isIntOrIntVectorTy(),
3200         "UIToFP source must be integer or integer vector", &I);
3201   Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
3202         &I);
3203 
3204   if (SrcVec && DstVec)
3205     Check(cast<VectorType>(SrcTy)->getElementCount() ==
3206               cast<VectorType>(DestTy)->getElementCount(),
3207           "UIToFP source and dest vector length mismatch", &I);
3208 
3209   visitInstruction(I);
3210 }
3211 
3212 void Verifier::visitSIToFPInst(SIToFPInst &I) {
3213   // Get the source and destination types
3214   Type *SrcTy = I.getOperand(0)->getType();
3215   Type *DestTy = I.getType();
3216 
3217   bool SrcVec = SrcTy->isVectorTy();
3218   bool DstVec = DestTy->isVectorTy();
3219 
3220   Check(SrcVec == DstVec,
3221         "SIToFP source and dest must both be vector or scalar", &I);
3222   Check(SrcTy->isIntOrIntVectorTy(),
3223         "SIToFP source must be integer or integer vector", &I);
3224   Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
3225         &I);
3226 
3227   if (SrcVec && DstVec)
3228     Check(cast<VectorType>(SrcTy)->getElementCount() ==
3229               cast<VectorType>(DestTy)->getElementCount(),
3230           "SIToFP source and dest vector length mismatch", &I);
3231 
3232   visitInstruction(I);
3233 }
3234 
3235 void Verifier::visitFPToUIInst(FPToUIInst &I) {
3236   // Get the source and destination types
3237   Type *SrcTy = I.getOperand(0)->getType();
3238   Type *DestTy = I.getType();
3239 
3240   bool SrcVec = SrcTy->isVectorTy();
3241   bool DstVec = DestTy->isVectorTy();
3242 
3243   Check(SrcVec == DstVec,
3244         "FPToUI source and dest must both be vector or scalar", &I);
3245   Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
3246   Check(DestTy->isIntOrIntVectorTy(),
3247         "FPToUI result must be integer or integer vector", &I);
3248 
3249   if (SrcVec && DstVec)
3250     Check(cast<VectorType>(SrcTy)->getElementCount() ==
3251               cast<VectorType>(DestTy)->getElementCount(),
3252           "FPToUI source and dest vector length mismatch", &I);
3253 
3254   visitInstruction(I);
3255 }
3256 
3257 void Verifier::visitFPToSIInst(FPToSIInst &I) {
3258   // Get the source and destination types
3259   Type *SrcTy = I.getOperand(0)->getType();
3260   Type *DestTy = I.getType();
3261 
3262   bool SrcVec = SrcTy->isVectorTy();
3263   bool DstVec = DestTy->isVectorTy();
3264 
3265   Check(SrcVec == DstVec,
3266         "FPToSI source and dest must both be vector or scalar", &I);
3267   Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
3268   Check(DestTy->isIntOrIntVectorTy(),
3269         "FPToSI result must be integer or integer vector", &I);
3270 
3271   if (SrcVec && DstVec)
3272     Check(cast<VectorType>(SrcTy)->getElementCount() ==
3273               cast<VectorType>(DestTy)->getElementCount(),
3274           "FPToSI source and dest vector length mismatch", &I);
3275 
3276   visitInstruction(I);
3277 }
3278 
3279 void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
3280   // Get the source and destination types
3281   Type *SrcTy = I.getOperand(0)->getType();
3282   Type *DestTy = I.getType();
3283 
3284   Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
3285 
3286   Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
3287   Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
3288         &I);
3289 
3290   if (SrcTy->isVectorTy()) {
3291     auto *VSrc = cast<VectorType>(SrcTy);
3292     auto *VDest = cast<VectorType>(DestTy);
3293     Check(VSrc->getElementCount() == VDest->getElementCount(),
3294           "PtrToInt Vector width mismatch", &I);
3295   }
3296 
3297   visitInstruction(I);
3298 }
3299 
3300 void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
3301   // Get the source and destination types
3302   Type *SrcTy = I.getOperand(0)->getType();
3303   Type *DestTy = I.getType();
3304 
3305   Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
3306   Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
3307 
3308   Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
3309         &I);
3310   if (SrcTy->isVectorTy()) {
3311     auto *VSrc = cast<VectorType>(SrcTy);
3312     auto *VDest = cast<VectorType>(DestTy);
3313     Check(VSrc->getElementCount() == VDest->getElementCount(),
3314           "IntToPtr Vector width mismatch", &I);
3315   }
3316   visitInstruction(I);
3317 }
3318 
3319 void Verifier::visitBitCastInst(BitCastInst &I) {
3320   Check(
3321       CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
3322       "Invalid bitcast", &I);
3323   visitInstruction(I);
3324 }
3325 
3326 void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
3327   Type *SrcTy = I.getOperand(0)->getType();
3328   Type *DestTy = I.getType();
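  // For illustration only: `addrspacecast ptr addrspace(1) %p to ptr` is
  // acceptable; a cast whose source and destination address spaces match is
  // rejected below.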
3329 
3330   Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
3331         &I);
3332   Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
3333         &I);
3334   Check(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(),
3335         "AddrSpaceCast must be between different address spaces", &I);
3336   if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
3337     Check(SrcVTy->getElementCount() ==
3338               cast<VectorType>(DestTy)->getElementCount(),
3339           "AddrSpaceCast vector pointer number of elements mismatch", &I);
3340   visitInstruction(I);
3341 }
3342 
3343 /// visitPHINode - Ensure that a PHI node is well formed.
3344 ///
3345 void Verifier::visitPHINode(PHINode &PN) {
3346   // Ensure that the PHI nodes are all grouped together at the top of the block.
3347   // This can be tested by checking whether the instruction before this is
3348   // either nonexistent (because this is begin()) or is a PHI node.  If not,
3349   // then there is some other instruction before a PHI.
3350   Check(&PN == &PN.getParent()->front() ||
3351             isa<PHINode>(--BasicBlock::iterator(&PN)),
3352         "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
3353 
3354   // Check that a PHI doesn't yield a Token.
3355   Check(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
3356 
3357   // Check that all of the values of the PHI node have the same type as the
3358   // result, and that the incoming blocks are really basic blocks.
3359   for (Value *IncValue : PN.incoming_values()) {
3360     Check(PN.getType() == IncValue->getType(),
3361           "PHI node operands are not the same type as the result!", &PN);
3362   }
3363 
3364   // All other PHI node constraints are checked in the visitBasicBlock method.
3365 
3366   visitInstruction(PN);
3367 }
3368 
3369 static bool isControlledConvergent(const CallBase &Call) {
3370   if (Call.getOperandBundle(LLVMContext::OB_convergencectrl))
3371     return true;
3372   if (const auto *F = dyn_cast<Function>(Call.getCalledOperand())) {
3373     switch (F->getIntrinsicID()) {
3374     case Intrinsic::experimental_convergence_anchor:
3375     case Intrinsic::experimental_convergence_entry:
3376     case Intrinsic::experimental_convergence_loop:
3377       return true;
3378     }
3379   }
3380   return false;
3381 }
3382 
3383 void Verifier::visitCallBase(CallBase &Call) {
3384   Check(Call.getCalledOperand()->getType()->isPointerTy(),
3385         "Called function must be a pointer!", Call);
3386   FunctionType *FTy = Call.getFunctionType();
3387 
3388   // Verify that the correct number of arguments are being passed
3389   if (FTy->isVarArg())
3390     Check(Call.arg_size() >= FTy->getNumParams(),
3391           "Called function requires more parameters than were provided!", Call);
3392   else
3393     Check(Call.arg_size() == FTy->getNumParams(),
3394           "Incorrect number of arguments passed to called function!", Call);
3395 
3396   // Verify that all arguments to the call match the function type.
3397   for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
3398     Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
3399           "Call parameter type does not match function signature!",
3400           Call.getArgOperand(i), FTy->getParamType(i), Call);
3401 
3402   AttributeList Attrs = Call.getAttributes();
3403 
3404   Check(verifyAttributeCount(Attrs, Call.arg_size()),
3405         "Attribute after last parameter!", Call);
3406 
3407   Function *Callee =
3408       dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
3409   bool IsIntrinsic = Callee && Callee->isIntrinsic();
3410   if (IsIntrinsic)
3411     Check(Callee->getValueType() == FTy,
3412           "Intrinsic called with incompatible signature", Call);
3413 
3414   // Disallow calls to functions with the amdgpu_cs_chain[_preserve] calling
3415   // convention.
3416   auto CC = Call.getCallingConv();
3417   Check(CC != CallingConv::AMDGPU_CS_Chain &&
3418             CC != CallingConv::AMDGPU_CS_ChainPreserve,
3419         "Direct calls to amdgpu_cs_chain/amdgpu_cs_chain_preserve functions "
3420         "not allowed. Please use the @llvm.amdgpu.cs.chain intrinsic instead.",
3421         Call);
3422 
3423   auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
3424     if (!Ty->isSized())
3425       return;
3426     Align ABIAlign = DL.getABITypeAlign(Ty);
3427     Align MaxAlign(ParamMaxAlignment);
3428     Check(ABIAlign <= MaxAlign,
3429           "Incorrect alignment of " + Message + " to called function!", Call);
3430   };
3431 
3432   if (!IsIntrinsic) {
3433     VerifyTypeAlign(FTy->getReturnType(), "return type");
3434     for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3435       Type *Ty = FTy->getParamType(i);
3436       VerifyTypeAlign(Ty, "argument passed");
3437     }
3438   }
3439 
3440   if (Attrs.hasFnAttr(Attribute::Speculatable)) {
3441     // Don't allow speculatable on call sites, unless the underlying function
3442     // declaration is also speculatable.
3443     Check(Callee && Callee->isSpeculatable(),
3444           "speculatable attribute may not apply to call sites", Call);
3445   }
3446 
3447   if (Attrs.hasFnAttr(Attribute::Preallocated)) {
3448     Check(Call.getCalledFunction()->getIntrinsicID() ==
3449               Intrinsic::call_preallocated_arg,
3450           "preallocated as a call site attribute can only be on "
3451           "llvm.call.preallocated.arg");
3452   }
3453 
3454   // Verify call attributes.
3455   verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
3456 
3457   // Conservatively check the inalloca argument.
3458   // We have a bug if we can find that there is an underlying alloca without
3459   // inalloca.
3460   if (Call.hasInAllocaArgument()) {
3461     Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
3462     if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
3463       Check(AI->isUsedWithInAlloca(),
3464             "inalloca argument for call has mismatched alloca", AI, Call);
3465   }
3466 
3467   // For each argument of the callsite, if it has the swifterror argument,
3468   // make sure the underlying alloca/parameter it comes from has a swifterror as
3469   // well.
3470   for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
3471     if (Call.paramHasAttr(i, Attribute::SwiftError)) {
3472       Value *SwiftErrorArg = Call.getArgOperand(i);
3473       if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
3474         Check(AI->isSwiftError(),
3475               "swifterror argument for call has mismatched alloca", AI, Call);
3476         continue;
3477       }
3478       auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3479       Check(ArgI, "swifterror argument should come from an alloca or parameter",
3480             SwiftErrorArg, Call);
3481       Check(ArgI->hasSwiftErrorAttr(),
3482             "swifterror argument for call has mismatched parameter", ArgI,
3483             Call);
3484     }
3485 
3486     if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
3487       // Don't allow immarg on call sites, unless the underlying declaration
3488       // also has the matching immarg.
3489       Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3490             "immarg may not apply only to call sites", Call.getArgOperand(i),
3491             Call);
3492     }
3493 
3494     if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3495       Value *ArgVal = Call.getArgOperand(i);
3496       Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3497             "immarg operand has non-immediate parameter", ArgVal, Call);
3498     }
3499 
3500     if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3501       Value *ArgVal = Call.getArgOperand(i);
3502       bool hasOB =
3503           Call.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0;
3504       bool isMustTail = Call.isMustTailCall();
3505       Check(hasOB != isMustTail,
3506             "preallocated operand either requires a preallocated bundle or "
3507             "the call to be musttail (but not both)",
3508             ArgVal, Call);
3509     }
3510   }
3511 
3512   if (FTy->isVarArg()) {
3513     // FIXME? is 'nest' even legal here?
3514     bool SawNest = false;
3515     bool SawReturned = false;
3516 
3517     for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3518       if (Attrs.hasParamAttr(Idx, Attribute::Nest))
3519         SawNest = true;
3520       if (Attrs.hasParamAttr(Idx, Attribute::Returned))
3521         SawReturned = true;
3522     }
3523 
3524     // Check attributes on the varargs part.
3525     for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3526       Type *Ty = Call.getArgOperand(Idx)->getType();
3527       AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
3528       verifyParameterAttrs(ArgAttrs, Ty, &Call);
3529 
3530       if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3531         Check(!SawNest, "More than one parameter has attribute nest!", Call);
3532         SawNest = true;
3533       }
3534 
3535       if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3536         Check(!SawReturned, "More than one parameter has attribute returned!",
3537               Call);
3538         Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3539               "Incompatible argument and return types for 'returned' "
3540               "attribute",
3541               Call);
3542         SawReturned = true;
3543       }
3544 
      // Statepoint intrinsic is vararg but the wrapped function may not be.
3546       // Allow sret here and check the wrapped function in verifyStatepoint.
3547       if (!Call.getCalledFunction() ||
3548           Call.getCalledFunction()->getIntrinsicID() !=
3549               Intrinsic::experimental_gc_statepoint)
3550         Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
3551               "Attribute 'sret' cannot be used for vararg call arguments!",
3552               Call);
3553 
3554       if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3555         Check(Idx == Call.arg_size() - 1,
3556               "inalloca isn't on the last argument!", Call);
3557     }
3558   }
3559 
3560   // Verify that there's no metadata unless it's a direct call to an intrinsic.
3561   if (!IsIntrinsic) {
3562     for (Type *ParamTy : FTy->params()) {
3563       Check(!ParamTy->isMetadataTy(),
3564             "Function has metadata parameter but isn't an intrinsic", Call);
3565       Check(!ParamTy->isTokenTy(),
3566             "Function has token parameter but isn't an intrinsic", Call);
3567     }
3568   }
3569 
3570   // Verify that indirect calls don't return tokens.
3571   if (!Call.getCalledFunction()) {
3572     Check(!FTy->getReturnType()->isTokenTy(),
3573           "Return type cannot be token for indirect call!");
3574     Check(!FTy->getReturnType()->isX86_AMXTy(),
3575           "Return type cannot be x86_amx for indirect call!");
3576   }
3577 
3578   if (Function *F = Call.getCalledFunction())
3579     if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
3580       visitIntrinsicCall(ID, Call);
3581 
  // Verify that a callsite has at most one operand bundle of each recognized
  // kind: "deopt", "funclet", "gc-transition", "cfguardtarget",
  // "preallocated", "ptrauth", "kcfi", "gc-live", and "clang.arc.attachedcall".
3585   bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3586        FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3587        FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
3588        FoundPtrauthBundle = false, FoundKCFIBundle = false,
3589        FoundAttachedCallBundle = false;
3590   for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3591     OperandBundleUse BU = Call.getOperandBundleAt(i);
3592     uint32_t Tag = BU.getTagID();
3593     if (Tag == LLVMContext::OB_deopt) {
3594       Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3595       FoundDeoptBundle = true;
3596     } else if (Tag == LLVMContext::OB_gc_transition) {
3597       Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3598             Call);
3599       FoundGCTransitionBundle = true;
3600     } else if (Tag == LLVMContext::OB_funclet) {
3601       Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3602       FoundFuncletBundle = true;
3603       Check(BU.Inputs.size() == 1,
3604             "Expected exactly one funclet bundle operand", Call);
3605       Check(isa<FuncletPadInst>(BU.Inputs.front()),
3606             "Funclet bundle operands should correspond to a FuncletPadInst",
3607             Call);
3608     } else if (Tag == LLVMContext::OB_cfguardtarget) {
3609       Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
3610             Call);
3611       FoundCFGuardTargetBundle = true;
3612       Check(BU.Inputs.size() == 1,
3613             "Expected exactly one cfguardtarget bundle operand", Call);
3614     } else if (Tag == LLVMContext::OB_ptrauth) {
3615       Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
3616       FoundPtrauthBundle = true;
3617       Check(BU.Inputs.size() == 2,
3618             "Expected exactly two ptrauth bundle operands", Call);
3619       Check(isa<ConstantInt>(BU.Inputs[0]) &&
3620                 BU.Inputs[0]->getType()->isIntegerTy(32),
3621             "Ptrauth bundle key operand must be an i32 constant", Call);
3622       Check(BU.Inputs[1]->getType()->isIntegerTy(64),
3623             "Ptrauth bundle discriminator operand must be an i64", Call);
3624     } else if (Tag == LLVMContext::OB_kcfi) {
3625       Check(!FoundKCFIBundle, "Multiple kcfi operand bundles", Call);
3626       FoundKCFIBundle = true;
3627       Check(BU.Inputs.size() == 1, "Expected exactly one kcfi bundle operand",
3628             Call);
3629       Check(isa<ConstantInt>(BU.Inputs[0]) &&
3630                 BU.Inputs[0]->getType()->isIntegerTy(32),
3631             "Kcfi bundle operand must be an i32 constant", Call);
3632     } else if (Tag == LLVMContext::OB_preallocated) {
3633       Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3634             Call);
3635       FoundPreallocatedBundle = true;
3636       Check(BU.Inputs.size() == 1,
3637             "Expected exactly one preallocated bundle operand", Call);
3638       auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3639       Check(Input &&
3640                 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3641             "\"preallocated\" argument must be a token from "
3642             "llvm.call.preallocated.setup",
3643             Call);
3644     } else if (Tag == LLVMContext::OB_gc_live) {
3645       Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
3646       FoundGCLiveBundle = true;
3647     } else if (Tag == LLVMContext::OB_clang_arc_attachedcall) {
3648       Check(!FoundAttachedCallBundle,
3649             "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
3650       FoundAttachedCallBundle = true;
3651       verifyAttachedCallBundle(Call, BU);
3652     }
3653   }
3654 
3655   // Verify that callee and callsite agree on whether to use pointer auth.
3656   Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
3657         "Direct call cannot have a ptrauth bundle", Call);
3658 
3659   // Verify that each inlinable callsite of a debug-info-bearing function in a
3660   // debug-info-bearing function has a debug location attached to it. Failure to
  // do so causes assertion failures when the inliner sets up inline scope
  // info. (Interposable functions are not inlinable, and neither are functions
  // without definitions.)
3664   if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
3665       !Call.getCalledFunction()->isInterposable() &&
3666       !Call.getCalledFunction()->isDeclaration() &&
3667       Call.getCalledFunction()->getSubprogram())
3668     CheckDI(Call.getDebugLoc(),
3669             "inlinable function call in a function with "
3670             "debug info must have a !dbg location",
3671             Call);
3672 
3673   if (Call.isInlineAsm())
3674     verifyInlineAsmCall(Call);
3675 
3676   if (isControlledConvergent(Call)) {
3677     Check(Call.isConvergent(),
3678           "Expected convergent attribute on a controlled convergent call.",
3679           Call);
3680     Check(ConvergenceKind != UncontrolledConvergence,
3681           "Cannot mix controlled and uncontrolled convergence in the same "
3682           "function.",
3683           Call);
3684     ConvergenceKind = ControlledConvergence;
3685   } else if (Call.isConvergent()) {
3686     Check(ConvergenceKind != ControlledConvergence,
3687           "Cannot mix controlled and uncontrolled convergence in the same "
3688           "function.",
3689           Call);
3690     ConvergenceKind = UncontrolledConvergence;
3691   }
3692 
3693   visitInstruction(Call);
3694 }
3695 
3696 void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
3697                                          StringRef Context) {
3698   Check(!Attrs.contains(Attribute::InAlloca),
3699         Twine("inalloca attribute not allowed in ") + Context);
3700   Check(!Attrs.contains(Attribute::InReg),
3701         Twine("inreg attribute not allowed in ") + Context);
3702   Check(!Attrs.contains(Attribute::SwiftError),
3703         Twine("swifterror attribute not allowed in ") + Context);
3704   Check(!Attrs.contains(Attribute::Preallocated),
3705         Twine("preallocated attribute not allowed in ") + Context);
3706   Check(!Attrs.contains(Attribute::ByRef),
3707         Twine("byref attribute not allowed in ") + Context);
3708 }
3709 
3710 /// Two types are "congruent" if they are identical, or if they are both pointer
3711 /// types with different pointee types and the same address space.
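/// For example, `ptr` and `ptr addrspace(1)` are not congruent, while two
/// pointer types in the same address space always are.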
3712 static bool isTypeCongruent(Type *L, Type *R) {
3713   if (L == R)
3714     return true;
3715   PointerType *PL = dyn_cast<PointerType>(L);
3716   PointerType *PR = dyn_cast<PointerType>(R);
3717   if (!PL || !PR)
3718     return false;
3719   return PL->getAddressSpace() == PR->getAddressSpace();
3720 }
3721 
static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I,
                                             AttributeList Attrs) {
3723   static const Attribute::AttrKind ABIAttrs[] = {
3724       Attribute::StructRet,  Attribute::ByVal,          Attribute::InAlloca,
3725       Attribute::InReg,      Attribute::StackAlignment, Attribute::SwiftSelf,
3726       Attribute::SwiftAsync, Attribute::SwiftError,     Attribute::Preallocated,
3727       Attribute::ByRef};
3728   AttrBuilder Copy(C);
3729   for (auto AK : ABIAttrs) {
3730     Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
3731     if (Attr.isValid())
3732       Copy.addAttribute(Attr);
3733   }
3734 
3735   // `align` is ABI-affecting only in combination with `byval` or `byref`.
3736   if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
3737       (Attrs.hasParamAttr(I, Attribute::ByVal) ||
3738        Attrs.hasParamAttr(I, Attribute::ByRef)))
3739     Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
3740   return Copy;
3741 }
3742 
3743 void Verifier::verifyMustTailCall(CallInst &CI) {
3744   Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
3745 
3746   Function *F = CI.getParent()->getParent();
3747   FunctionType *CallerTy = F->getFunctionType();
3748   FunctionType *CalleeTy = CI.getFunctionType();
3749   Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
3750         "cannot guarantee tail call due to mismatched varargs", &CI);
3751   Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
3752         "cannot guarantee tail call due to mismatched return types", &CI);
3753 
3754   // - The calling conventions of the caller and callee must match.
3755   Check(F->getCallingConv() == CI.getCallingConv(),
3756         "cannot guarantee tail call due to mismatched calling conv", &CI);
3757 
3758   // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
3759   //   or a pointer bitcast followed by a ret instruction.
3760   // - The ret instruction must return the (possibly bitcasted) value
3761   //   produced by the call or void.
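  //
  // Illustrative sketch of the required shape (names are hypothetical):
  //   %ret = musttail call i32 @callee(i32 %x)
  //   ret i32 %ret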
3762   Value *RetVal = &CI;
3763   Instruction *Next = CI.getNextNode();
3764 
3765   // Handle the optional bitcast.
3766   if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
3767     Check(BI->getOperand(0) == RetVal,
3768           "bitcast following musttail call must use the call", BI);
3769     RetVal = BI;
3770     Next = BI->getNextNode();
3771   }
3772 
3773   // Check the return.
3774   ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
3775   Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
3776   Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
3777             isa<UndefValue>(Ret->getReturnValue()),
3778         "musttail call result must be returned", Ret);
3779 
3780   AttributeList CallerAttrs = F->getAttributes();
3781   AttributeList CalleeAttrs = CI.getAttributes();
3782   if (CI.getCallingConv() == CallingConv::SwiftTail ||
3783       CI.getCallingConv() == CallingConv::Tail) {
3784     StringRef CCName =
3785         CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
3786 
3787     // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
    //   are allowed in a tailcc/swifttailcc musttail call.
3789     for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs =
          getParameterABIAttributes(F->getContext(), I, CallerAttrs);
3791       SmallString<32> Context{CCName, StringRef(" musttail caller")};
3792       verifyTailCCMustTailAttrs(ABIAttrs, Context);
3793     }
3794     for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
      AttrBuilder ABIAttrs =
          getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
3796       SmallString<32> Context{CCName, StringRef(" musttail callee")};
3797       verifyTailCCMustTailAttrs(ABIAttrs, Context);
3798     }
3799     // - Varargs functions are not allowed
3800     Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
3801                                      " tail call for varargs function");
3802     return;
3803   }
3804 
3805   // - The caller and callee prototypes must match.  Pointer types of
3806   //   parameters or return types may differ in pointee type, but not
3807   //   address space.
3808   if (!CI.getCalledFunction() || !CI.getCalledFunction()->isIntrinsic()) {
3809     Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
3810           "cannot guarantee tail call due to mismatched parameter counts", &CI);
3811     for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3812       Check(
3813           isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
3814           "cannot guarantee tail call due to mismatched parameter types", &CI);
3815     }
3816   }
3817 
3818   // - All ABI-impacting function attributes, such as sret, byval, inreg,
3819   //   returned, preallocated, and inalloca, must match.
3820   for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
    AttrBuilder CallerABIAttrs =
        getParameterABIAttributes(F->getContext(), I, CallerAttrs);
    AttrBuilder CalleeABIAttrs =
        getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
3823     Check(CallerABIAttrs == CalleeABIAttrs,
3824           "cannot guarantee tail call due to mismatched ABI impacting "
3825           "function attributes",
3826           &CI, CI.getOperand(I));
3827   }
3828 }
3829 
3830 void Verifier::visitCallInst(CallInst &CI) {
3831   visitCallBase(CI);
3832 
3833   if (CI.isMustTailCall())
3834     verifyMustTailCall(CI);
3835 }
3836 
3837 void Verifier::visitInvokeInst(InvokeInst &II) {
3838   visitCallBase(II);
3839 
3840   // Verify that the first non-PHI instruction of the unwind destination is an
3841   // exception handling instruction.
3842   Check(
3843       II.getUnwindDest()->isEHPad(),
3844       "The unwind destination does not have an exception handling instruction!",
3845       &II);
3846 
3847   visitTerminator(II);
3848 }
3849 
3850 /// visitUnaryOperator - Check the argument to the unary operator.
3851 ///
3852 void Verifier::visitUnaryOperator(UnaryOperator &U) {
3853   Check(U.getType() == U.getOperand(0)->getType(),
3854         "Unary operators must have same type for"
3855         "operands and result!",
3856         &U);
3857 
3858   switch (U.getOpcode()) {
3859   // Check that floating-point arithmetic operators are only used with
3860   // floating-point operands.
3861   case Instruction::FNeg:
3862     Check(U.getType()->isFPOrFPVectorTy(),
3863           "FNeg operator only works with float types!", &U);
3864     break;
3865   default:
3866     llvm_unreachable("Unknown UnaryOperator opcode!");
3867   }
3868 
3869   visitInstruction(U);
3870 }
3871 
3872 /// visitBinaryOperator - Check that both arguments to the binary operator are
3873 /// of the same type!
3874 ///
3875 void Verifier::visitBinaryOperator(BinaryOperator &B) {
3876   Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
3877         "Both operands to a binary operator are not of the same type!", &B);
3878 
3879   switch (B.getOpcode()) {
3880   // Check that integer arithmetic operators are only used with
3881   // integral operands.
3882   case Instruction::Add:
3883   case Instruction::Sub:
3884   case Instruction::Mul:
3885   case Instruction::SDiv:
3886   case Instruction::UDiv:
3887   case Instruction::SRem:
3888   case Instruction::URem:
3889     Check(B.getType()->isIntOrIntVectorTy(),
3890           "Integer arithmetic operators only work with integral types!", &B);
3891     Check(B.getType() == B.getOperand(0)->getType(),
3892           "Integer arithmetic operators must have same type "
3893           "for operands and result!",
3894           &B);
3895     break;
3896   // Check that floating-point arithmetic operators are only used with
3897   // floating-point operands.
3898   case Instruction::FAdd:
3899   case Instruction::FSub:
3900   case Instruction::FMul:
3901   case Instruction::FDiv:
3902   case Instruction::FRem:
3903     Check(B.getType()->isFPOrFPVectorTy(),
3904           "Floating-point arithmetic operators only work with "
3905           "floating-point types!",
3906           &B);
3907     Check(B.getType() == B.getOperand(0)->getType(),
3908           "Floating-point arithmetic operators must have same type "
3909           "for operands and result!",
3910           &B);
3911     break;
3912   // Check that logical operators are only used with integral operands.
3913   case Instruction::And:
3914   case Instruction::Or:
3915   case Instruction::Xor:
3916     Check(B.getType()->isIntOrIntVectorTy(),
3917           "Logical operators only work with integral types!", &B);
3918     Check(B.getType() == B.getOperand(0)->getType(),
3919           "Logical operators must have same type for operands and result!", &B);
3920     break;
3921   case Instruction::Shl:
3922   case Instruction::LShr:
3923   case Instruction::AShr:
3924     Check(B.getType()->isIntOrIntVectorTy(),
3925           "Shifts only work with integral types!", &B);
3926     Check(B.getType() == B.getOperand(0)->getType(),
3927           "Shift return type must be same as operands!", &B);
3928     break;
3929   default:
3930     llvm_unreachable("Unknown BinaryOperator opcode!");
3931   }
3932 
3933   visitInstruction(B);
3934 }
3935 
3936 void Verifier::visitICmpInst(ICmpInst &IC) {
3937   // Check that the operands are the same type
3938   Type *Op0Ty = IC.getOperand(0)->getType();
3939   Type *Op1Ty = IC.getOperand(1)->getType();
3940   Check(Op0Ty == Op1Ty,
3941         "Both operands to ICmp instruction are not of the same type!", &IC);
3942   // Check that the operands are the right type
3943   Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
3944         "Invalid operand types for ICmp instruction", &IC);
3945   // Check that the predicate is valid.
3946   Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
3947 
3948   visitInstruction(IC);
3949 }
3950 
3951 void Verifier::visitFCmpInst(FCmpInst &FC) {
3952   // Check that the operands are the same type
3953   Type *Op0Ty = FC.getOperand(0)->getType();
3954   Type *Op1Ty = FC.getOperand(1)->getType();
3955   Check(Op0Ty == Op1Ty,
3956         "Both operands to FCmp instruction are not of the same type!", &FC);
3957   // Check that the operands are the right type
3958   Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
3959         &FC);
3960   // Check that the predicate is valid.
3961   Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
3962 
3963   visitInstruction(FC);
3964 }
3965 
3966 void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
3967   Check(ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)),
3968         "Invalid extractelement operands!", &EI);
3969   visitInstruction(EI);
3970 }
3971 
3972 void Verifier::visitInsertElementInst(InsertElementInst &IE) {
3973   Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
3974                                            IE.getOperand(2)),
3975         "Invalid insertelement operands!", &IE);
3976   visitInstruction(IE);
3977 }
3978 
3979 void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
3980   Check(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
3981                                            SV.getShuffleMask()),
3982         "Invalid shufflevector operands!", &SV);
3983   visitInstruction(SV);
3984 }
3985 
3986 void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3987   Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
3988 
3989   Check(isa<PointerType>(TargetTy),
3990         "GEP base pointer is not a vector or a vector of pointers", &GEP);
3991   Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
3992 
3993   if (auto *STy = dyn_cast<StructType>(GEP.getSourceElementType())) {
3994     SmallPtrSet<Type *, 4> Visited;
3995     Check(!STy->containsScalableVectorType(&Visited),
3996           "getelementptr cannot target structure that contains scalable vector"
3997           "type",
3998           &GEP);
3999   }
4000 
4001   SmallVector<Value *, 16> Idxs(GEP.indices());
4002   Check(
4003       all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
4004       "GEP indexes must be integers", &GEP);
4005   Type *ElTy =
4006       GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
4007   Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
4008 
4009   Check(GEP.getType()->isPtrOrPtrVectorTy() &&
4010             GEP.getResultElementType() == ElTy,
4011         "GEP is not of right type for indices!", &GEP, ElTy);
4012 
4013   if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
4014     // Additional checks for vector GEPs.
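    // For example, 'getelementptr i32, <4 x ptr> %ps, <4 x i64> %offs'
    // produces a <4 x ptr>, and every vector operand must use that same
    // element count.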
4015     ElementCount GEPWidth = GEPVTy->getElementCount();
4016     if (GEP.getPointerOperandType()->isVectorTy())
4017       Check(
4018           GEPWidth ==
4019               cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
4020           "Vector GEP result width doesn't match operand's", &GEP);
4021     for (Value *Idx : Idxs) {
4022       Type *IndexTy = Idx->getType();
4023       if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
4024         ElementCount IndexWidth = IndexVTy->getElementCount();
4025         Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
4026       }
4027       Check(IndexTy->isIntOrIntVectorTy(),
4028             "All GEP indices should be of integer type");
4029     }
4030   }
4031 
4032   if (auto *PTy = dyn_cast<PointerType>(GEP.getType())) {
4033     Check(GEP.getAddressSpace() == PTy->getAddressSpace(),
4034           "GEP address space doesn't match type", &GEP);
4035   }
4036 
4037   visitInstruction(GEP);
4038 }
4039 
4040 static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
4041   return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
4042 }
4043 
4044 /// Verify !range and !absolute_symbol metadata. These have the same
4045 /// restrictions, except !absolute_symbol allows the full set.
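/// A typical !range attachment looks like '!{i32 0, i32 10, i32 20, i32 30}':
/// a list of half-open [Lo, Hi) pairs that must be in order, non-overlapping
/// and non-contiguous.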
4046 void Verifier::verifyRangeMetadata(const Value &I, const MDNode *Range,
4047                                    Type *Ty, bool IsAbsoluteSymbol) {
4048   unsigned NumOperands = Range->getNumOperands();
4049   Check(NumOperands % 2 == 0, "Unfinished range!", Range);
4050   unsigned NumRanges = NumOperands / 2;
4051   Check(NumRanges >= 1, "It should have at least one range!", Range);
4052 
4053   ConstantRange LastRange(1, true); // Dummy initial value
4054   for (unsigned i = 0; i < NumRanges; ++i) {
4055     ConstantInt *Low =
4056         mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
4057     Check(Low, "The lower limit must be an integer!", Low);
4058     ConstantInt *High =
4059         mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
4060     Check(High, "The upper limit must be an integer!", High);
4061     Check(High->getType() == Low->getType() &&
4062           High->getType() == Ty->getScalarType(),
4063           "Range types must match instruction type!", &I);
4064 
4065     APInt HighV = High->getValue();
4066     APInt LowV = Low->getValue();
4067 
4068     // ConstantRange asserts if the ranges are the same except for the min/max
4069     // value. Leave the cases it tolerates for the empty range error below.
4070     Check(LowV != HighV || LowV.isMaxValue() || LowV.isMinValue(),
4071           "The upper and lower limits cannot be the same value", &I);
4072 
4073     ConstantRange CurRange(LowV, HighV);
4074     Check(!CurRange.isEmptySet() && (IsAbsoluteSymbol || !CurRange.isFullSet()),
4075           "Range must not be empty!", Range);
4076     if (i != 0) {
4077       Check(CurRange.intersectWith(LastRange).isEmptySet(),
4078             "Intervals are overlapping", Range);
4079       Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
4080             Range);
4081       Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
4082             Range);
4083     }
4084     LastRange = ConstantRange(LowV, HighV);
4085   }
4086   if (NumRanges > 2) {
4087     APInt FirstLow =
4088         mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
4089     APInt FirstHigh =
4090         mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
4091     ConstantRange FirstRange(FirstLow, FirstHigh);
4092     Check(FirstRange.intersectWith(LastRange).isEmptySet(),
4093           "Intervals are overlapping", Range);
4094     Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
4095           Range);
4096   }
4097 }
4098 
4099 void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
4100   assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
4101          "precondition violation");
4102   verifyRangeMetadata(I, Range, Ty, false);
4103 }
4104 
4105 void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
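  // For example, i8/i16/i32/i64/i128 accesses satisfy both checks below, while
  // an i48 access fails the power-of-two check.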
4106   unsigned Size = DL.getTypeSizeInBits(Ty);
4107   Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
4108   Check(!(Size & (Size - 1)),
4109         "atomic memory access' operand must have a power-of-two size", Ty, I);
4110 }
4111 
4112 void Verifier::visitLoadInst(LoadInst &LI) {
4113   PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
4114   Check(PTy, "Load operand must be a pointer.", &LI);
4115   Type *ElTy = LI.getType();
4116   if (MaybeAlign A = LI.getAlign()) {
4117     Check(A->value() <= Value::MaximumAlignment,
4118           "huge alignment values are unsupported", &LI);
4119   }
4120   Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
4121   if (LI.isAtomic()) {
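    // Per the LangRef, atomic loads may not use release or acq_rel ordering.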
4122     Check(LI.getOrdering() != AtomicOrdering::Release &&
4123               LI.getOrdering() != AtomicOrdering::AcquireRelease,
4124           "Load cannot have Release ordering", &LI);
4125     Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4126           "atomic load operand must have integer, pointer, or floating point "
4127           "type!",
4128           ElTy, &LI);
4129     checkAtomicMemAccessSize(ElTy, &LI);
4130   } else {
4131     Check(LI.getSyncScopeID() == SyncScope::System,
4132           "Non-atomic load cannot have SynchronizationScope specified", &LI);
4133   }
4134 
4135   visitInstruction(LI);
4136 }
4137 
4138 void Verifier::visitStoreInst(StoreInst &SI) {
4139   PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
4140   Check(PTy, "Store operand must be a pointer.", &SI);
4141   Type *ElTy = SI.getOperand(0)->getType();
4142   if (MaybeAlign A = SI.getAlign()) {
4143     Check(A->value() <= Value::MaximumAlignment,
4144           "huge alignment values are unsupported", &SI);
4145   }
4146   Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
4147   if (SI.isAtomic()) {
4148     Check(SI.getOrdering() != AtomicOrdering::Acquire &&
4149               SI.getOrdering() != AtomicOrdering::AcquireRelease,
4150           "Store cannot have Acquire ordering", &SI);
4151     Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
4152           "atomic store operand must have integer, pointer, or floating point "
4153           "type!",
4154           ElTy, &SI);
4155     checkAtomicMemAccessSize(ElTy, &SI);
4156   } else {
4157     Check(SI.getSyncScopeID() == SyncScope::System,
4158           "Non-atomic store cannot have SynchronizationScope specified", &SI);
4159   }
4160   visitInstruction(SI);
4161 }
4162 
4163 /// Check that SwiftErrorVal is used as a swifterror argument in CS.
4164 void Verifier::verifySwiftErrorCall(CallBase &Call,
4165                                     const Value *SwiftErrorVal) {
4166   for (const auto &I : llvm::enumerate(Call.args())) {
4167     if (I.value() == SwiftErrorVal) {
4168       Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
4169             "swifterror value when used in a callsite should be marked "
4170             "with swifterror attribute",
4171             SwiftErrorVal, Call);
4172     }
4173   }
4174 }
4175 
4176 void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
4177   // Check that swifterror value is only used by loads, stores, or as
4178   // a swifterror argument.
4179   for (const User *U : SwiftErrorVal->users()) {
4180     Check(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
4181               isa<InvokeInst>(U),
4182           "swifterror value can only be loaded and stored from, or "
4183           "as a swifterror argument!",
4184           SwiftErrorVal, U);
4185     // If it is used by a store, check it is the second operand.
4186     if (auto StoreI = dyn_cast<StoreInst>(U))
4187       Check(StoreI->getOperand(1) == SwiftErrorVal,
4188             "swifterror value should be the second operand when used "
4189             "by stores",
4190             SwiftErrorVal, U);
4191     if (auto *Call = dyn_cast<CallBase>(U))
4192       verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
4193   }
4194 }
4195 
4196 void Verifier::visitAllocaInst(AllocaInst &AI) {
4197   SmallPtrSet<Type*, 4> Visited;
4198   Check(AI.getAllocatedType()->isSized(&Visited),
4199         "Cannot allocate unsized type", &AI);
4200   Check(AI.getArraySize()->getType()->isIntegerTy(),
4201         "Alloca array size must have integer type", &AI);
4202   if (MaybeAlign A = AI.getAlign()) {
4203     Check(A->value() <= Value::MaximumAlignment,
4204           "huge alignment values are unsupported", &AI);
4205   }
4206 
4207   if (AI.isSwiftError()) {
4208     Check(AI.getAllocatedType()->isPointerTy(),
4209           "swifterror alloca must have pointer type", &AI);
4210     Check(!AI.isArrayAllocation(),
4211           "swifterror alloca must not be array allocation", &AI);
4212     verifySwiftErrorValue(&AI);
4213   }
4214 
4215   visitInstruction(AI);
4216 }
4217 
4218 void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
4219   Type *ElTy = CXI.getOperand(1)->getType();
4220   Check(ElTy->isIntOrPtrTy(),
4221         "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
4222   checkAtomicMemAccessSize(ElTy, &CXI);
4223   visitInstruction(CXI);
4224 }
4225 
4226 void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
4227   Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
4228         "atomicrmw instructions cannot be unordered.", &RMWI);
4229   auto Op = RMWI.getOperation();
4230   Type *ElTy = RMWI.getOperand(1)->getType();
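  // As checked below, xchg also accepts pointer and floating-point operands,
  // the floating-point operations (fadd, fsub, fmax, fmin, ...) require a
  // floating-point operand, and all other operations are integer-only.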
4231   if (Op == AtomicRMWInst::Xchg) {
4232     Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
4233               ElTy->isPointerTy(),
4234           "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4235               " operand must have integer or floating point type!",
4236           &RMWI, ElTy);
4237   } else if (AtomicRMWInst::isFPOperation(Op)) {
4238     Check(ElTy->isFloatingPointTy(),
4239           "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4240               " operand must have floating point type!",
4241           &RMWI, ElTy);
4242   } else {
4243     Check(ElTy->isIntegerTy(),
4244           "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
4245               " operand must have integer type!",
4246           &RMWI, ElTy);
4247   }
4248   checkAtomicMemAccessSize(ElTy, &RMWI);
4249   Check(AtomicRMWInst::FIRST_BINOP <= Op && Op <= AtomicRMWInst::LAST_BINOP,
4250         "Invalid binary operation!", &RMWI);
4251   visitInstruction(RMWI);
4252 }
4253 
4254 void Verifier::visitFenceInst(FenceInst &FI) {
4255   const AtomicOrdering Ordering = FI.getOrdering();
4256   Check(Ordering == AtomicOrdering::Acquire ||
4257             Ordering == AtomicOrdering::Release ||
4258             Ordering == AtomicOrdering::AcquireRelease ||
4259             Ordering == AtomicOrdering::SequentiallyConsistent,
4260         "fence instructions may only have acquire, release, acq_rel, or "
4261         "seq_cst ordering.",
4262         &FI);
4263   visitInstruction(FI);
4264 }
4265 
4266 void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
4267   Check(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
4268                                          EVI.getIndices()) == EVI.getType(),
4269         "Invalid ExtractValueInst operands!", &EVI);
4270 
4271   visitInstruction(EVI);
4272 }
4273 
4274 void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
4275   Check(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
4276                                          IVI.getIndices()) ==
4277             IVI.getOperand(1)->getType(),
4278         "Invalid InsertValueInst operands!", &IVI);
4279 
4280   visitInstruction(IVI);
4281 }
4282 
4283 static Value *getParentPad(Value *EHPad) {
4284   if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
4285     return FPI->getParentPad();
4286 
4287   return cast<CatchSwitchInst>(EHPad)->getParentPad();
4288 }
4289 
4290 void Verifier::visitEHPadPredecessors(Instruction &I) {
4291   assert(I.isEHPad());
4292 
4293   BasicBlock *BB = I.getParent();
4294   Function *F = BB->getParent();
4295 
4296   Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
4297 
4298   if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
4299     // The landingpad instruction defines its parent as a landing pad block. The
4300     // landing pad block may be branched to only by the unwind edge of an
4301     // invoke.
4302     for (BasicBlock *PredBB : predecessors(BB)) {
4303       const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
4304       Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
4305             "Block containing LandingPadInst must be jumped to "
4306             "only by the unwind edge of an invoke.",
4307             LPI);
4308     }
4309     return;
4310   }
4311   if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
4312     if (!pred_empty(BB))
4313       Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
4314             "Block containg CatchPadInst must be jumped to "
4315             "only by its catchswitch.",
4316             CPI);
4317     Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
4318           "Catchswitch cannot unwind to one of its catchpads",
4319           CPI->getCatchSwitch(), CPI);
4320     return;
4321   }
4322 
4323   // Verify that each pred has a legal terminator with a legal to/from EH
4324   // pad relationship.
4325   Instruction *ToPad = &I;
4326   Value *ToPadParent = getParentPad(ToPad);
4327   for (BasicBlock *PredBB : predecessors(BB)) {
4328     Instruction *TI = PredBB->getTerminator();
4329     Value *FromPad;
4330     if (auto *II = dyn_cast<InvokeInst>(TI)) {
4331       Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
4332             "EH pad must be jumped to via an unwind edge", ToPad, II);
4333       if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
4334         FromPad = Bundle->Inputs[0];
4335       else
4336         FromPad = ConstantTokenNone::get(II->getContext());
4337     } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
4338       FromPad = CRI->getOperand(0);
4339       Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
4340     } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
4341       FromPad = CSI;
4342     } else {
4343       Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
4344     }
4345 
4346     // The edge may exit from zero or more nested pads.
4347     SmallSet<Value *, 8> Seen;
4348     for (;; FromPad = getParentPad(FromPad)) {
4349       Check(FromPad != ToPad,
4350             "EH pad cannot handle exceptions raised within it", FromPad, TI);
4351       if (FromPad == ToPadParent) {
4352         // This is a legal unwind edge.
4353         break;
4354       }
4355       Check(!isa<ConstantTokenNone>(FromPad),
4356             "A single unwind edge may only enter one EH pad", TI);
4357       Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
4358             FromPad);
4359 
4360       // This will be diagnosed on the corresponding instruction already. We
4361       // need the extra check here to make sure getParentPad() works.
4362       Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
4363             "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
4364     }
4365   }
4366 }
4367 
4368 void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
4369   // The landingpad instruction is ill-formed if it doesn't have any clauses and
4370   // isn't a cleanup.
4371   Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
4372         "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
4373 
4374   visitEHPadPredecessors(LPI);
4375 
4376   if (!LandingPadResultTy)
4377     LandingPadResultTy = LPI.getType();
4378   else
4379     Check(LandingPadResultTy == LPI.getType(),
4380           "The landingpad instruction should have a consistent result type "
4381           "inside a function.",
4382           &LPI);
4383 
4384   Function *F = LPI.getParent()->getParent();
4385   Check(F->hasPersonalityFn(),
4386         "LandingPadInst needs to be in a function with a personality.", &LPI);
4387 
4388   // The landingpad instruction must be the first non-PHI instruction in the
4389   // block.
4390   Check(LPI.getParent()->getLandingPadInst() == &LPI,
4391         "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
4392 
4393   for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
4394     Constant *Clause = LPI.getClause(i);
4395     if (LPI.isCatch(i)) {
4396       Check(isa<PointerType>(Clause->getType()),
4397             "Catch operand does not have pointer type!", &LPI);
4398     } else {
4399       Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
4400       Check(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
4401             "Filter operand is not an array of constants!", &LPI);
4402     }
4403   }
4404 
4405   visitInstruction(LPI);
4406 }
4407 
4408 void Verifier::visitResumeInst(ResumeInst &RI) {
4409   Check(RI.getFunction()->hasPersonalityFn(),
4410         "ResumeInst needs to be in a function with a personality.", &RI);
4411 
4412   if (!LandingPadResultTy)
4413     LandingPadResultTy = RI.getValue()->getType();
4414   else
4415     Check(LandingPadResultTy == RI.getValue()->getType(),
4416           "The resume instruction should have a consistent result type "
4417           "inside a function.",
4418           &RI);
4419 
4420   visitTerminator(RI);
4421 }
4422 
4423 void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
4424   BasicBlock *BB = CPI.getParent();
4425 
4426   Function *F = BB->getParent();
4427   Check(F->hasPersonalityFn(),
4428         "CatchPadInst needs to be in a function with a personality.", &CPI);
4429 
4430   Check(isa<CatchSwitchInst>(CPI.getParentPad()),
4431         "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
4432         CPI.getParentPad());
4433 
4434   // The catchpad instruction must be the first non-PHI instruction in the
4435   // block.
4436   Check(BB->getFirstNonPHI() == &CPI,
4437         "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
4438 
4439   visitEHPadPredecessors(CPI);
4440   visitFuncletPadInst(CPI);
4441 }
4442 
4443 void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
4444   Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
4445         "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
4446         CatchReturn.getOperand(0));
4447 
4448   visitTerminator(CatchReturn);
4449 }
4450 
4451 void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
4452   BasicBlock *BB = CPI.getParent();
4453 
4454   Function *F = BB->getParent();
4455   Check(F->hasPersonalityFn(),
4456         "CleanupPadInst needs to be in a function with a personality.", &CPI);
4457 
4458   // The cleanuppad instruction must be the first non-PHI instruction in the
4459   // block.
4460   Check(BB->getFirstNonPHI() == &CPI,
4461         "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
4462 
4463   auto *ParentPad = CPI.getParentPad();
4464   Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4465         "CleanupPadInst has an invalid parent.", &CPI);
4466 
4467   visitEHPadPredecessors(CPI);
4468   visitFuncletPadInst(CPI);
4469 }
4470 
4471 void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
4472   User *FirstUser = nullptr;
4473   Value *FirstUnwindPad = nullptr;
4474   SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
4475   SmallSet<FuncletPadInst *, 8> Seen;
4476 
4477   while (!Worklist.empty()) {
4478     FuncletPadInst *CurrentPad = Worklist.pop_back_val();
4479     Check(Seen.insert(CurrentPad).second,
4480           "FuncletPadInst must not be nested within itself", CurrentPad);
4481     Value *UnresolvedAncestorPad = nullptr;
4482     for (User *U : CurrentPad->users()) {
4483       BasicBlock *UnwindDest;
4484       if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
4485         UnwindDest = CRI->getUnwindDest();
4486       } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
4487         // We allow catchswitch unwind to caller to nest
4488         // within an outer pad that unwinds somewhere else,
4489         // because catchswitch doesn't have a nounwind variant.
4490         // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
4491         if (CSI->unwindsToCaller())
4492           continue;
4493         UnwindDest = CSI->getUnwindDest();
4494       } else if (auto *II = dyn_cast<InvokeInst>(U)) {
4495         UnwindDest = II->getUnwindDest();
4496       } else if (isa<CallInst>(U)) {
4497         // Calls which don't unwind may be found inside funclet
4498         // pads that unwind somewhere else.  We don't *require*
4499         // such calls to be annotated nounwind.
4500         continue;
4501       } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
4502         // The unwind dest for a cleanup can only be found by
4503         // recursive search.  Add it to the worklist, and we'll
4504         // search for its first use that determines where it unwinds.
4505         Worklist.push_back(CPI);
4506         continue;
4507       } else {
4508         Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
4509         continue;
4510       }
4511 
4512       Value *UnwindPad;
4513       bool ExitsFPI;
4514       if (UnwindDest) {
4515         UnwindPad = UnwindDest->getFirstNonPHI();
4516         if (!cast<Instruction>(UnwindPad)->isEHPad())
4517           continue;
4518         Value *UnwindParent = getParentPad(UnwindPad);
4519         // Ignore unwind edges that don't exit CurrentPad.
4520         if (UnwindParent == CurrentPad)
4521           continue;
4522         // Determine whether the original funclet pad is exited,
4523         // and if we are scanning nested pads determine how many
4524         // of them are exited so we can stop searching their
4525         // children.
4526         Value *ExitedPad = CurrentPad;
4527         ExitsFPI = false;
4528         do {
4529           if (ExitedPad == &FPI) {
4530             ExitsFPI = true;
4531             // Now we can resolve any ancestors of CurrentPad up to
4532             // FPI, but not including FPI since we need to make sure
4533             // to check all direct users of FPI for consistency.
4534             UnresolvedAncestorPad = &FPI;
4535             break;
4536           }
4537           Value *ExitedParent = getParentPad(ExitedPad);
4538           if (ExitedParent == UnwindParent) {
4539             // ExitedPad is the ancestor-most pad which this unwind
4540             // edge exits, so we can resolve up to it, meaning that
4541             // ExitedParent is the first ancestor still unresolved.
4542             UnresolvedAncestorPad = ExitedParent;
4543             break;
4544           }
4545           ExitedPad = ExitedParent;
4546         } while (!isa<ConstantTokenNone>(ExitedPad));
4547       } else {
4548         // Unwinding to caller exits all pads.
4549         UnwindPad = ConstantTokenNone::get(FPI.getContext());
4550         ExitsFPI = true;
4551         UnresolvedAncestorPad = &FPI;
4552       }
4553 
4554       if (ExitsFPI) {
4555         // This unwind edge exits FPI.  Make sure it agrees with other
4556         // such edges.
4557         if (FirstUser) {
4558           Check(UnwindPad == FirstUnwindPad,
4559                 "Unwind edges out of a funclet "
4560                 "pad must have the same unwind "
4561                 "dest",
4562                 &FPI, U, FirstUser);
4563         } else {
4564           FirstUser = U;
4565           FirstUnwindPad = UnwindPad;
4566           // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
4567           if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
4568               getParentPad(UnwindPad) == getParentPad(&FPI))
4569             SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
4570         }
4571       }
4572       // Make sure we visit all uses of FPI, but for nested pads stop as
4573       // soon as we know where they unwind to.
4574       if (CurrentPad != &FPI)
4575         break;
4576     }
4577     if (UnresolvedAncestorPad) {
4578       if (CurrentPad == UnresolvedAncestorPad) {
4579         // When CurrentPad is FPI itself, we don't mark it as resolved even if
4580         // we've found an unwind edge that exits it, because we need to verify
4581         // all direct uses of FPI.
4582         assert(CurrentPad == &FPI);
4583         continue;
4584       }
4585       // Pop off the worklist any nested pads that we've found an unwind
4586       // destination for.  The pads on the worklist are the uncles,
4587       // great-uncles, etc. of CurrentPad.  We've found an unwind destination
4588       // for all ancestors of CurrentPad up to but not including
4589       // UnresolvedAncestorPad.
4590       Value *ResolvedPad = CurrentPad;
4591       while (!Worklist.empty()) {
4592         Value *UnclePad = Worklist.back();
4593         Value *AncestorPad = getParentPad(UnclePad);
4594         // Walk ResolvedPad up the ancestor list until we either find the
4595         // uncle's parent or the last resolved ancestor.
4596         while (ResolvedPad != AncestorPad) {
4597           Value *ResolvedParent = getParentPad(ResolvedPad);
4598           if (ResolvedParent == UnresolvedAncestorPad) {
4599             break;
4600           }
4601           ResolvedPad = ResolvedParent;
4602         }
4603         // If the resolved ancestor search didn't find the uncle's parent,
4604         // then the uncle is not yet resolved.
4605         if (ResolvedPad != AncestorPad)
4606           break;
4607         // This uncle is resolved, so pop it from the worklist.
4608         Worklist.pop_back();
4609       }
4610     }
4611   }
4612 
4613   if (FirstUnwindPad) {
4614     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
4615       BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
4616       Value *SwitchUnwindPad;
4617       if (SwitchUnwindDest)
4618         SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
4619       else
4620         SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
4621       Check(SwitchUnwindPad == FirstUnwindPad,
4622             "Unwind edges out of a catch must have the same unwind dest as "
4623             "the parent catchswitch",
4624             &FPI, FirstUser, CatchSwitch);
4625     }
4626   }
4627 
4628   visitInstruction(FPI);
4629 }
4630 
4631 void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
4632   BasicBlock *BB = CatchSwitch.getParent();
4633 
4634   Function *F = BB->getParent();
4635   Check(F->hasPersonalityFn(),
4636         "CatchSwitchInst needs to be in a function with a personality.",
4637         &CatchSwitch);
4638 
4639   // The catchswitch instruction must be the first non-PHI instruction in the
4640   // block.
4641   Check(BB->getFirstNonPHI() == &CatchSwitch,
4642         "CatchSwitchInst not the first non-PHI instruction in the block.",
4643         &CatchSwitch);
4644 
4645   auto *ParentPad = CatchSwitch.getParentPad();
4646   Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4647         "CatchSwitchInst has an invalid parent.", ParentPad);
4648 
4649   if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
4650     Instruction *I = UnwindDest->getFirstNonPHI();
4651     Check(I->isEHPad() && !isa<LandingPadInst>(I),
4652           "CatchSwitchInst must unwind to an EH block which is not a "
4653           "landingpad.",
4654           &CatchSwitch);
4655 
4656     // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
4657     if (getParentPad(I) == ParentPad)
4658       SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
4659   }
4660 
4661   Check(CatchSwitch.getNumHandlers() != 0,
4662         "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
4663 
4664   for (BasicBlock *Handler : CatchSwitch.handlers()) {
4665     Check(isa<CatchPadInst>(Handler->getFirstNonPHI()),
4666           "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
4667   }
4668 
4669   visitEHPadPredecessors(CatchSwitch);
4670   visitTerminator(CatchSwitch);
4671 }
4672 
4673 void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
4674   Check(isa<CleanupPadInst>(CRI.getOperand(0)),
4675         "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
4676         CRI.getOperand(0));
4677 
4678   if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
4679     Instruction *I = UnwindDest->getFirstNonPHI();
4680     Check(I->isEHPad() && !isa<LandingPadInst>(I),
4681           "CleanupReturnInst must unwind to an EH block which is not a "
4682           "landingpad.",
4683           &CRI);
4684   }
4685 
4686   visitTerminator(CRI);
4687 }
4688 
4689 void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
4690   Instruction *Op = cast<Instruction>(I.getOperand(i));
  // If we have an invalid invoke, don't try to compute the dominance.
  // We already reject it in the invoke-specific checks, and the dominance
4693   // computation doesn't handle multiple edges.
4694   if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
4695     if (II->getNormalDest() == II->getUnwindDest())
4696       return;
4697   }
4698 
4699   // Quick check whether the def has already been encountered in the same block.
4700   // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
4701   // uses are defined to happen on the incoming edge, not at the instruction.
4702   //
4703   // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
4704   // wrapping an SSA value, assert that we've already encountered it.  See
4705   // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
4706   if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
4707     return;
4708 
4709   const Use &U = I.getOperandUse(i);
4710   Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
4711 }
4712 
4713 void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
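  // For example, 'load ptr, ptr %p, !dereferenceable !{i64 8}' asserts that
  // the loaded pointer points to at least 8 dereferenceable bytes.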
4714   Check(I.getType()->isPointerTy(),
4715         "dereferenceable, dereferenceable_or_null "
4716         "apply only to pointer types",
4717         &I);
4718   Check((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
4719         "dereferenceable, dereferenceable_or_null apply only to load"
4720         " and inttoptr instructions, use attributes for calls or invokes",
4721         &I);
4722   Check(MD->getNumOperands() == 1,
4723         "dereferenceable, dereferenceable_or_null "
4724         "take one operand!",
4725         &I);
4726   ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
4727   Check(CI && CI->getType()->isIntegerTy(64),
4728         "dereferenceable, "
4729         "dereferenceable_or_null metadata value must be an i64!",
4730         &I);
4731 }
4732 
4733 void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
4734   Check(MD->getNumOperands() >= 2,
4735         "!prof annotations should have no less than 2 operands", MD);
4736 
4737   // Check first operand.
4738   Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
4739   Check(isa<MDString>(MD->getOperand(0)),
4740         "expected string with name of the !prof annotation", MD);
4741   MDString *MDS = cast<MDString>(MD->getOperand(0));
4742   StringRef ProfName = MDS->getString();
4743 
4744   // Check consistency of !prof branch_weights metadata.
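  // For example, a two-successor conditional branch may carry
  // '!prof !{!"branch_weights", i32 80, i32 20}': the name string followed by
  // one weight per successor.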
4745   if (ProfName.equals("branch_weights")) {
4746     if (isa<InvokeInst>(&I)) {
4747       Check(MD->getNumOperands() == 2 || MD->getNumOperands() == 3,
4748             "Wrong number of InvokeInst branch_weights operands", MD);
4749     } else {
4750       unsigned ExpectedNumOperands = 0;
4751       if (BranchInst *BI = dyn_cast<BranchInst>(&I))
4752         ExpectedNumOperands = BI->getNumSuccessors();
4753       else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
4754         ExpectedNumOperands = SI->getNumSuccessors();
4755       else if (isa<CallInst>(&I))
4756         ExpectedNumOperands = 1;
4757       else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
4758         ExpectedNumOperands = IBI->getNumDestinations();
4759       else if (isa<SelectInst>(&I))
4760         ExpectedNumOperands = 2;
4761       else if (CallBrInst *CI = dyn_cast<CallBrInst>(&I))
4762         ExpectedNumOperands = CI->getNumSuccessors();
4763       else
4764         CheckFailed("!prof branch_weights are not allowed for this instruction",
4765                     MD);
4766 
4767       Check(MD->getNumOperands() == 1 + ExpectedNumOperands,
4768             "Wrong number of operands", MD);
4769     }
4770     for (unsigned i = 1; i < MD->getNumOperands(); ++i) {
4771       auto &MDO = MD->getOperand(i);
4772       Check(MDO, "second operand should not be null", MD);
4773       Check(mdconst::dyn_extract<ConstantInt>(MDO),
4774             "!prof brunch_weights operand is not a const int");
4775     }
4776   }
4777 }
4778 
4779 void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
4780   assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
4781   bool ExpectedInstTy =
4782       isa<AllocaInst>(I) || isa<StoreInst>(I) || isa<MemIntrinsic>(I);
4783   CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
4784           I, MD);
4785   // Iterate over the MetadataAsValue uses of the DIAssignID - these should
4786   // only be found as DbgAssignIntrinsic operands.
4787   if (auto *AsValue = MetadataAsValue::getIfExists(Context, MD)) {
4788     for (auto *User : AsValue->users()) {
4789       CheckDI(isa<DbgAssignIntrinsic>(User),
4790               "!DIAssignID should only be used by llvm.dbg.assign intrinsics",
4791               MD, User);
4792       // All of the dbg.assign intrinsics should be in the same function as I.
4793       if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(User))
4794         CheckDI(DAI->getFunction() == I.getFunction(),
4795                 "dbg.assign not in same function as inst", DAI, &I);
4796     }
4797   }
4798 }
4799 
4800 void Verifier::visitCallStackMetadata(MDNode *MD) {
4801   // Call stack metadata should consist of a list of at least 1 constant int
4802   // (representing a hash of the location).
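  // For example, '!{i64 123, i64 456}', typically one id per call frame.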
4803   Check(MD->getNumOperands() >= 1,
4804         "call stack metadata should have at least 1 operand", MD);
4805 
4806   for (const auto &Op : MD->operands())
4807     Check(mdconst::dyn_extract_or_null<ConstantInt>(Op),
4808           "call stack metadata operand should be constant integer", Op);
4809 }
4810 
4811 void Verifier::visitMemProfMetadata(Instruction &I, MDNode *MD) {
4812   Check(isa<CallBase>(I), "!memprof metadata should only exist on calls", &I);
4813   Check(MD->getNumOperands() >= 1,
4814         "!memprof annotations should have at least 1 metadata operand "
4815         "(MemInfoBlock)",
4816         MD);
4817 
4818   // Check each MIB
4819   for (auto &MIBOp : MD->operands()) {
    MDNode *MIB = dyn_cast<MDNode>(MIBOp);
    Check(MIB != nullptr, "!memprof MemInfoBlock should be an MDNode", MD);
4821     // The first operand of an MIB should be the call stack metadata.
    // The rest of the operands should be MDString tags, and there should be
4823     // at least one.
4824     Check(MIB->getNumOperands() >= 2,
4825           "Each !memprof MemInfoBlock should have at least 2 operands", MIB);
4826 
4827     // Check call stack metadata (first operand).
4828     Check(MIB->getOperand(0) != nullptr,
4829           "!memprof MemInfoBlock first operand should not be null", MIB);
4830     Check(isa<MDNode>(MIB->getOperand(0)),
4831           "!memprof MemInfoBlock first operand should be an MDNode", MIB);
4832     MDNode *StackMD = dyn_cast<MDNode>(MIB->getOperand(0));
4833     visitCallStackMetadata(StackMD);
4834 
4835     // Check that remaining operands are MDString.
4836     Check(llvm::all_of(llvm::drop_begin(MIB->operands()),
4837                        [](const MDOperand &Op) { return isa<MDString>(Op); }),
4838           "Not all !memprof MemInfoBlock operands 1 to N are MDString", MIB);
4839   }
4840 }
4841 
4842 void Verifier::visitCallsiteMetadata(Instruction &I, MDNode *MD) {
4843   Check(isa<CallBase>(I), "!callsite metadata should only exist on calls", &I);
4844   // Verify the partial callstack annotated from memprof profiles. This callsite
4845   // is a part of a profiled allocation callstack.
4846   visitCallStackMetadata(MD);
4847 }
4848 
4849 void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
4850   Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
4851   Check(Annotation->getNumOperands() >= 1,
4852         "annotation must have at least one operand");
4853   for (const MDOperand &Op : Annotation->operands()) {
4854     bool TupleOfStrings =
4855         isa<MDTuple>(Op.get()) &&
4856         all_of(cast<MDTuple>(Op)->operands(), [](auto &Annotation) {
4857           return isa<MDString>(Annotation.get());
4858         });
4859     Check(isa<MDString>(Op.get()) || TupleOfStrings,
4860           "operands must be a string or a tuple of strings");
4861   }
4862 }
4863 
4864 void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
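  // A scope node typically looks like '!1 = !{!1, !0, !"scope name"}': a
  // self-reference (or a string), the domain it belongs to, and an optional
  // name string.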
4865   unsigned NumOps = MD->getNumOperands();
4866   Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
4867         MD);
4868   Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
4869         "first scope operand must be self-referential or string", MD);
4870   if (NumOps == 3)
4871     Check(isa<MDString>(MD->getOperand(2)),
4872           "third scope operand must be string (if used)", MD);
4873 
4874   MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
4875   Check(Domain != nullptr, "second scope operand must be MDNode", MD);
4876 
4877   unsigned NumDomainOps = Domain->getNumOperands();
4878   Check(NumDomainOps >= 1 && NumDomainOps <= 2,
4879         "domain must have one or two operands", Domain);
4880   Check(Domain->getOperand(0).get() == Domain ||
4881             isa<MDString>(Domain->getOperand(0)),
4882         "first domain operand must be self-referential or string", Domain);
4883   if (NumDomainOps == 2)
4884     Check(isa<MDString>(Domain->getOperand(1)),
4885           "second domain operand must be string (if used)", Domain);
4886 }
4887 
4888 void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
4889   for (const MDOperand &Op : MD->operands()) {
4890     const MDNode *OpMD = dyn_cast<MDNode>(Op);
4891     Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
4892     visitAliasScopeMetadata(OpMD);
4893   }
4894 }
4895 
4896 void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
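  // An access group is a distinct, empty node, e.g. '!7 = distinct !{}'; an
  // instruction may instead reference a list of such groups, e.g. '!{!7, !8}'.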
4897   auto IsValidAccessScope = [](const MDNode *MD) {
4898     return MD->getNumOperands() == 0 && MD->isDistinct();
4899   };
4900 
4901   // It must be either an access scope itself...
4902   if (IsValidAccessScope(MD))
4903     return;
4904 
4905   // ...or a list of access scopes.
4906   for (const MDOperand &Op : MD->operands()) {
4907     const MDNode *OpMD = dyn_cast<MDNode>(Op);
4908     Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
4909     Check(IsValidAccessScope(OpMD),
4910           "Access scope list contains invalid access scope", MD);
4911   }
4912 }
4913 
/// visitInstruction - Verify that an instruction is well formed.
4915 ///
4916 void Verifier::visitInstruction(Instruction &I) {
4917   BasicBlock *BB = I.getParent();
4918   Check(BB, "Instruction not embedded in basic block!", &I);
4919 
4920   if (!isa<PHINode>(I)) {   // Check that non-phi nodes are not self referential
4921     for (User *U : I.users()) {
4922       Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
4923             "Only PHI nodes may reference their own value!", &I);
4924     }
4925   }
4926 
4927   // Check that void typed values don't have names
4928   Check(!I.getType()->isVoidTy() || !I.hasName(),
4929         "Instruction has a name, but provides a void value!", &I);
4930 
4931   // Check that the return value of the instruction is either void or a legal
4932   // value type.
4933   Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
4934         "Instruction returns a non-scalar type!", &I);
4935 
4936   // Check that the instruction doesn't produce metadata. Calls are already
4937   // checked against the callee type.
4938   Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
4939         "Invalid use of metadata!", &I);
4940 
4941   // Check that all uses of the instruction, if they are instructions
4942   // themselves, actually have parent basic blocks.  If the use is not an
4943   // instruction, it is an error!
4944   for (Use &U : I.uses()) {
4945     if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
4946       Check(Used->getParent() != nullptr,
4947             "Instruction referencing"
4948             " instruction not embedded in a basic block!",
4949             &I, Used);
4950     else {
4951       CheckFailed("Use of instruction is not an instruction!", U);
4952       return;
4953     }
4954   }
4955 
4956   // Get a pointer to the call base of the instruction if it is some form of
4957   // call.
4958   const CallBase *CBI = dyn_cast<CallBase>(&I);
4959 
4960   for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
4961     Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
4962 
4963     // Check to make sure that only first-class-values are operands to
4964     // instructions.
4965     if (!I.getOperand(i)->getType()->isFirstClassType()) {
4966       Check(false, "Instruction operands must be first-class values!", &I);
4967     }
4968 
4969     if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
4970       // This code checks whether the function is used as the operand of a
4971       // clang_arc_attachedcall operand bundle.
4972       auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
4973                                       int Idx) {
4974         return CBI && CBI->isOperandBundleOfType(
4975                           LLVMContext::OB_clang_arc_attachedcall, Idx);
4976       };
4977 
4978       // Check to make sure that the "address of" an intrinsic function is never
4979       // taken. Ignore cases where the address of the intrinsic function is used
4980       // as the argument of operand bundle "clang.arc.attachedcall" as those
4981       // cases are handled in verifyAttachedCallBundle.
4982       Check((!F->isIntrinsic() ||
4983              (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
4984              IsAttachedCallOperand(F, CBI, i)),
4985             "Cannot take the address of an intrinsic!", &I);
4986       Check(!F->isIntrinsic() || isa<CallInst>(I) ||
4987                 F->getIntrinsicID() == Intrinsic::donothing ||
4988                 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
4989                 F->getIntrinsicID() == Intrinsic::seh_try_end ||
4990                 F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
4991                 F->getIntrinsicID() == Intrinsic::seh_scope_end ||
4992                 F->getIntrinsicID() == Intrinsic::coro_resume ||
4993                 F->getIntrinsicID() == Intrinsic::coro_destroy ||
4994                 F->getIntrinsicID() ==
4995                     Intrinsic::experimental_patchpoint_void ||
4996                 F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 ||
4997                 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
4998                 F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
4999                 IsAttachedCallOperand(F, CBI, i),
5000             "Cannot invoke an intrinsic other than donothing, patchpoint, "
5001             "statepoint, coro_resume, coro_destroy or clang.arc.attachedcall",
5002             &I);
5003       Check(F->getParent() == &M, "Referencing function in another module!", &I,
5004             &M, F, F->getParent());
5005     } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
5006       Check(OpBB->getParent() == BB->getParent(),
5007             "Referring to a basic block in another function!", &I);
5008     } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
5009       Check(OpArg->getParent() == BB->getParent(),
5010             "Referring to an argument in another function!", &I);
5011     } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
5012       Check(GV->getParent() == &M, "Referencing global in another module!", &I,
5013             &M, GV, GV->getParent());
5014     } else if (isa<Instruction>(I.getOperand(i))) {
5015       verifyDominatesUse(I, i);
5016     } else if (isa<InlineAsm>(I.getOperand(i))) {
5017       Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
5018             "Cannot take the address of an inline asm!", &I);
5019     } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
5020       if (CE->getType()->isPtrOrPtrVectorTy()) {
5021         // If we have a ConstantExpr pointer, we need to see if it came from an
5022         // illegal bitcast.
5023         visitConstantExprsRecursively(CE);
5024       }
5025     }
5026   }
5027 
5028   if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
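    // For example, 'fdiv float %x, %y, !fpmath !{float 2.5}' allows the result
    // to be off by at most 2.5 ULPs.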
5029     Check(I.getType()->isFPOrFPVectorTy(),
5030           "fpmath requires a floating point result!", &I);
5031     Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
5032     if (ConstantFP *CFP0 =
5033             mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
5034       const APFloat &Accuracy = CFP0->getValueAPF();
5035       Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
5036             "fpmath accuracy must have float type", &I);
5037       Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
5038             "fpmath accuracy not a positive number!", &I);
5039     } else {
5040       Check(false, "invalid fpmath accuracy!", &I);
5041     }
5042   }
5043 
5044   if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
5045     Check(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
5046           "Ranges are only for loads, calls and invokes!", &I);
5047     visitRangeMetadata(I, Range, I.getType());
5048   }
5049 
5050   if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
5051     Check(isa<LoadInst>(I) || isa<StoreInst>(I),
5052           "invariant.group metadata is only for loads and stores", &I);
5053   }
5054 
5055   if (MDNode *MD = I.getMetadata(LLVMContext::MD_nonnull)) {
5056     Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
5057           &I);
5058     Check(isa<LoadInst>(I),
5059           "nonnull applies only to load instructions, use attributes"
5060           " for calls or invokes",
5061           &I);
5062     Check(MD->getNumOperands() == 0, "nonnull metadata must be empty", &I);
5063   }
5064 
5065   if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
5066     visitDereferenceableMetadata(I, MD);
5067 
5068   if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
5069     visitDereferenceableMetadata(I, MD);
5070 
5071   if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
5072     TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
5073 
5074   if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
5075     visitAliasScopeListMetadata(MD);
5076   if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
5077     visitAliasScopeListMetadata(MD);
5078 
5079   if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
5080     visitAccessGroupMetadata(MD);
5081 
5082   if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
5083     Check(I.getType()->isPointerTy(), "align applies only to pointer types",
5084           &I);
5085     Check(isa<LoadInst>(I),
5086           "align applies only to load instructions, "
5087           "use attributes for calls or invokes",
5088           &I);
5089     Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
5090     ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
5091     Check(CI && CI->getType()->isIntegerTy(64),
5092           "align metadata value must be an i64!", &I);
5093     uint64_t Align = CI->getZExtValue();
5094     Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
5095           &I);
5096     Check(Align <= Value::MaximumAlignment,
5097           "alignment is larger that implementation defined limit", &I);
5098   }
5099 
5100   if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
5101     visitProfMetadata(I, MD);
5102 
5103   if (MDNode *MD = I.getMetadata(LLVMContext::MD_memprof))
5104     visitMemProfMetadata(I, MD);
5105 
5106   if (MDNode *MD = I.getMetadata(LLVMContext::MD_callsite))
5107     visitCallsiteMetadata(I, MD);
5108 
5109   if (MDNode *MD = I.getMetadata(LLVMContext::MD_DIAssignID))
5110     visitDIAssignIDMetadata(I, MD);
5111 
5112   if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
5113     visitAnnotationMetadata(Annotation);
5114 
5115   if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
5116     CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
5117     visitMDNode(*N, AreDebugLocsAllowed::Yes);
5118   }
5119 
5120   if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I)) {
5121     verifyFragmentExpression(*DII);
5122     verifyNotEntryValue(*DII);
5123   }
5124 
5125   SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
5126   I.getAllMetadata(MDs);
5127   for (auto Attachment : MDs) {
5128     unsigned Kind = Attachment.first;
5129     auto AllowLocs =
5130         (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
5131             ? AreDebugLocsAllowed::Yes
5132             : AreDebugLocsAllowed::No;
5133     visitMDNode(*Attachment.second, AllowLocs);
5134   }
5135 
5136   InstsInThisBlock.insert(&I);
5137 }
5138 
5139 /// Allow intrinsics to be verified in different ways.
5140 void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
5141   Function *IF = Call.getCalledFunction();
5142   Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
5143         IF);
5144 
5145   // Verify that the intrinsic prototype lines up with what the .td files
5146   // describe.
5147   FunctionType *IFTy = IF->getFunctionType();
5148   bool IsVarArg = IFTy->isVarArg();
5149 
5150   SmallVector<Intrinsic::IITDescriptor, 8> Table;
5151   getIntrinsicInfoTableEntries(ID, Table);
5152   ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
5153 
5154   // Walk the descriptors to extract overloaded types.
5155   SmallVector<Type *, 4> ArgTys;
5156   Intrinsic::MatchIntrinsicTypesResult Res =
5157       Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys);
5158   Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet,
5159         "Intrinsic has incorrect return type!", IF);
5160   Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg,
5161         "Intrinsic has incorrect argument type!", IF);
5162 
  // Verify that the intrinsic call matches the vararg property.
5164   if (IsVarArg)
5165     Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
5166           "Intrinsic was not defined with variable arguments!", IF);
5167   else
5168     Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
5169           "Callsite was not defined with variable arguments!", IF);
5170 
5171   // All descriptors should be absorbed by now.
5172   Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
5173 
5174   // Now that we have the intrinsic ID and the actual argument types (and we
5175   // know they are legal for the intrinsic!) get the intrinsic name through the
5176   // usual means.  This allows us to verify the mangling of argument types into
5177   // the name.
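  // For example, the ptr/ptr/i64 overload of llvm.memcpy must be named
  // 'llvm.memcpy.p0.p0.i64'.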
5178   const std::string ExpectedName =
5179       Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
5180   Check(ExpectedName == IF->getName(),
5181         "Intrinsic name not mangled correctly for type arguments! "
5182         "Should be: " +
5183             ExpectedName,
5184         IF);
5185 
5186   // If the intrinsic takes MDNode arguments, verify that they are either global
  // or local to *this* function.
5188   for (Value *V : Call.args()) {
5189     if (auto *MD = dyn_cast<MetadataAsValue>(V))
5190       visitMetadataAsValue(*MD, Call.getCaller());
5191     if (auto *Const = dyn_cast<Constant>(V))
5192       Check(!Const->getType()->isX86_AMXTy(),
5193             "const x86_amx is not allowed in argument!");
5194   }
5195 
5196   switch (ID) {
5197   default:
5198     break;
5199   case Intrinsic::assume: {
5200     for (auto &Elem : Call.bundle_op_infos()) {
5201       unsigned ArgCount = Elem.End - Elem.Begin;
5202       // Separate storage assumptions are special insofar as they're the only
5203       // operand bundles allowed on assumes that aren't parameter attributes.
5204       if (Elem.Tag->getKey() == "separate_storage") {
5205         Check(ArgCount == 2,
5206               "separate_storage assumptions should have 2 arguments", Call);
5207         Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy() &&
5208                   Call.getOperand(Elem.Begin + 1)->getType()->isPointerTy(),
5209               "arguments to separate_storage assumptions should be pointers",
5210               Call);
5211         return;
5212       }
5213       Check(Elem.Tag->getKey() == "ignore" ||
5214                 Attribute::isExistingAttribute(Elem.Tag->getKey()),
5215             "tags must be valid attribute names", Call);
5216       Attribute::AttrKind Kind =
5217           Attribute::getAttrKindFromName(Elem.Tag->getKey());
5218       if (Kind == Attribute::Alignment) {
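        // For example, 'call void @llvm.assume(i1 true) ["align"(ptr %p,
        // i64 16)]', with an optional third i64 offset operand.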
5219         Check(ArgCount <= 3 && ArgCount >= 2,
5220               "alignment assumptions should have 2 or 3 arguments", Call);
5221         Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
5222               "first argument should be a pointer", Call);
5223         Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
5224               "second argument should be an integer", Call);
5225         if (ArgCount == 3)
5226           Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
5227                 "third argument should be an integer if present", Call);
5228         return;
5229       }
5230       Check(ArgCount <= 2, "too many arguments", Call);
5231       if (Kind == Attribute::None)
5232         break;
5233       if (Attribute::isIntAttrKind(Kind)) {
5234         Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
5235         Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
5236               "the second argument should be a constant integral value", Call);
5237       } else if (Attribute::canUseAsParamAttr(Kind)) {
5238         Check(ArgCount == 1, "this attribute should have one argument", Call);
5239       } else if (Attribute::canUseAsFnAttr(Kind)) {
5240         Check(ArgCount == 0, "this attribute has no argument", Call);
5241       }
5242     }
5243     break;
5244   }
5245   case Intrinsic::coro_id: {
5246     auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
5247     if (isa<ConstantPointerNull>(InfoArg))
5248       break;
5249     auto *GV = dyn_cast<GlobalVariable>(InfoArg);
5250     Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
5251           "info argument of llvm.coro.id must refer to an initialized "
5252           "constant");
5253     Constant *Init = GV->getInitializer();
5254     Check(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
5255           "info argument of llvm.coro.id must refer to either a struct or "
5256           "an array");
5257     break;
5258   }
5259   case Intrinsic::is_fpclass: {
5260     const ConstantInt *TestMask = cast<ConstantInt>(Call.getOperand(1));
5261     Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
5262           "unsupported bits for llvm.is.fpclass test mask");
5263     break;
5264   }
5265   case Intrinsic::fptrunc_round: {
5266     // Check the rounding mode
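         // For example (illustrative), the operand is expected to look like
         //   metadata !"round.towardzero"
         // while !"round.dynamic" is rejected by the checks below.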
5267     Metadata *MD = nullptr;
5268     auto *MAV = dyn_cast<MetadataAsValue>(Call.getOperand(1));
5269     if (MAV)
5270       MD = MAV->getMetadata();
5271 
5272     Check(MD != nullptr, "missing rounding mode argument", Call);
5273 
5274     Check(isa<MDString>(MD),
5275           ("invalid value for llvm.fptrunc.round metadata operand"
5276            " (the operand should be a string)"),
5277           MD);
5278 
5279     std::optional<RoundingMode> RoundMode =
5280         convertStrToRoundingMode(cast<MDString>(MD)->getString());
5281     Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
5282           "unsupported rounding mode argument", Call);
5283     break;
5284   }
5285 #define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
5286 #include "llvm/IR/VPIntrinsics.def"
5287     visitVPIntrinsic(cast<VPIntrinsic>(Call));
5288     break;
5289 #define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC)                        \
5290   case Intrinsic::INTRINSIC:
5291 #include "llvm/IR/ConstrainedOps.def"
5292     visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
5293     break;
5294   case Intrinsic::dbg_declare: // llvm.dbg.declare
5295     Check(isa<MetadataAsValue>(Call.getArgOperand(0)),
5296           "invalid llvm.dbg.declare intrinsic call 1", Call);
5297     visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
5298     break;
5299   case Intrinsic::dbg_value: // llvm.dbg.value
5300     visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
5301     break;
5302   case Intrinsic::dbg_assign: // llvm.dbg.assign
5303     visitDbgIntrinsic("assign", cast<DbgVariableIntrinsic>(Call));
5304     break;
5305   case Intrinsic::dbg_label: // llvm.dbg.label
5306     visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
5307     break;
5308   case Intrinsic::memcpy:
5309   case Intrinsic::memcpy_inline:
5310   case Intrinsic::memmove:
5311   case Intrinsic::memset:
5312   case Intrinsic::memset_inline: {
5313     break;
5314   }
5315   case Intrinsic::memcpy_element_unordered_atomic:
5316   case Intrinsic::memmove_element_unordered_atomic:
5317   case Intrinsic::memset_element_unordered_atomic: {
5318     const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
5319 
5320     ConstantInt *ElementSizeCI =
5321         cast<ConstantInt>(AMI->getRawElementSizeInBytes());
5322     const APInt &ElementSizeVal = ElementSizeCI->getValue();
5323     Check(ElementSizeVal.isPowerOf2(),
5324           "element size of the element-wise atomic memory intrinsic "
5325           "must be a power of 2",
5326           Call);
5327 
5328     auto IsValidAlignment = [&](MaybeAlign Alignment) {
5329       return Alignment && ElementSizeVal.ule(Alignment->value());
5330     };
5331     Check(IsValidAlignment(AMI->getDestAlign()),
5332           "incorrect alignment of the destination argument", Call);
5333     if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
5334       Check(IsValidAlignment(AMT->getSourceAlign()),
5335             "incorrect alignment of the source argument", Call);
5336     }
5337     break;
5338   }
5339   case Intrinsic::call_preallocated_setup: {
5340     auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5341     Check(NumArgs != nullptr,
5342           "llvm.call.preallocated.setup argument must be a constant");
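         // Rough sketch (illustrative names and types) of the pattern verified
         // here and in the related intrinsics below:
         //   %t = call token @llvm.call.preallocated.setup(i32 1)
         //   %a = call ptr @llvm.call.preallocated.arg(token %t, i32 0)
         //            preallocated(i32)
         //   call void @foo(ptr preallocated(i32) %a) ["preallocated"(token %t)]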
5343     bool FoundCall = false;
5344     for (User *U : Call.users()) {
5345       auto *UseCall = dyn_cast<CallBase>(U);
5346       Check(UseCall != nullptr,
5347             "Uses of llvm.call.preallocated.setup must be calls");
5348       const Function *Fn = UseCall->getCalledFunction();
5349       if (Fn && Fn->getIntrinsicID() == Intrinsic::call_preallocated_arg) {
5350         auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
5351         Check(AllocArgIndex != nullptr,
5352               "llvm.call.preallocated.alloc arg index must be a constant");
5353         auto AllocArgIndexInt = AllocArgIndex->getValue();
5354         Check(AllocArgIndexInt.sge(0) &&
5355                   AllocArgIndexInt.slt(NumArgs->getValue()),
5356               "llvm.call.preallocated.alloc arg index must be between 0 and "
5357               "corresponding "
5358               "llvm.call.preallocated.setup's argument count");
5359       } else if (Fn && Fn->getIntrinsicID() ==
5360                            Intrinsic::call_preallocated_teardown) {
5361         // nothing to do
5362       } else {
5363         Check(!FoundCall, "Can have at most one call corresponding to a "
5364                           "llvm.call.preallocated.setup");
5365         FoundCall = true;
5366         size_t NumPreallocatedArgs = 0;
5367         for (unsigned i = 0; i < UseCall->arg_size(); i++) {
5368           if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
5369             ++NumPreallocatedArgs;
5370           }
5371         }
5372         Check(NumPreallocatedArgs != 0,
5373               "cannot use preallocated intrinsics on a call without "
5374               "preallocated arguments");
5375         Check(NumArgs->equalsInt(NumPreallocatedArgs),
5376               "llvm.call.preallocated.setup arg size must be equal to number "
5377               "of preallocated arguments "
5378               "at call site",
5379               Call, *UseCall);
5380         // getOperandBundle() cannot be called if more than one operand bundle
5381         // of this kind exists. There is already a check elsewhere for this, so
5382         // skip here if we see more than one.
5383         if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
5384             1) {
5385           return;
5386         }
5387         auto PreallocatedBundle =
5388             UseCall->getOperandBundle(LLVMContext::OB_preallocated);
5389         Check(PreallocatedBundle,
5390               "Use of llvm.call.preallocated.setup outside intrinsics "
5391               "must be in \"preallocated\" operand bundle");
5392         Check(PreallocatedBundle->Inputs.front().get() == &Call,
5393               "preallocated bundle must have token from corresponding "
5394               "llvm.call.preallocated.setup");
5395       }
5396     }
5397     break;
5398   }
5399   case Intrinsic::call_preallocated_arg: {
5400     auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5401     Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5402                        Intrinsic::call_preallocated_setup,
5403           "llvm.call.preallocated.arg token argument must be a "
5404           "llvm.call.preallocated.setup");
5405     Check(Call.hasFnAttr(Attribute::Preallocated),
5406           "llvm.call.preallocated.arg must be called with a \"preallocated\" "
5407           "call site attribute");
5408     break;
5409   }
5410   case Intrinsic::call_preallocated_teardown: {
5411     auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
5412     Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
5413                        Intrinsic::call_preallocated_setup,
5414           "llvm.call.preallocated.teardown token argument must be a "
5415           "llvm.call.preallocated.setup");
5416     break;
5417   }
5418   case Intrinsic::gcroot:
5419   case Intrinsic::gcwrite:
5420   case Intrinsic::gcread:
5421     if (ID == Intrinsic::gcroot) {
5422       AllocaInst *AI =
5423           dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
5424       Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
5425       Check(isa<Constant>(Call.getArgOperand(1)),
5426             "llvm.gcroot parameter #2 must be a constant.", Call);
5427       if (!AI->getAllocatedType()->isPointerTy()) {
5428         Check(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
5429               "llvm.gcroot parameter #1 must either be a pointer alloca, "
5430               "or argument #2 must be a non-null constant.",
5431               Call);
5432       }
5433     }
5434 
5435     Check(Call.getParent()->getParent()->hasGC(),
5436           "Enclosing function does not use GC.", Call);
5437     break;
5438   case Intrinsic::init_trampoline:
5439     Check(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
5440           "llvm.init_trampoline parameter #2 must resolve to a function.",
5441           Call);
5442     break;
5443   case Intrinsic::prefetch:
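         // LangRef summary: rw is 0 (read) or 1 (write), locality ranges from
         // 0 (no locality) to 3 (extremely local), and the cache type is
         // 0 (instruction) or 1 (data).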
5444     Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
5445           "rw argument to llvm.prefetch must be 0-1", Call);
5446     Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
5447           "locality argument to llvm.prefetch must be 0-3", Call);
5448     Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
5449           "cache type argument to llvm.prefetch must be 0-1", Call);
5450     break;
5451   case Intrinsic::stackprotector:
5452     Check(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
5453           "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
5454     break;
5455   case Intrinsic::localescape: {
5456     BasicBlock *BB = Call.getParent();
5457     Check(BB->isEntryBlock(), "llvm.localescape used outside of entry block",
5458           Call);
5459     Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
5460           Call);
5461     for (Value *Arg : Call.args()) {
5462       if (isa<ConstantPointerNull>(Arg))
5463         continue; // Null values are allowed as placeholders.
5464       auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
5465       Check(AI && AI->isStaticAlloca(),
5466             "llvm.localescape only accepts static allocas", Call);
5467     }
5468     FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
5469     SawFrameEscape = true;
5470     break;
5471   }
5472   case Intrinsic::localrecover: {
5473     Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
5474     Function *Fn = dyn_cast<Function>(FnArg);
5475     Check(Fn && !Fn->isDeclaration(),
5476           "llvm.localrecover first "
5477           "argument must be function defined in this module",
5478           Call);
5479     auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
5480     auto &Entry = FrameEscapeInfo[Fn];
5481     Entry.second = unsigned(
5482         std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
5483     break;
5484   }
5485 
5486   case Intrinsic::experimental_gc_statepoint:
5487     if (auto *CI = dyn_cast<CallInst>(&Call))
5488       Check(!CI->isInlineAsm(),
5489             "gc.statepoint support for inline assembly unimplemented", CI);
5490     Check(Call.getParent()->getParent()->hasGC(),
5491           "Enclosing function does not use GC.", Call);
5492 
5493     verifyStatepoint(Call);
5494     break;
5495   case Intrinsic::experimental_gc_result: {
5496     Check(Call.getParent()->getParent()->hasGC(),
5497           "Enclosing function does not use GC.", Call);
5498 
5499     auto *Statepoint = Call.getArgOperand(0);
5500     if (isa<UndefValue>(Statepoint))
5501       break;
5502 
5503     // Are we tied to a statepoint properly?
5504     const auto *StatepointCall = dyn_cast<CallBase>(Statepoint);
5505     const Function *StatepointFn =
5506         StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
5507     Check(StatepointFn && StatepointFn->isDeclaration() &&
5508               StatepointFn->getIntrinsicID() ==
5509                   Intrinsic::experimental_gc_statepoint,
5510           "gc.result operand #1 must be from a statepoint", Call,
5511           Call.getArgOperand(0));
5512 
5513     // Check that result type matches wrapped callee.
5514     auto *TargetFuncType =
5515         cast<FunctionType>(StatepointCall->getParamElementType(2));
5516     Check(Call.getType() == TargetFuncType->getReturnType(),
5517           "gc.result result type does not match wrapped callee", Call);
5518     break;
5519   }
5520   case Intrinsic::experimental_gc_relocate: {
5521     Check(Call.arg_size() == 3, "wrong number of arguments", Call);
5522 
5523     Check(isa<PointerType>(Call.getType()->getScalarType()),
5524           "gc.relocate must return a pointer or a vector of pointers", Call);
5525 
5526     // Check that this relocate is correctly tied to the statepoint
5527 
5528     // This handles a relocate on the unwinding path of an invoke statepoint.
5529     if (LandingPadInst *LandingPad =
5530             dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
5531 
5532       const BasicBlock *InvokeBB =
5533           LandingPad->getParent()->getUniquePredecessor();
5534 
5535       // Landingpad relocates should have only one predecessor, whose
5536       // terminator is an invoke statepoint.
5537       Check(InvokeBB, "safepoints should have unique landingpads",
5538             LandingPad->getParent());
5539       Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
5540             InvokeBB);
5541       Check(isa<GCStatepointInst>(InvokeBB->getTerminator()),
5542             "gc relocate should be linked to a statepoint", InvokeBB);
5543     } else {
5544       // In all other cases relocate should be tied to the statepoint directly.
5545       // This covers relocates on a normal return path of invoke statepoint and
5546       // relocates of a call statepoint.
5547       auto *Token = Call.getArgOperand(0);
5548       Check(isa<GCStatepointInst>(Token) || isa<UndefValue>(Token),
5549             "gc relocate is incorrectly tied to the statepoint", Call, Token);
5550     }
5551 
5552     // Verify rest of the relocate arguments.
5553     const Value &StatepointCall = *cast<GCRelocateInst>(Call).getStatepoint();
5554 
5555     // Both the base and derived must be piped through the safepoint.
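         // Example (placeholder names): given a statepoint that carries
         //   ["gc-live"(ptr addrspace(1) %base, ptr addrspace(1) %derived)]
         // a relocate with operands #2 == 0 and #3 == 1 selects %base and
         // %derived from that bundle; the index bounds are checked below.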
5556     Value *Base = Call.getArgOperand(1);
5557     Check(isa<ConstantInt>(Base),
5558           "gc.relocate operand #2 must be integer offset", Call);
5559 
5560     Value *Derived = Call.getArgOperand(2);
5561     Check(isa<ConstantInt>(Derived),
5562           "gc.relocate operand #3 must be integer offset", Call);
5563 
5564     const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
5565     const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
5566 
5567     // Check the bounds
5568     if (isa<UndefValue>(StatepointCall))
5569       break;
5570     if (auto Opt = cast<GCStatepointInst>(StatepointCall)
5571                        .getOperandBundle(LLVMContext::OB_gc_live)) {
5572       Check(BaseIndex < Opt->Inputs.size(),
5573             "gc.relocate: statepoint base index out of bounds", Call);
5574       Check(DerivedIndex < Opt->Inputs.size(),
5575             "gc.relocate: statepoint derived index out of bounds", Call);
5576     }
5577 
5578     // The relocated value must be a pointer type or vector-of-pointer type,
5579     // but gc_relocate does not need to return the same pointer type as the
5580     // relocated pointer. It can be cast to the correct type later if desired.
5581     // However, the two must have the same address space and 'vectorness'.
5582     GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
5583     auto *ResultType = Call.getType();
5584     auto *DerivedType = Relocate.getDerivedPtr()->getType();
5585     auto *BaseType = Relocate.getBasePtr()->getType();
5586 
5587     Check(BaseType->isPtrOrPtrVectorTy(),
5588           "gc.relocate: relocated value must be a pointer", Call);
5589     Check(DerivedType->isPtrOrPtrVectorTy(),
5590           "gc.relocate: relocated value must be a pointer", Call);
5591 
5592     Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
5593           "gc.relocate: vector relocates to vector and pointer to pointer",
5594           Call);
5595     Check(
5596         ResultType->getPointerAddressSpace() ==
5597             DerivedType->getPointerAddressSpace(),
5598         "gc.relocate: relocating a pointer shouldn't change its address space",
5599         Call);
5600 
5601     auto GC = llvm::getGCStrategy(Relocate.getFunction()->getGC());
5602     Check(GC, "gc.relocate: calling function must have GCStrategy",
5603           Call.getFunction());
5604     if (GC) {
5605       auto isGCPtr = [&GC](Type *PTy) {
5606         return GC->isGCManagedPointer(PTy->getScalarType()).value_or(true);
5607       };
5608       Check(isGCPtr(ResultType), "gc.relocate: must return gc pointer", Call);
5609       Check(isGCPtr(BaseType),
5610             "gc.relocate: relocated value must be a gc pointer", Call);
5611       Check(isGCPtr(DerivedType),
5612             "gc.relocate: relocated value must be a gc pointer", Call);
5613     }
5614     break;
5615   }
5616   case Intrinsic::eh_exceptioncode:
5617   case Intrinsic::eh_exceptionpointer: {
5618     Check(isa<CatchPadInst>(Call.getArgOperand(0)),
5619           "eh.exceptionpointer argument must be a catchpad", Call);
5620     break;
5621   }
5622   case Intrinsic::get_active_lane_mask: {
5623     Check(Call.getType()->isVectorTy(),
5624           "get_active_lane_mask: must return a "
5625           "vector",
5626           Call);
5627     auto *ElemTy = Call.getType()->getScalarType();
5628     Check(ElemTy->isIntegerTy(1),
5629           "get_active_lane_mask: element type is not "
5630           "i1",
5631           Call);
5632     break;
5633   }
5634   case Intrinsic::experimental_get_vector_length: {
5635     ConstantInt *VF = cast<ConstantInt>(Call.getArgOperand(1));
5636     Check(!VF->isNegative() && !VF->isZero(),
5637           "get_vector_length: VF must be positive", Call);
5638     break;
5639   }
5640   case Intrinsic::masked_load: {
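         // Illustrative call (the overload suffix and value names are only an
         // example):
         //   %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4,
         //                                                  <4 x i1> %m,
         //                                                  <4 x i32> %pass)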
5641     Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
5642           Call);
5643 
5644     ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
5645     Value *Mask = Call.getArgOperand(2);
5646     Value *PassThru = Call.getArgOperand(3);
5647     Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
5648           Call);
5649     Check(Alignment->getValue().isPowerOf2(),
5650           "masked_load: alignment must be a power of 2", Call);
5651     Check(PassThru->getType() == Call.getType(),
5652           "masked_load: pass through and return type must match", Call);
5653     Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5654               cast<VectorType>(Call.getType())->getElementCount(),
5655           "masked_load: vector mask must be same length as return", Call);
5656     break;
5657   }
5658   case Intrinsic::masked_store: {
5659     Value *Val = Call.getArgOperand(0);
5660     ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
5661     Value *Mask = Call.getArgOperand(3);
5662     Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
5663           Call);
5664     Check(Alignment->getValue().isPowerOf2(),
5665           "masked_store: alignment must be a power of 2", Call);
5666     Check(cast<VectorType>(Mask->getType())->getElementCount() ==
5667               cast<VectorType>(Val->getType())->getElementCount(),
5668           "masked_store: vector mask must be same length as value", Call);
5669     break;
5670   }
5671 
5672   case Intrinsic::masked_gather: {
5673     const APInt &Alignment =
5674         cast<ConstantInt>(Call.getArgOperand(1))->getValue();
5675     Check(Alignment.isZero() || Alignment.isPowerOf2(),
5676           "masked_gather: alignment must be 0 or a power of 2", Call);
5677     break;
5678   }
5679   case Intrinsic::masked_scatter: {
5680     const APInt &Alignment =
5681         cast<ConstantInt>(Call.getArgOperand(2))->getValue();
5682     Check(Alignment.isZero() || Alignment.isPowerOf2(),
5683           "masked_scatter: alignment must be 0 or a power of 2", Call);
5684     break;
5685   }
5686 
5687   case Intrinsic::experimental_guard: {
5688     Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
5689     Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5690           "experimental_guard must have exactly one "
5691           "\"deopt\" operand bundle");
5692     break;
5693   }
5694 
5695   case Intrinsic::experimental_deoptimize: {
5696     Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
5697           Call);
5698     Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
5699           "experimental_deoptimize must have exactly one "
5700           "\"deopt\" operand bundle");
5701     Check(Call.getType() == Call.getFunction()->getReturnType(),
5702           "experimental_deoptimize return type must match caller return type");
5703 
5704     if (isa<CallInst>(Call)) {
5705       auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
5706       Check(RI,
5707             "calls to experimental_deoptimize must be followed by a return");
5708 
5709       if (!Call.getType()->isVoidTy() && RI)
5710         Check(RI->getReturnValue() == &Call,
5711               "calls to experimental_deoptimize must be followed by a return "
5712               "of the value computed by experimental_deoptimize");
5713     }
5714 
5715     break;
5716   }
5717   case Intrinsic::vector_reduce_and:
5718   case Intrinsic::vector_reduce_or:
5719   case Intrinsic::vector_reduce_xor:
5720   case Intrinsic::vector_reduce_add:
5721   case Intrinsic::vector_reduce_mul:
5722   case Intrinsic::vector_reduce_smax:
5723   case Intrinsic::vector_reduce_smin:
5724   case Intrinsic::vector_reduce_umax:
5725   case Intrinsic::vector_reduce_umin: {
5726     Type *ArgTy = Call.getArgOperand(0)->getType();
5727     Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
5728           "Intrinsic has incorrect argument type!");
5729     break;
5730   }
5731   case Intrinsic::vector_reduce_fmax:
5732   case Intrinsic::vector_reduce_fmin: {
5733     Type *ArgTy = Call.getArgOperand(0)->getType();
5734     Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
5735           "Intrinsic has incorrect argument type!");
5736     break;
5737   }
5738   case Intrinsic::vector_reduce_fadd:
5739   case Intrinsic::vector_reduce_fmul: {
5740     // Unlike the other reductions, the first argument is a start value. The
5741     // second argument is the vector to be reduced.
5742     Type *ArgTy = Call.getArgOperand(1)->getType();
5743     Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
5744           "Intrinsic has incorrect argument type!");
5745     break;
5746   }
5747   case Intrinsic::smul_fix:
5748   case Intrinsic::smul_fix_sat:
5749   case Intrinsic::umul_fix:
5750   case Intrinsic::umul_fix_sat:
5751   case Intrinsic::sdiv_fix:
5752   case Intrinsic::sdiv_fix_sat:
5753   case Intrinsic::udiv_fix:
5754   case Intrinsic::udiv_fix_sat: {
5755     Value *Op1 = Call.getArgOperand(0);
5756     Value *Op2 = Call.getArgOperand(1);
5757     Check(Op1->getType()->isIntOrIntVectorTy(),
5758           "first operand of [us][mul|div]_fix[_sat] must be an int type or "
5759           "vector of ints");
5760     Check(Op2->getType()->isIntOrIntVectorTy(),
5761           "second operand of [us][mul|div]_fix[_sat] must be an int type or "
5762           "vector of ints");
5763 
5764     auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
5765     Check(Op3->getType()->getBitWidth() <= 32,
5766           "third argument of [us][mul|div]_fix[_sat] must fit within 32 bits");
5767 
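         // For example, with i32 operands the scale of llvm.smul.fix must be
         // in [0, 31], while the scale of llvm.umul.fix may be as large as 32.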
5768     if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
5769         ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
5770       Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
5771             "the scale of s[mul|div]_fix[_sat] must be less than the width of "
5772             "the operands");
5773     } else {
5774       Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
5775             "the scale of u[mul|div]_fix[_sat] must be less than or equal "
5776             "to the width of the operands");
5777     }
5778     break;
5779   }
5780   case Intrinsic::lround:
5781   case Intrinsic::llround:
5782   case Intrinsic::lrint:
5783   case Intrinsic::llrint: {
5784     Type *ValTy = Call.getArgOperand(0)->getType();
5785     Type *ResultTy = Call.getType();
5786     Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
5787           "Intrinsic does not support vectors", &Call);
5788     break;
5789   }
5790   case Intrinsic::bswap: {
5791     Type *Ty = Call.getType();
5792     unsigned Size = Ty->getScalarSizeInBits();
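         // That is, the scalar width must be an even number of bytes: i16,
         // i32, i48 and i64 are accepted, while i8 or i24 are rejected.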
5793     Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
5794     break;
5795   }
5796   case Intrinsic::invariant_start: {
5797     ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
5798     Check(InvariantSize &&
5799               (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
5800           "invariant_start parameter must be -1, 0 or a positive number",
5801           &Call);
5802     break;
5803   }
5804   case Intrinsic::matrix_multiply:
5805   case Intrinsic::matrix_transpose:
5806   case Intrinsic::matrix_column_major_load:
5807   case Intrinsic::matrix_column_major_store: {
5808     Function *IF = Call.getCalledFunction();
5809     ConstantInt *Stride = nullptr;
5810     ConstantInt *NumRows;
5811     ConstantInt *NumColumns;
5812     VectorType *ResultTy;
5813     Type *Op0ElemTy = nullptr;
5814     Type *Op1ElemTy = nullptr;
5815     switch (ID) {
5816     case Intrinsic::matrix_multiply: {
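           // Worked example (illustrative): a 2x3 * 3x2 multiply requires
           // operand 0 to have 2 * 3 = 6 elements, operand 1 to have
           // 3 * 2 = 6 elements, and the result to have 2 * 2 = 4 elements.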
5817       NumRows = cast<ConstantInt>(Call.getArgOperand(2));
5818       ConstantInt *N = cast<ConstantInt>(Call.getArgOperand(3));
5819       NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
5820       Check(cast<FixedVectorType>(Call.getArgOperand(0)->getType())
5821                     ->getNumElements() ==
5822                 NumRows->getZExtValue() * N->getZExtValue(),
5823             "First argument of a matrix operation does not match specified "
5824             "shape!");
5825       Check(cast<FixedVectorType>(Call.getArgOperand(1)->getType())
5826                     ->getNumElements() ==
5827                 N->getZExtValue() * NumColumns->getZExtValue(),
5828             "Second argument of a matrix operation does not match specified "
5829             "shape!");
5830 
5831       ResultTy = cast<VectorType>(Call.getType());
5832       Op0ElemTy =
5833           cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
5834       Op1ElemTy =
5835           cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
5836       break;
5837     }
5838     case Intrinsic::matrix_transpose:
5839       NumRows = cast<ConstantInt>(Call.getArgOperand(1));
5840       NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
5841       ResultTy = cast<VectorType>(Call.getType());
5842       Op0ElemTy =
5843           cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
5844       break;
5845     case Intrinsic::matrix_column_major_load: {
5846       Stride = dyn_cast<ConstantInt>(Call.getArgOperand(1));
5847       NumRows = cast<ConstantInt>(Call.getArgOperand(3));
5848       NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
5849       ResultTy = cast<VectorType>(Call.getType());
5850       break;
5851     }
5852     case Intrinsic::matrix_column_major_store: {
5853       Stride = dyn_cast<ConstantInt>(Call.getArgOperand(2));
5854       NumRows = cast<ConstantInt>(Call.getArgOperand(4));
5855       NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
5856       ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
5857       Op0ElemTy =
5858           cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
5859       break;
5860     }
5861     default:
5862       llvm_unreachable("unexpected intrinsic");
5863     }
5864 
5865     Check(ResultTy->getElementType()->isIntegerTy() ||
5866               ResultTy->getElementType()->isFloatingPointTy(),
5867           "Result type must be an integer or floating-point type!", IF);
5868 
5869     if (Op0ElemTy)
5870       Check(ResultTy->getElementType() == Op0ElemTy,
5871             "Vector element type mismatch of the result and first operand "
5872             "vector!",
5873             IF);
5874 
5875     if (Op1ElemTy)
5876       Check(ResultTy->getElementType() == Op1ElemTy,
5877             "Vector element type mismatch of the result and second operand "
5878             "vector!",
5879             IF);
5880 
5881     Check(cast<FixedVectorType>(ResultTy)->getNumElements() ==
5882               NumRows->getZExtValue() * NumColumns->getZExtValue(),
5883           "Result of a matrix operation does not fit in the returned vector!");
5884 
5885     if (Stride)
5886       Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
5887             "Stride must be greater than or equal to the number of rows!", IF);
5888 
5889     break;
5890   }
5891   case Intrinsic::experimental_vector_splice: {
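         // For example (illustrative), splicing two <4 x i32> vectors accepts
         // an index in [-4, 3]; for <vscale x 4 x i32> in a function with a
         // vscale_range minimum of 2, the accepted range widens to [-8, 7].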
5892     VectorType *VecTy = cast<VectorType>(Call.getType());
5893     int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
5894     int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
5895     if (Call.getParent() && Call.getParent()->getParent()) {
5896       AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
5897       if (Attrs.hasFnAttr(Attribute::VScaleRange))
5898         KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
5899     }
5900     Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
5901               (Idx >= 0 && Idx < KnownMinNumElements),
5902           "The splice index exceeds the range [-VL, VL-1] where VL is the "
5903           "known minimum number of elements in the vector. For scalable "
5904           "vectors the minimum number of elements is determined from "
5905           "vscale_range.",
5906           &Call);
5907     break;
5908   }
5909   case Intrinsic::experimental_stepvector: {
5910     VectorType *VecTy = dyn_cast<VectorType>(Call.getType());
5911     Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
5912               VecTy->getScalarSizeInBits() >= 8,
5913           "experimental_stepvector only supported for vectors of integers "
5914           "with a bitwidth of at least 8.",
5915           &Call);
5916     break;
5917   }
5918   case Intrinsic::vector_insert: {
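         // Example (illustrative): a <4 x i32> subvector can only be inserted
         // at an index that is a multiple of 4; with an <8 x i32> destination
         // the checks below further restrict the index to 0 or 4.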
5919     Value *Vec = Call.getArgOperand(0);
5920     Value *SubVec = Call.getArgOperand(1);
5921     Value *Idx = Call.getArgOperand(2);
5922     unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
5923 
5924     VectorType *VecTy = cast<VectorType>(Vec->getType());
5925     VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
5926 
5927     ElementCount VecEC = VecTy->getElementCount();
5928     ElementCount SubVecEC = SubVecTy->getElementCount();
5929     Check(VecTy->getElementType() == SubVecTy->getElementType(),
5930           "vector_insert parameters must have the same element "
5931           "type.",
5932           &Call);
5933     Check(IdxN % SubVecEC.getKnownMinValue() == 0,
5934           "vector_insert index must be a constant multiple of "
5935           "the subvector's known minimum vector length.");
5936 
5937     // If this insertion is not the 'mixed' case where a fixed vector is
5938     // inserted into a scalable vector, ensure that the insertion of the
5939     // subvector does not overrun the parent vector.
5940     if (VecEC.isScalable() == SubVecEC.isScalable()) {
5941       Check(IdxN < VecEC.getKnownMinValue() &&
5942                 IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
5943             "subvector operand of vector_insert would overrun the "
5944             "vector being inserted into.");
5945     }
5946     break;
5947   }
5948   case Intrinsic::vector_extract: {
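         // Example (illustrative): extracting a <2 x i64> result from an
         // <8 x i64> source requires an index that is a multiple of 2 and,
         // per the overrun check below, no larger than 6.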
5949     Value *Vec = Call.getArgOperand(0);
5950     Value *Idx = Call.getArgOperand(1);
5951     unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
5952 
5953     VectorType *ResultTy = cast<VectorType>(Call.getType());
5954     VectorType *VecTy = cast<VectorType>(Vec->getType());
5955 
5956     ElementCount VecEC = VecTy->getElementCount();
5957     ElementCount ResultEC = ResultTy->getElementCount();
5958 
5959     Check(ResultTy->getElementType() == VecTy->getElementType(),
5960           "vector_extract result must have the same element "
5961           "type as the input vector.",
5962           &Call);
5963     Check(IdxN % ResultEC.getKnownMinValue() == 0,
5964           "vector_extract index must be a constant multiple of "
5965           "the result type's known minimum vector length.");
5966 
5967     // If this extraction is not the 'mixed' case where a fixed vector is
5968     // extracted from a scalable vector, ensure that the extraction does not
5969     // overrun the parent vector.
5970     if (VecEC.isScalable() == ResultEC.isScalable()) {
5971       Check(IdxN < VecEC.getKnownMinValue() &&
5972                 IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
5973             "vector_extract would overrun.");
5974     }
5975     break;
5976   }
5977   case Intrinsic::experimental_noalias_scope_decl: {
5978     NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
5979     break;
5980   }
5981   case Intrinsic::preserve_array_access_index:
5982   case Intrinsic::preserve_struct_access_index:
5983   case Intrinsic::aarch64_ldaxr:
5984   case Intrinsic::aarch64_ldxr:
5985   case Intrinsic::arm_ldaex:
5986   case Intrinsic::arm_ldrex: {
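         // Illustrative call (the overload suffix is only an example):
         //   %v = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i64) %addr)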
5987     Type *ElemTy = Call.getParamElementType(0);
5988     Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
5989           &Call);
5990     break;
5991   }
5992   case Intrinsic::aarch64_stlxr:
5993   case Intrinsic::aarch64_stxr:
5994   case Intrinsic::arm_stlex:
5995   case Intrinsic::arm_strex: {
5996     Type *ElemTy = Call.getAttributes().getParamElementType(1);
5997     Check(ElemTy,
5998           "Intrinsic requires elementtype attribute on second argument.",
5999           &Call);
6000     break;
6001   }
6002   case Intrinsic::aarch64_prefetch: {
6003     Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2,
6004           "write argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6005     Check(cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
6006           "target argument to llvm.aarch64.prefetch must be 0-3", Call);
6007     Check(cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue() < 2,
6008           "stream argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6009     Check(cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue() < 2,
6010           "isdata argument to llvm.aarch64.prefetch must be 0 or 1", Call);
6011     break;
6012   }
6013   case Intrinsic::callbr_landingpad: {
6014     const auto *CBR = dyn_cast<CallBrInst>(Call.getOperand(0));
6015     Check(CBR, "intrinsic requires callbr operand", &Call);
6016     if (!CBR)
6017       break;
6018 
6019     const BasicBlock *LandingPadBB = Call.getParent();
6020     const BasicBlock *PredBB = LandingPadBB->getUniquePredecessor();
6021     if (!PredBB) {
6022       CheckFailed("Intrinsic in block must have 1 unique predecessor", &Call);
6023       break;
6024     }
6025     if (!isa<CallBrInst>(PredBB->getTerminator())) {
6026       CheckFailed("Intrinsic must have corresponding callbr in predecessor",
6027                   &Call);
6028       break;
6029     }
6030     Check(llvm::any_of(CBR->getIndirectDests(),
6031                        [LandingPadBB](const BasicBlock *IndDest) {
6032                          return IndDest == LandingPadBB;
6033                        }),
6034           "Intrinsic's corresponding callbr must have intrinsic's parent basic "
6035           "block in indirect destination list",
6036           &Call);
6037     const Instruction &First = *LandingPadBB->begin();
6038     Check(&First == &Call, "No other instructions may precede intrinsic",
6039           &Call);
6040     break;
6041   }
6042   case Intrinsic::amdgcn_cs_chain: {
6043     auto CallerCC = Call.getCaller()->getCallingConv();
6044     switch (CallerCC) {
6045     case CallingConv::AMDGPU_CS:
6046     case CallingConv::AMDGPU_CS_Chain:
6047     case CallingConv::AMDGPU_CS_ChainPreserve:
6048       break;
6049     default:
6050       CheckFailed("Intrinsic can only be used from functions with the "
6051                   "amdgpu_cs, amdgpu_cs_chain or amdgpu_cs_chain_preserve "
6052                   "calling conventions",
6053                   &Call);
6054       break;
6055     }
6056     break;
6057   }
6058   case Intrinsic::experimental_convergence_entry:
6059     Check(Call.getFunction()->isConvergent(),
6060           "Entry intrinsic can occur only in a convergent function.", &Call);
6061     Check(Call.getParent()->isEntryBlock(),
6062           "Entry intrinsic must occur in the entry block.", &Call);
6063     Check(Call.getParent()->getFirstNonPHI() == &Call,
6064           "Entry intrinsic must occur at the start of the basic block.", &Call);
6065     LLVM_FALLTHROUGH;
6066   case Intrinsic::experimental_convergence_anchor:
6067     Check(!Call.getOperandBundle(LLVMContext::OB_convergencectrl),
6068           "Entry or anchor intrinsic must not have a convergencectrl bundle.",
6069           &Call);
6070     break;
6071   case Intrinsic::experimental_convergence_loop:
6072     Check(Call.getOperandBundle(LLVMContext::OB_convergencectrl),
6073           "Loop intrinsic must have a convergencectrl bundle.", &Call);
6074     Check(Call.getParent()->getFirstNonPHI() == &Call,
6075           "Loop intrinsic must occur at the start of the basic block.", &Call);
6076     break;
6077   }
6078 
6079   // Verify that there aren't any unmediated control transfers between funclets.
6080   if (IntrinsicInst::mayLowerToFunctionCall(ID)) {
6081     Function *F = Call.getParent()->getParent();
6082     if (F->hasPersonalityFn() &&
6083         isScopedEHPersonality(classifyEHPersonality(F->getPersonalityFn()))) {
6084       // Run EH funclet coloring on-demand and cache results for other intrinsic
6085       // calls in this function
6086       if (BlockEHFuncletColors.empty())
6087         BlockEHFuncletColors = colorEHFunclets(*F);
6088 
6089       // Check for catch-/cleanup-pad in first funclet block
6090       bool InEHFunclet = false;
6091       BasicBlock *CallBB = Call.getParent();
6092       const ColorVector &CV = BlockEHFuncletColors.find(CallBB)->second;
6093       assert(CV.size() > 0 && "Uncolored block");
6094       for (BasicBlock *ColorFirstBB : CV)
6095         if (dyn_cast_or_null<FuncletPadInst>(ColorFirstBB->getFirstNonPHI()))
6096           InEHFunclet = true;
6097 
6098       // Check for funclet operand bundle
6099       bool HasToken = false;
6100       for (unsigned I = 0, E = Call.getNumOperandBundles(); I != E; ++I)
6101         if (Call.getOperandBundleAt(I).getTagID() == LLVMContext::OB_funclet)
6102           HasToken = true;
6103 
6104       // This would cause silent code truncation in WinEHPrepare
6105       if (InEHFunclet)
6106         Check(HasToken, "Missing funclet token on intrinsic call", &Call);
6107     }
6108   }
6109 }
6110 
6111 /// Carefully grab the subprogram from a local scope.
6112 ///
6113 /// This carefully grabs the subprogram from a local scope, avoiding the
6114 /// built-in assertions that would typically fire.
6115 static DISubprogram *getSubprogram(Metadata *LocalScope) {
6116   if (!LocalScope)
6117     return nullptr;
6118 
6119   if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
6120     return SP;
6121 
6122   if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
6123     return getSubprogram(LB->getRawScope());
6124 
6125   // Just return null; broken scope chains are checked elsewhere.
6126   assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
6127   return nullptr;
6128 }
6129 
6130 void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
6131   if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
6132     auto *RetTy = cast<VectorType>(VPCast->getType());
6133     auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
6134     Check(RetTy->getElementCount() == ValTy->getElementCount(),
6135           "VP cast intrinsic first argument and result vector lengths must be "
6136           "equal",
6137           *VPCast);
6138 
6139     switch (VPCast->getIntrinsicID()) {
6140     default:
6141       llvm_unreachable("Unknown VP cast intrinsic");
6142     case Intrinsic::vp_trunc:
6143       Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
6144             "llvm.vp.trunc intrinsic first argument and result element type "
6145             "must be integer",
6146             *VPCast);
6147       Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
6148             "llvm.vp.trunc intrinsic: the bit size of the first argument must "
6149             "be larger than the bit size of the return type",
6150             *VPCast);
6151       break;
6152     case Intrinsic::vp_zext:
6153     case Intrinsic::vp_sext:
6154       Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
6155             "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
6156             "element type must be integer",
6157             *VPCast);
6158       Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
6159             "llvm.vp.zext or llvm.vp.sext intrinsic: the bit size of the first "
6160             "argument must be smaller than the bit size of the return type",
6161             *VPCast);
6162       break;
6163     case Intrinsic::vp_fptoui:
6164     case Intrinsic::vp_fptosi:
6165       Check(
6166           RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
6167           "llvm.vp.fptoui or llvm.vp.fptosi intrinsic first argument element "
6168           "type must be floating-point and result element type must be integer",
6169           *VPCast);
6170       break;
6171     case Intrinsic::vp_uitofp:
6172     case Intrinsic::vp_sitofp:
6173       Check(
6174           RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
6175           "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
6176           "type must be integer and result element type must be floating-point",
6177           *VPCast);
6178       break;
6179     case Intrinsic::vp_fptrunc:
6180       Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
6181             "llvm.vp.fptrunc intrinsic first argument and result element type "
6182             "must be floating-point",
6183             *VPCast);
6184       Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
6185             "llvm.vp.fptrunc intrinsic: the bit size of the first argument "
6186             "must be larger than the bit size of the return type",
6187             *VPCast);
6188       break;
6189     case Intrinsic::vp_fpext:
6190       Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
6191             "llvm.vp.fpext intrinsic first argument and result element type "
6192             "must be floating-point",
6193             *VPCast);
6194       Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
6195             "llvm.vp.fpext intrinsic: the bit size of the first argument must "
6196             "be smaller than the bit size of the return type",
6197             *VPCast);
6198       break;
6199     case Intrinsic::vp_ptrtoint:
6200       Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
6201             "llvm.vp.ptrtoint intrinsic first argument element type must be "
6202             "pointer and result element type must be integer",
6203             *VPCast);
6204       break;
6205     case Intrinsic::vp_inttoptr:
6206       Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
6207             "llvm.vp.inttoptr intrinsic first argument element type must be "
6208             "integer and result element type must be pointer",
6209             *VPCast);
6210       break;
6211     }
6212   }
6213   if (VPI.getIntrinsicID() == Intrinsic::vp_fcmp) {
6214     auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
6215     Check(CmpInst::isFPPredicate(Pred),
6216           "invalid predicate for VP FP comparison intrinsic", &VPI);
6217   }
6218   if (VPI.getIntrinsicID() == Intrinsic::vp_icmp) {
6219     auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
6220     Check(CmpInst::isIntPredicate(Pred),
6221           "invalid predicate for VP integer comparison intrinsic", &VPI);
6222   }
6223 }
6224 
6225 void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
6226   unsigned NumOperands;
6227   bool HasRoundingMD;
6228   switch (FPI.getIntrinsicID()) {
6229 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
6230   case Intrinsic::INTRINSIC:                                                   \
6231     NumOperands = NARG;                                                        \
6232     HasRoundingMD = ROUND_MODE;                                                \
6233     break;
6234 #include "llvm/IR/ConstrainedOps.def"
6235   default:
6236     llvm_unreachable("Invalid constrained FP intrinsic!");
6237   }
6238   NumOperands += (1 + HasRoundingMD);
6239   // Compare intrinsics carry an extra predicate metadata operand.
6240   if (isa<ConstrainedFPCmpIntrinsic>(FPI))
6241     NumOperands += 1;
6242   Check((FPI.arg_size() == NumOperands),
6243         "invalid arguments for constrained FP intrinsic", &FPI);
6244 
6245   switch (FPI.getIntrinsicID()) {
6246   case Intrinsic::experimental_constrained_lrint:
6247   case Intrinsic::experimental_constrained_llrint: {
6248     Type *ValTy = FPI.getArgOperand(0)->getType();
6249     Type *ResultTy = FPI.getType();
6250     Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
6251           "Intrinsic does not support vectors", &FPI);
6252   }
6253     break;
6254 
6255   case Intrinsic::experimental_constrained_lround:
6256   case Intrinsic::experimental_constrained_llround: {
6257     Type *ValTy = FPI.getArgOperand(0)->getType();
6258     Type *ResultTy = FPI.getType();
6259     Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
6260           "Intrinsic does not support vectors", &FPI);
6261     break;
6262   }
6263 
6264   case Intrinsic::experimental_constrained_fcmp:
6265   case Intrinsic::experimental_constrained_fcmps: {
6266     auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
6267     Check(CmpInst::isFPPredicate(Pred),
6268           "invalid predicate for constrained FP comparison intrinsic", &FPI);
6269     break;
6270   }
6271 
6272   case Intrinsic::experimental_constrained_fptosi:
6273   case Intrinsic::experimental_constrained_fptoui: {
6274     Value *Operand = FPI.getArgOperand(0);
6275     ElementCount SrcEC;
6276     Check(Operand->getType()->isFPOrFPVectorTy(),
6277           "Intrinsic first argument must be floating point", &FPI);
6278     if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6279       SrcEC = OperandT->getElementCount();
6280     }
6281 
6282     Operand = &FPI;
6283     Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
6284           "Intrinsic first argument and result disagree on vector use", &FPI);
6285     Check(Operand->getType()->isIntOrIntVectorTy(),
6286           "Intrinsic result must be an integer", &FPI);
6287     if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6288       Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
6289             "Intrinsic first argument and result vector lengths must be equal",
6290             &FPI);
6291     }
6292   }
6293     break;
6294 
6295   case Intrinsic::experimental_constrained_sitofp:
6296   case Intrinsic::experimental_constrained_uitofp: {
6297     Value *Operand = FPI.getArgOperand(0);
6298     ElementCount SrcEC;
6299     Check(Operand->getType()->isIntOrIntVectorTy(),
6300           "Intrinsic first argument must be integer", &FPI);
6301     if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6302       SrcEC = OperandT->getElementCount();
6303     }
6304 
6305     Operand = &FPI;
6306     Check(SrcEC.isNonZero() == Operand->getType()->isVectorTy(),
6307           "Intrinsic first argument and result disagree on vector use", &FPI);
6308     Check(Operand->getType()->isFPOrFPVectorTy(),
6309           "Intrinsic result must be a floating point", &FPI);
6310     if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
6311       Check(SrcEC == cast<VectorType>(OperandT)->getElementCount(),
6312             "Intrinsic first argument and result vector lengths must be equal",
6313             &FPI);
6314     }
6315   } break;
6316 
6317   case Intrinsic::experimental_constrained_fptrunc:
6318   case Intrinsic::experimental_constrained_fpext: {
6319     Value *Operand = FPI.getArgOperand(0);
6320     Type *OperandTy = Operand->getType();
6321     Value *Result = &FPI;
6322     Type *ResultTy = Result->getType();
6323     Check(OperandTy->isFPOrFPVectorTy(),
6324           "Intrinsic first argument must be FP or FP vector", &FPI);
6325     Check(ResultTy->isFPOrFPVectorTy(),
6326           "Intrinsic result must be FP or FP vector", &FPI);
6327     Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
6328           "Intrinsic first argument and result disagree on vector use", &FPI);
6329     if (OperandTy->isVectorTy()) {
6330       Check(cast<VectorType>(OperandTy)->getElementCount() ==
6331                 cast<VectorType>(ResultTy)->getElementCount(),
6332             "Intrinsic first argument and result vector lengths must be equal",
6333             &FPI);
6334     }
6335     if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
6336       Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
6337             "Intrinsic first argument's type must be larger than result type",
6338             &FPI);
6339     } else {
6340       Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
6341             "Intrinsic first argument's type must be smaller than result type",
6342             &FPI);
6343     }
6344   }
6345     break;
6346 
6347   default:
6348     break;
6349   }
6350 
6351   // If a non-metadata argument is passed in a metadata slot then the
6352   // error will be caught earlier when the incorrect argument doesn't
6353   // match the specification in the intrinsic call table. Thus, no
6354   // argument type check is needed here.
6355 
6356   Check(FPI.getExceptionBehavior().has_value(),
6357         "invalid exception behavior argument", &FPI);
6358   if (HasRoundingMD) {
6359     Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
6360           &FPI);
6361   }
6362 }
6363 
6364 void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
6365   auto *MD = DII.getRawLocation();
6366   CheckDI(isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
6367               (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
6368           "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
6369   CheckDI(isa<DILocalVariable>(DII.getRawVariable()),
6370           "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
6371           DII.getRawVariable());
6372   CheckDI(isa<DIExpression>(DII.getRawExpression()),
6373           "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
6374           DII.getRawExpression());
6375 
6376   if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(&DII)) {
6377     CheckDI(isa<DIAssignID>(DAI->getRawAssignID()),
6378             "invalid llvm.dbg.assign intrinsic DIAssignID", &DII,
6379             DAI->getRawAssignID());
6380     const auto *RawAddr = DAI->getRawAddress();
6381     CheckDI(
6382         isa<ValueAsMetadata>(RawAddr) ||
6383             (isa<MDNode>(RawAddr) && !cast<MDNode>(RawAddr)->getNumOperands()),
6384         "invalid llvm.dbg.assign intrinsic address", &DII,
6385         DAI->getRawAddress());
6386     CheckDI(isa<DIExpression>(DAI->getRawAddressExpression()),
6387             "invalid llvm.dbg.assign intrinsic address expression", &DII,
6388             DAI->getRawAddressExpression());
6389     // All of the linked instructions should be in the same function as DII.
6390     for (Instruction *I : at::getAssignmentInsts(DAI))
6391       CheckDI(DAI->getFunction() == I->getFunction(),
6392               "inst not in same function as dbg.assign", I, DAI);
6393   }
6394 
6395   // Ignore broken !dbg attachments; they're checked elsewhere.
6396   if (MDNode *N = DII.getDebugLoc().getAsMDNode())
6397     if (!isa<DILocation>(N))
6398       return;
6399 
6400   BasicBlock *BB = DII.getParent();
6401   Function *F = BB ? BB->getParent() : nullptr;
6402 
6403   // The scopes for variables and !dbg attachments must agree.
6404   DILocalVariable *Var = DII.getVariable();
6405   DILocation *Loc = DII.getDebugLoc();
6406   CheckDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
6407           &DII, BB, F);
6408 
6409   DISubprogram *VarSP = getSubprogram(Var->getRawScope());
6410   DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
6411   if (!VarSP || !LocSP)
6412     return; // Broken scope chains are checked elsewhere.
6413 
6414   CheckDI(VarSP == LocSP,
6415           "mismatched subprogram between llvm.dbg." + Kind +
6416               " variable and !dbg attachment",
6417           &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
6418           Loc->getScope()->getSubprogram());
6419 
6420   // This check is redundant with one in visitLocalVariable().
6421   CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
6422           Var->getRawType());
6423   verifyFnArgs(DII);
6424 }
6425 
6426 void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
6427   CheckDI(isa<DILabel>(DLI.getRawLabel()),
6428           "invalid llvm.dbg." + Kind + " intrinsic variable", &DLI,
6429           DLI.getRawLabel());
6430 
6431   // Ignore broken !dbg attachments; they're checked elsewhere.
6432   if (MDNode *N = DLI.getDebugLoc().getAsMDNode())
6433     if (!isa<DILocation>(N))
6434       return;
6435 
6436   BasicBlock *BB = DLI.getParent();
6437   Function *F = BB ? BB->getParent() : nullptr;
6438 
6439   // The scopes for variables and !dbg attachments must agree.
6440   DILabel *Label = DLI.getLabel();
6441   DILocation *Loc = DLI.getDebugLoc();
  CheckDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
          &DLI, BB, F);
6444 
6445   DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
6446   DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
  if (!LabelSP || !LocSP)
    return; // Broken scope chains are checked elsewhere.
6449 
6450   CheckDI(LabelSP == LocSP,
6451           "mismatched subprogram between llvm.dbg." + Kind +
6452               " label and !dbg attachment",
6453           &DLI, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
6454           Loc->getScope()->getSubprogram());
6455 }
6456 
6457 void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) {
6458   DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
6459   DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
6460 
6461   // We don't know whether this intrinsic verified correctly.
6462   if (!V || !E || !E->isValid())
6463     return;
6464 
6465   // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
6466   auto Fragment = E->getFragmentInfo();
6467   if (!Fragment)
6468     return;
6469 
6470   // The frontend helps out GDB by emitting the members of local anonymous
6471   // unions as artificial local variables with shared storage. When SROA splits
6472   // the storage for artificial local variables that are smaller than the entire
6473   // union, the overhang piece will be outside of the allotted space for the
6474   // variable and this check fails.
6475   // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
6476   if (V->isArtificial())
6477     return;
6478 
6479   verifyFragmentExpression(*V, *Fragment, &I);
6480 }
6481 
6482 template <typename ValueOrMetadata>
6483 void Verifier::verifyFragmentExpression(const DIVariable &V,
6484                                         DIExpression::FragmentInfo Fragment,
6485                                         ValueOrMetadata *Desc) {
6486   // If there's no size, the type is broken, but that should be checked
6487   // elsewhere.
6488   auto VarSize = V.getSizeInBits();
6489   if (!VarSize)
6490     return;
6491 
6492   unsigned FragSize = Fragment.SizeInBits;
6493   unsigned FragOffset = Fragment.OffsetInBits;
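  // Illustrative example (assuming a 128-bit variable): a fragment described
  // by !DIExpression(DW_OP_LLVM_fragment, 64, 32) covers bits [64, 96) and
  // passes both checks below, while a fragment at offset 96 with size 64
  // would extend past the end of the variable and fail the first check.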
6494   CheckDI(FragSize + FragOffset <= *VarSize,
6495           "fragment is larger than or outside of variable", Desc, &V);
6496   CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
6497 }
6498 
6499 void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
  // This function does not take the scope of non-inlined function arguments
  // into account. Don't run it if the current function is marked nodebug,
  // because it may still contain inlined debug intrinsics.
6503   if (!HasDebugInfo)
6504     return;
6505 
  // For performance reasons, only check debug intrinsics that are not inlined.
6507   if (I.getDebugLoc()->getInlinedAt())
6508     return;
6509 
6510   DILocalVariable *Var = I.getVariable();
6511   CheckDI(Var, "dbg intrinsic without variable");
6512 
6513   unsigned ArgNo = Var->getArg();
6514   if (!ArgNo)
6515     return;
6516 
6517   // Verify there are no duplicate function argument debug info entries.
6518   // These will cause hard-to-debug assertions in the DWARF backend.
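  // For example (illustrative), two non-inlined debug intrinsics whose
  // !DILocalVariable operands are distinct nodes that both carry "arg: 1"
  // would trigger the conflict check below.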
6519   if (DebugFnArgs.size() < ArgNo)
6520     DebugFnArgs.resize(ArgNo, nullptr);
6521 
6522   auto *Prev = DebugFnArgs[ArgNo - 1];
6523   DebugFnArgs[ArgNo - 1] = Var;
6524   CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
6525           Prev, Var);
6526 }
6527 
6528 void Verifier::verifyNotEntryValue(const DbgVariableIntrinsic &I) {
6529   DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
6530 
6531   // We don't know whether this intrinsic verified correctly.
6532   if (!E || !E->isValid())
6533     return;
6534 
  // We allow entry values for Swift async arguments, as the ABI guarantees
  // that they are lowered to a specific register.
6537   if (isa<ValueAsMetadata>(I.getRawLocation()))
6538     if (auto *ArgLoc = dyn_cast_or_null<Argument>(I.getVariableLocationOp(0));
6539         ArgLoc && ArgLoc->hasAttribute(Attribute::SwiftAsync))
6540       return;
6541 
6542   CheckDI(!E->isEntryValue(),
6543           "Entry values are only allowed in MIR unless they target a "
6544           "swiftasync Argument",
6545           &I);
6546 }
6547 
6548 void Verifier::verifyCompileUnits() {
6549   // When more than one Module is imported into the same context, such as during
6550   // an LTO build before linking the modules, ODR type uniquing may cause types
6551   // to point to a different CU. This check does not make sense in this case.
6552   if (M.getContext().isODRUniquingDebugTypes())
6553     return;
6554   auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
6555   SmallPtrSet<const Metadata *, 2> Listed;
6556   if (CUs)
6557     Listed.insert(CUs->op_begin(), CUs->op_end());
6558   for (const auto *CU : CUVisited)
6559     CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
6560   CUVisited.clear();
6561 }
6562 
6563 void Verifier::verifyDeoptimizeCallingConvs() {
6564   if (DeoptimizeDeclarations.empty())
6565     return;
6566 
6567   const Function *First = DeoptimizeDeclarations[0];
6568   for (const auto *F : ArrayRef(DeoptimizeDeclarations).slice(1)) {
6569     Check(First->getCallingConv() == F->getCallingConv(),
6570           "All llvm.experimental.deoptimize declarations must have the same "
6571           "calling convention",
6572           First, F);
6573   }
6574 }
6575 
6576 void Verifier::verifyAttachedCallBundle(const CallBase &Call,
6577                                         const OperandBundleUse &BU) {
6578   FunctionType *FTy = Call.getFunctionType();
6579 
6580   Check((FTy->getReturnType()->isPointerTy() ||
6581          (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
6582         "a call with operand bundle \"clang.arc.attachedcall\" must call a "
6583         "function returning a pointer or a non-returning function that has a "
6584         "void return type",
6585         Call);
6586 
6587   Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
6588         "operand bundle \"clang.arc.attachedcall\" requires one function as "
6589         "an argument",
6590         Call);
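  // For illustration only, a typical well-formed use is expected to look
  // roughly like:
  //   call ptr @foo() [ "clang.arc.attachedcall"(
  //                     ptr @llvm.objc.retainAutoreleasedReturnValue) ]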
6591 
6592   auto *Fn = cast<Function>(BU.Inputs.front());
6593   Intrinsic::ID IID = Fn->getIntrinsicID();
6594 
6595   if (IID) {
6596     Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
6597            IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
6598           "invalid function argument", Call);
6599   } else {
6600     StringRef FnName = Fn->getName();
6601     Check((FnName == "objc_retainAutoreleasedReturnValue" ||
6602            FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
6603           "invalid function argument", Call);
6604   }
6605 }
6606 
6607 void Verifier::verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F) {
6608   bool HasSource = F.getSource().has_value();
6609   if (!HasSourceDebugInfo.count(&U))
6610     HasSourceDebugInfo[&U] = HasSource;
6611   CheckDI(HasSource == HasSourceDebugInfo[&U],
6612           "inconsistent use of embedded source");
6613 }
6614 
6615 void Verifier::verifyNoAliasScopeDecl() {
6616   if (NoAliasScopeDecls.empty())
6617     return;
6618 
  // Only a single scope may be declared by each intrinsic call.
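  // Illustrative shape of the expected metadata (names are placeholders):
  //   call void @llvm.experimental.noalias.scope.decl(metadata !2)
  //   !2 = !{!3}                        ; scope list with exactly one scope
  //   !3 = distinct !{!3, !4, !"scope"} ; the scope itself
  //   !4 = distinct !{!4, !"domain"}    ; its domain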
6620   for (auto *II : NoAliasScopeDecls) {
6621     assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
6622            "Not a llvm.experimental.noalias.scope.decl ?");
6623     const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
6624         II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
6625     Check(ScopeListMV != nullptr,
6626           "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
6627           "argument",
6628           II);
6629 
6630     const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
6631     Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
6632     Check(ScopeListMD->getNumOperands() == 1,
6633           "!id.scope.list must point to a list with a single scope", II);
6634     visitAliasScopeListMetadata(ScopeListMD);
6635   }
6636 
6637   // Only check the domination rule when requested. Once all passes have been
6638   // adapted this option can go away.
6639   if (!VerifyNoAliasScopeDomination)
6640     return;
6641 
6642   // Now sort the intrinsics based on the scope MDNode so that declarations of
6643   // the same scopes are next to each other.
6644   auto GetScope = [](IntrinsicInst *II) {
6645     const auto *ScopeListMV = cast<MetadataAsValue>(
6646         II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
6647     return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
6648   };
6649 
6650   // We are sorting on MDNode pointers here. For valid input IR this is ok.
6651   // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
6652   auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
6653     return GetScope(Lhs) < GetScope(Rhs);
6654   };
6655 
6656   llvm::sort(NoAliasScopeDecls, Compare);
6657 
  // Go over the intrinsics and check that, for the same scope, they do not
  // dominate each other.
6660   auto ItCurrent = NoAliasScopeDecls.begin();
6661   while (ItCurrent != NoAliasScopeDecls.end()) {
6662     auto CurScope = GetScope(*ItCurrent);
6663     auto ItNext = ItCurrent;
6664     do {
6665       ++ItNext;
6666     } while (ItNext != NoAliasScopeDecls.end() &&
6667              GetScope(*ItNext) == CurScope);
6668 
    // [ItCurrent, ItNext) represents the declarations for the same scope.
    // Ensure they do not dominate each other, but only if checking is not too
    // expensive.
6672     if (ItNext - ItCurrent < 32)
6673       for (auto *I : llvm::make_range(ItCurrent, ItNext))
6674         for (auto *J : llvm::make_range(ItCurrent, ItNext))
6675           if (I != J)
6676             Check(!DT.dominates(I, J),
6677                   "llvm.experimental.noalias.scope.decl dominates another one "
6678                   "with the same scope",
6679                   I);
6680     ItCurrent = ItNext;
6681   }
6682 }
6683 
6684 //===----------------------------------------------------------------------===//
6685 //  Implement the public interfaces to this file...
6686 //===----------------------------------------------------------------------===//
6687 
6688 bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
6689   Function &F = const_cast<Function &>(f);
6690 
6691   // Don't use a raw_null_ostream.  Printing IR is expensive.
6692   Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
6693 
6694   // Note that this function's return value is inverted from what you would
6695   // expect of a function called "verify".
6696   return !V.verify(F);
6697 }
6698 
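// A minimal usage sketch (illustrative): callers typically reject the input
// when verification reports breakage, e.g.
//
//   bool BrokenDebugInfo = false;
//   if (verifyModule(M, &errs(), &BrokenDebugInfo))
//     report_fatal_error("input module is broken!");
//
// Passing a non-null BrokenDebugInfo pointer makes broken debug info non-fatal
// and reported separately instead of failing verification outright.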
6699 bool llvm::verifyModule(const Module &M, raw_ostream *OS,
6700                         bool *BrokenDebugInfo) {
6701   // Don't use a raw_null_ostream.  Printing IR is expensive.
6702   Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
6703 
6704   bool Broken = false;
6705   for (const Function &F : M)
6706     Broken |= !V.verify(F);
6707 
6708   Broken |= !V.verify();
6709   if (BrokenDebugInfo)
6710     *BrokenDebugInfo = V.hasBrokenDebugInfo();
6711   // Note that this function's return value is inverted from what you would
6712   // expect of a function called "verify".
6713   return Broken;
6714 }
6715 
6716 namespace {
6717 
6718 struct VerifierLegacyPass : public FunctionPass {
6719   static char ID;
6720 
6721   std::unique_ptr<Verifier> V;
6722   bool FatalErrors = true;
6723 
6724   VerifierLegacyPass() : FunctionPass(ID) {
6725     initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
6726   }
6727   explicit VerifierLegacyPass(bool FatalErrors)
6728       : FunctionPass(ID),
6729         FatalErrors(FatalErrors) {
6730     initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
6731   }
6732 
6733   bool doInitialization(Module &M) override {
6734     V = std::make_unique<Verifier>(
6735         &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
6736     return false;
6737   }
6738 
6739   bool runOnFunction(Function &F) override {
6740     if (!V->verify(F) && FatalErrors) {
6741       errs() << "in function " << F.getName() << '\n';
6742       report_fatal_error("Broken function found, compilation aborted!");
6743     }
6744     return false;
6745   }
6746 
6747   bool doFinalization(Module &M) override {
6748     bool HasErrors = false;
6749     for (Function &F : M)
6750       if (F.isDeclaration())
6751         HasErrors |= !V->verify(F);
6752 
6753     HasErrors |= !V->verify();
6754     if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
6755       report_fatal_error("Broken module found, compilation aborted!");
6756     return false;
6757   }
6758 
6759   void getAnalysisUsage(AnalysisUsage &AU) const override {
6760     AU.setPreservesAll();
6761   }
6762 };
6763 
6764 } // end anonymous namespace
6765 
/// Helper to issue a failure from the TBAA verification.
6767 template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
6768   if (Diagnostic)
6769     return Diagnostic->CheckFailed(Args...);
6770 }
6771 
6772 #define CheckTBAA(C, ...)                                                      \
6773   do {                                                                         \
6774     if (!(C)) {                                                                \
6775       CheckFailed(__VA_ARGS__);                                                \
6776       return false;                                                            \
6777     }                                                                          \
6778   } while (false)
6779 
6780 /// Verify that \p BaseNode can be used as the "base type" in the struct-path
6781 /// TBAA scheme.  This means \p BaseNode is either a scalar node, or a
6782 /// struct-type node describing an aggregate data structure (like a struct).
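/// For illustration, an old-format struct-type base node looks roughly like
///   !3 = !{!"S", !2, i64 0, !2, i64 4}
/// i.e. a name followed by field (type, offset) pairs, while a scalar node is
/// just a name plus a parent, e.g. !2 = !{!"int", !1, i64 0}.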
6783 TBAAVerifier::TBAABaseNodeSummary
6784 TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
6785                                  bool IsNewFormat) {
6786   if (BaseNode->getNumOperands() < 2) {
6787     CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
6788     return {true, ~0u};
6789   }
6790 
6791   auto Itr = TBAABaseNodes.find(BaseNode);
6792   if (Itr != TBAABaseNodes.end())
6793     return Itr->second;
6794 
6795   auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
6796   auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
6797   (void)InsertResult;
6798   assert(InsertResult.second && "We just checked!");
6799   return Result;
6800 }
6801 
6802 TBAAVerifier::TBAABaseNodeSummary
6803 TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
6804                                      bool IsNewFormat) {
6805   const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
6806 
6807   if (BaseNode->getNumOperands() == 2) {
6808     // Scalar nodes can only be accessed at offset 0.
6809     return isValidScalarTBAANode(BaseNode)
6810                ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
6811                : InvalidNode;
6812   }
6813 
6814   if (IsNewFormat) {
6815     if (BaseNode->getNumOperands() % 3 != 0) {
      CheckFailed("Base nodes in the new format must have a number of operands "
                  "that is a multiple of 3!",
                  BaseNode);
6818       return InvalidNode;
6819     }
6820   } else {
6821     if (BaseNode->getNumOperands() % 2 != 1) {
6822       CheckFailed("Struct tag nodes must have an odd number of operands!",
6823                   BaseNode);
6824       return InvalidNode;
6825     }
6826   }
6827 
6828   // Check the type size field.
6829   if (IsNewFormat) {
6830     auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
6831         BaseNode->getOperand(1));
6832     if (!TypeSizeNode) {
6833       CheckFailed("Type size nodes must be constants!", &I, BaseNode);
6834       return InvalidNode;
6835     }
6836   }
6837 
6838   // Check the type name field. In the new format it can be anything.
6839   if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
    CheckFailed("Struct tag nodes must have a string as their first operand",
6841                 BaseNode);
6842     return InvalidNode;
6843   }
6844 
6845   bool Failed = false;
6846 
6847   std::optional<APInt> PrevOffset;
6848   unsigned BitWidth = ~0u;
6849 
6850   // We've already checked that BaseNode is not a degenerate root node with one
6851   // operand in \c verifyTBAABaseNode, so this loop should run at least once.
6852   unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
6853   unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
6854   for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
6855            Idx += NumOpsPerField) {
6856     const MDOperand &FieldTy = BaseNode->getOperand(Idx);
6857     const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
6858     if (!isa<MDNode>(FieldTy)) {
6859       CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
6860       Failed = true;
6861       continue;
6862     }
6863 
6864     auto *OffsetEntryCI =
6865         mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
6866     if (!OffsetEntryCI) {
6867       CheckFailed("Offset entries must be constants!", &I, BaseNode);
6868       Failed = true;
6869       continue;
6870     }
6871 
6872     if (BitWidth == ~0u)
6873       BitWidth = OffsetEntryCI->getBitWidth();
6874 
6875     if (OffsetEntryCI->getBitWidth() != BitWidth) {
6876       CheckFailed(
6877           "Bitwidth between the offsets and struct type entries must match", &I,
6878           BaseNode);
6879       Failed = true;
6880       continue;
6881     }
6882 
    // NB! As far as I can tell, we generate a non-strictly increasing offset
    // sequence only from structs that have zero-size bit fields.  When
    // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
    // pick the field that appears lexically last in the struct type metadata
    // node.  This mirrors the actual behavior of the alias analysis
    // implementation.
6888     bool IsAscending =
6889         !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
6890 
6891     if (!IsAscending) {
6892       CheckFailed("Offsets must be increasing!", &I, BaseNode);
6893       Failed = true;
6894     }
6895 
6896     PrevOffset = OffsetEntryCI->getValue();
6897 
6898     if (IsNewFormat) {
6899       auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
6900           BaseNode->getOperand(Idx + 2));
6901       if (!MemberSizeNode) {
6902         CheckFailed("Member size entries must be constants!", &I, BaseNode);
6903         Failed = true;
6904         continue;
6905       }
6906     }
6907   }
6908 
6909   return Failed ? InvalidNode
6910                 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
6911 }
6912 
6913 static bool IsRootTBAANode(const MDNode *MD) {
6914   return MD->getNumOperands() < 2;
6915 }
6916 
6917 static bool IsScalarTBAANodeImpl(const MDNode *MD,
6918                                  SmallPtrSetImpl<const MDNode *> &Visited) {
6919   if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
6920     return false;
6921 
6922   if (!isa<MDString>(MD->getOperand(0)))
6923     return false;
6924 
6925   if (MD->getNumOperands() == 3) {
6926     auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
6927     if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
6928       return false;
6929   }
6930 
6931   auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
6932   return Parent && Visited.insert(Parent).second &&
6933          (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
6934 }
6935 
6936 bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
6937   auto ResultIt = TBAAScalarNodes.find(MD);
6938   if (ResultIt != TBAAScalarNodes.end())
6939     return ResultIt->second;
6940 
6941   SmallPtrSet<const MDNode *, 4> Visited;
6942   bool Result = IsScalarTBAANodeImpl(MD, Visited);
6943   auto InsertResult = TBAAScalarNodes.insert({MD, Result});
6944   (void)InsertResult;
6945   assert(InsertResult.second && "Just checked!");
6946 
6947   return Result;
6948 }
6949 
/// Returns the field node at the offset \p Offset in \p BaseNode.  Updates
/// \p Offset in place to be the offset within the field node returned.
6952 ///
6953 /// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
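/// For example (illustrative), given the old-format base node
///   !{!"S", !5, i64 0, !5, i64 4}
/// (where !5 is a scalar type node) and an incoming Offset of 6, the field at
/// offset 4 is selected and Offset is rewritten to 2.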
6954 MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
6955                                                    const MDNode *BaseNode,
6956                                                    APInt &Offset,
6957                                                    bool IsNewFormat) {
6958   assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
6959 
6960   // Scalar nodes have only one possible "field" -- their parent in the access
6961   // hierarchy.  Offset must be zero at this point, but our caller is supposed
6962   // to check that.
6963   if (BaseNode->getNumOperands() == 2)
6964     return cast<MDNode>(BaseNode->getOperand(1));
6965 
6966   unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
6967   unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
6968   for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
6969            Idx += NumOpsPerField) {
6970     auto *OffsetEntryCI =
6971         mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
6972     if (OffsetEntryCI->getValue().ugt(Offset)) {
6973       if (Idx == FirstFieldOpNo) {
6974         CheckFailed("Could not find TBAA parent in struct type node", &I,
6975                     BaseNode, &Offset);
6976         return nullptr;
6977       }
6978 
6979       unsigned PrevIdx = Idx - NumOpsPerField;
6980       auto *PrevOffsetEntryCI =
6981           mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
6982       Offset -= PrevOffsetEntryCI->getValue();
6983       return cast<MDNode>(BaseNode->getOperand(PrevIdx));
6984     }
6985   }
6986 
6987   unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
6988   auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
6989       BaseNode->getOperand(LastIdx + 1));
6990   Offset -= LastOffsetEntryCI->getValue();
6991   return cast<MDNode>(BaseNode->getOperand(LastIdx));
6992 }
6993 
6994 static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
6995   if (!Type || Type->getNumOperands() < 3)
6996     return false;
6997 
  // In the new format, type nodes have a reference to the parent type as
  // their first operand.
7000   return isa_and_nonnull<MDNode>(Type->getOperand(0));
7001 }
7002 
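// For reference (illustrative only), an access tag in the old struct-path
// format has the shape
//   !{<base type>, <access type>, i64 <offset>[, i64 <is-immutable>]}
// while the new format adds an access-size operand:
//   !{<base type>, <access type>, i64 <offset>, i64 <size>[, i64 <is-immutable>]}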
7003 bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
7004   CheckTBAA(MD->getNumOperands() > 0, "TBAA metadata cannot have 0 operands",
7005             &I, MD);
7006 
7007   CheckTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
7008                 isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
7009                 isa<AtomicCmpXchgInst>(I),
7010             "This instruction shall not have a TBAA access tag!", &I);
7011 
7012   bool IsStructPathTBAA =
7013       isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
7014 
7015   CheckTBAA(IsStructPathTBAA,
7016             "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
7017             &I);
7018 
7019   MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
7020   MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
7021 
7022   bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
7023 
7024   if (IsNewFormat) {
7025     CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
7026               "Access tag metadata must have either 4 or 5 operands", &I, MD);
7027   } else {
7028     CheckTBAA(MD->getNumOperands() < 5,
7029               "Struct tag metadata must have either 3 or 4 operands", &I, MD);
7030   }
7031 
7032   // Check the access size field.
7033   if (IsNewFormat) {
7034     auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
7035         MD->getOperand(3));
7036     CheckTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
7037   }
7038 
7039   // Check the immutability flag.
7040   unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
7041   if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
7042     auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
7043         MD->getOperand(ImmutabilityFlagOpNo));
7044     CheckTBAA(IsImmutableCI,
7045               "Immutability tag on struct tag metadata must be a constant", &I,
7046               MD);
7047     CheckTBAA(
7048         IsImmutableCI->isZero() || IsImmutableCI->isOne(),
7049         "Immutability part of the struct tag metadata must be either 0 or 1",
7050         &I, MD);
7051   }
7052 
7053   CheckTBAA(BaseNode && AccessType,
7054             "Malformed struct tag metadata: base and access-type "
7055             "should be non-null and point to Metadata nodes",
7056             &I, MD, BaseNode, AccessType);
7057 
7058   if (!IsNewFormat) {
7059     CheckTBAA(isValidScalarTBAANode(AccessType),
7060               "Access type node must be a valid scalar type", &I, MD,
7061               AccessType);
7062   }
7063 
7064   auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
7065   CheckTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
7066 
7067   APInt Offset = OffsetCI->getValue();
7068   bool SeenAccessTypeInPath = false;
7069 
7070   SmallPtrSet<MDNode *, 4> StructPath;
7071 
7072   for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
7073        BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
7074                                                IsNewFormat)) {
7075     if (!StructPath.insert(BaseNode).second) {
7076       CheckFailed("Cycle detected in struct path", &I, MD);
7077       return false;
7078     }
7079 
7080     bool Invalid;
7081     unsigned BaseNodeBitWidth;
7082     std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
7083                                                              IsNewFormat);
7084 
    // If the base node is itself invalid, then we've already printed all the
    // errors we wanted to print.
7087     if (Invalid)
7088       return false;
7089 
7090     SeenAccessTypeInPath |= BaseNode == AccessType;
7091 
7092     if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
7093       CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access",
7094                 &I, MD, &Offset);
7095 
7096     CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
7097                   (BaseNodeBitWidth == 0 && Offset == 0) ||
7098                   (IsNewFormat && BaseNodeBitWidth == ~0u),
7099               "Access bit-width not the same as description bit-width", &I, MD,
7100               BaseNodeBitWidth, Offset.getBitWidth());
7101 
7102     if (IsNewFormat && SeenAccessTypeInPath)
7103       break;
7104   }
7105 
7106   CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", &I,
7107             MD);
7108   return true;
7109 }
7110 
7111 char VerifierLegacyPass::ID = 0;
7112 INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
7113 
7114 FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
7115   return new VerifierLegacyPass(FatalErrors);
7116 }
7117 
7118 AnalysisKey VerifierAnalysis::Key;
7119 VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
7120                                                ModuleAnalysisManager &) {
7121   Result Res;
7122   Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
7123   return Res;
7124 }
7125 
7126 VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
7127                                                FunctionAnalysisManager &) {
7128   return { llvm::verifyFunction(F, &dbgs()), false };
7129 }
7130 
7131 PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
7132   auto Res = AM.getResult<VerifierAnalysis>(M);
7133   if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
7134     report_fatal_error("Broken module found, compilation aborted!");
7135 
7136   return PreservedAnalyses::all();
7137 }
7138 
7139 PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
7140   auto res = AM.getResult<VerifierAnalysis>(F);
7141   if (res.IRBroken && FatalErrors)
7142     report_fatal_error("Broken function found, compilation aborted!");
7143 
7144   return PreservedAnalyses::all();
7145 }
7146